pax_global_header00006660000000000000000000000064151076404540014520gustar00rootroot0000000000000052 comment=3441b633c2fe2c494e958780ba0f4227b1327634 ninja-1.13.2/000077500000000000000000000000001510764045400127035ustar00rootroot00000000000000ninja-1.13.2/.clang-format000066400000000000000000000020361510764045400152570ustar00rootroot00000000000000# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This isn't meant to be authoritative, but it's good enough to be useful. # Still use your best judgement for formatting decisions: clang-format # sometimes makes strange choices. 
BasedOnStyle: Google AllowShortFunctionsOnASingleLine: Inline AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false ConstructorInitializerAllOnOneLineOrOnePerLine: false Cpp11BracedListStyle: false IndentCaseLabels: false DerivePointerBinding: false ninja-1.13.2/.clang-tidy000066400000000000000000000011601510764045400147350ustar00rootroot00000000000000--- Checks: ' ,readability-avoid-const-params-in-decls, ,readability-inconsistent-declaration-parameter-name, ,readability-non-const-parameter, ,readability-redundant-string-cstr, ,readability-redundant-string-init, ,readability-simplify-boolean-expr, ,cppcoreguidelines-pro-type-cstyle-cast, ' WarningsAsErrors: ' ,readability-avoid-const-params-in-decls, ,readability-inconsistent-declaration-parameter-name, ,readability-non-const-parameter, ,readability-redundant-string-cstr, ,readability-redundant-string-init, ,readability-simplify-boolean-expr, ,cppcoreguidelines-pro-type-cstyle-cast, ' ninja-1.13.2/.editorconfig000066400000000000000000000002301510764045400153530ustar00rootroot00000000000000root = true [*] charset = utf-8 indent_style = space indent_size = 2 insert_final_newline = true end_of_line = lf [CMakeLists.txt] indent_style = tab ninja-1.13.2/.github/000077500000000000000000000000001510764045400142435ustar00rootroot00000000000000ninja-1.13.2/.github/dependabot.yml000066400000000000000000000002431510764045400170720ustar00rootroot00000000000000version: 2 updates: # Maintain dependencies for GitHub Actions - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" ninja-1.13.2/.github/workflows/000077500000000000000000000000001510764045400163005ustar00rootroot00000000000000ninja-1.13.2/.github/workflows/linux-musl.yml000066400000000000000000000035011510764045400211370ustar00rootroot00000000000000name: ci-linux-musl on: workflow_dispatch: pull_request: push: branches: ['**'] tags-ignore: ['**'] # Don't trigger on tag pushes release: types: [published] concurrency: 
group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true permissions: {} jobs: build: runs-on: ubuntu-24.04 container: alpine:edge permissions: contents: read strategy: fail-fast: false matrix: build_method: ["python", "cmake"] steps: - name: Host - checkout uses: actions/checkout@v4 with: fetch-depth: 0 persist-credentials: false - name: Install ninja build optional dependencies run: apk update && apk add -u --no-cache python3 build-base cmake re2c - name: Configure ninja build if: matrix.build_method == 'cmake' run: cmake -B build -D CMAKE_BUILD_TYPE="Release" -D CMAKE_COMPILE_WARNING_AS_ERROR="ON" - name: Cmake Build ninja if: matrix.build_method == 'cmake' run: cmake --build build --parallel --config Release - name: Cmake test ninja if: matrix.build_method == 'cmake' run: build/ninja_test --gtest_color=yes - name: Python Build ninja if: matrix.build_method == 'python' run: python3 configure.py --warnings-as-errors --bootstrap --verbose - name: Python test ninja if: matrix.build_method == 'python' run: | ./ninja all python3 misc/ninja_syntax_test.py # python3 misc/output_test.py - name: Move ninja binary if: matrix.build_method == 'cmake' run: mv -f build/ninja ninja - name: ninja-ninja --version run: ./ninja --version >> $GITHUB_STEP_SUMMARY - name: binary info via file run: file ./ninja >> $GITHUB_STEP_SUMMARY ninja-1.13.2/.github/workflows/linux.yml000066400000000000000000000136461510764045400201740ustar00rootroot00000000000000name: Linux on: pull_request: push: branches: ['**'] tags-ignore: ['**'] # Don't trigger on tag pushes release: types: [published] jobs: fedora: runs-on: [ubuntu-latest] container: image: fedora:40 steps: - uses: actions/checkout@v4 - name: Install dependencies run: dnf install -y ninja-build cmake gtest-devel re2c clang util-linux clang-tools-extra - name: Linting run: misc/ci.py - name: Configure with CMake run: cmake -Bbuild -G"Ninja Multi-Config" -DNINJA_CLANG_TIDY=1 - name: Build debug ninja run: CLICOLOR_FORCE=1 
ninja working-directory: build - name: Test debug ninja working-directory: build/Debug run: | ./ninja_test --gtest_color=yes ../../misc/output_test.py ../../misc/jobserver_test.py - name: Build release ninja run: CLICOLOR_FORCE=1 ninja -f build-Release.ninja working-directory: build - name: Test release ninja working-directory: build/Release run: | ./ninja_test --gtest_color=yes ../../misc/output_test.py ../../misc/jobserver_test.py build: strategy: matrix: host-os: ["ubuntu-latest", "ubuntu-24.04-arm"] include: - host-os: "ubuntu-24.04-arm" arch: "-aarch64" fail-fast: false defaults: run: shell: bash runs-on: ${{ matrix.host-os }} container: rockylinux/rockylinux:8 steps: - uses: actions/checkout@v4 - uses: codespell-project/actions-codespell@master with: ignore_words_list: fo,wee,addin,notin - name: Install dependencies run: | dnf install -y make gcc-c++ libasan clang-analyzer cmake dnf-plugins-core epel-release dnf config-manager --set-enabled powertools dnf install -y gtest-devel p7zip p7zip-plugins ninja-build - name: Build debug ninja env: CFLAGS: -fstack-protector-all -fsanitize=address CXXFLAGS: -fstack-protector-all -fsanitize=address run: | scan-build -o scanlogs cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -B debug-build scan-build -o scanlogs cmake --build debug-build --parallel --config Debug - name: Test debug ninja run: ASAN_OPTIONS=detect_leaks=0 ./ninja_test working-directory: debug-build - name: Build release ninja run: | cmake -GNinja -DCMAKE_BUILD_TYPE=Release -B release-build -DCMAKE_COMPILE_WARNING_AS_ERROR=1 cmake --build release-build --parallel --config Release strip release-build/ninja - name: Test release ninja run: ./ninja_test working-directory: release-build - name: Create ninja archive run: | mkdir artifact 7z a artifact/ninja-linux${{ matrix.arch }}.zip ./release-build/ninja # Upload ninja binary archive as an artifact - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ninja${{ matrix.arch }}-binary-archives path: 
artifact - name: Upload release asset if: github.event.action == 'published' uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ github.event.release.upload_url }} asset_path: ./artifact/ninja-linux${{ matrix.arch }}.zip asset_name: ninja-linux${{ matrix.arch }}.zip asset_content_type: application/zip test: runs-on: [ubuntu-latest] container: image: ubuntu:20.04 steps: - uses: actions/checkout@v4 - name: Install dependencies run: | apt update apt install -y python3-pytest ninja-build python3-pip clang libgtest-dev pip3 install cmake==3.17.* - name: Configure (GCC) run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config' - name: Build (GCC, Debug) run: cmake --build build-gcc --config Debug - name: Unit tests (GCC, Debug) run: ./build-gcc/Debug/ninja_test - name: Python tests (GCC, Debug) run: pytest-3 --color=yes ../.. working-directory: build-gcc/Debug - name: Build (GCC, Release) run: cmake --build build-gcc --config Release - name: Unit tests (GCC, Release) run: ./build-gcc/Release/ninja_test - name: Python tests (GCC, Release) run: pytest-3 --color=yes ../.. working-directory: build-gcc/Release - name: Configure (Clang) run: CC=clang CXX=clang++ cmake -Bbuild-clang -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config' - name: Build (Clang, Debug) run: cmake --build build-clang --config Debug - name: Unit tests (Clang, Debug) run: ./build-clang/Debug/ninja_test - name: Python tests (Clang, Debug) run: pytest-3 --color=yes ../.. working-directory: build-clang/Debug - name: Build (Clang, Release) run: cmake --build build-clang --config Release - name: Unit tests (Clang, Release) run: ./build-clang/Release/ninja_test - name: Python tests (Clang, Release) run: pytest-3 --color=yes ../.. 
working-directory: build-clang/Release build-with-python: runs-on: [ubuntu-latest] container: image: ${{ matrix.image }} strategy: matrix: image: ["ubuntu:20.04", "ubuntu:22.04", "ubuntu:24.04"] steps: - uses: actions/checkout@v4 - name: Install dependencies run: | apt update apt install -y g++ python3 - name: ${{ matrix.image }} run: | # Do not set --warnings-as-errors here as that triggers an irrelevant # compiler warnings in with ubuntu:24.04. See issue #2615 python3 configure.py --bootstrap ./ninja all python3 misc/ninja_syntax_test.py ./misc/output_test.py ninja-1.13.2/.github/workflows/macos.yml000066400000000000000000000025371510764045400201340ustar00rootroot00000000000000name: macOS on: pull_request: push: branches: ['**'] tags-ignore: ['**'] # Don't trigger on tag pushes release: types: [published] jobs: build: runs-on: macos-13 steps: - uses: actions/checkout@v4 - name: Install dependencies run: brew install re2c p7zip cmake - name: Build ninja shell: bash env: MACOSX_DEPLOYMENT_TARGET: 10.15 run: | cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64' -DCMAKE_COMPILE_WARNING_AS_ERROR=1 cmake --build build --config Release - name: Test ninja (Release) run: ./ninja_test working-directory: build/Release - name: Create ninja archive shell: bash run: | mkdir artifact 7z a artifact/ninja-mac.zip ./build/Release/ninja # Upload ninja binary archive as an artifact - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ninja-binary-archives path: artifact - name: Upload release asset if: github.event.action == 'published' uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ github.event.release.upload_url }} asset_path: ./artifact/ninja-mac.zip asset_name: ninja-mac.zip asset_content_type: application/zip ninja-1.13.2/.github/workflows/windows.yml000066400000000000000000000033541510764045400205220ustar00rootroot00000000000000name: Windows on: pull_request: push: branches: ['**'] 
tags-ignore: ['**'] # Don't trigger on tag pushes release: types: [published] jobs: build: runs-on: windows-latest strategy: fail-fast: false matrix: include: - arch: 'x64' suffix: '' - arch: 'arm64' suffix: 'arm64' steps: - uses: actions/checkout@v4 - name: Install dependencies run: choco install re2c - name: Build ninja shell: bash run: | cmake -Bbuild -A ${{ matrix.arch }} -DCMAKE_COMPILE_WARNING_AS_ERROR=1 cmake --build build --parallel --config Debug cmake --build build --parallel --config Release - name: Test ninja (Debug) if: matrix.arch != 'arm64' run: .\ninja_test.exe working-directory: build/Debug - name: Test ninja (Release) if: matrix.arch != 'arm64' run: .\ninja_test.exe working-directory: build/Release - name: Create ninja archive shell: bash run: | mkdir artifact 7z a artifact/ninja-win${{ matrix.suffix }}.zip ./build/Release/ninja.exe # Upload ninja binary archive as an artifact - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ninja-binary-archives${{ matrix.suffix }} path: artifact - name: Upload release asset if: github.event.action == 'published' uses: actions/upload-release-asset@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ github.event.release.upload_url }} asset_path: ./artifact/ninja-win${{ matrix.suffix }}.zip asset_name: ninja-win${{ matrix.suffix }}.zip asset_content_type: application/zip ninja-1.13.2/.gitignore000066400000000000000000000012041510764045400146700ustar00rootroot00000000000000*.pyc *.obj *.exe *.pdb *.ilk /build*/ /build.ninja /ninja /ninja.bootstrap /build_log_perftest /canon_perftest /clparser_perftest /elide_middle_perftest /depfile_parser_perftest /hash_collision_bench /ninja_test /manifest_parser_perftest /graph.png /doc/manual.html /doc/doxygen *.patch .DS_Store # Eclipse project files .project .cproject # SublimeText project files *.sublime-project *.sublime-workspace # Ninja output .ninja_deps .ninja_log # Visual Studio Code project files /.vscode/ /.ccls-cache/ # Qt 
Creator project files /CMakeLists.txt.user # clangd /.clangd/ /compile_commands.json /.cache/ # Visual Studio files /.vs/ /out/ ninja-1.13.2/CMakeLists.txt000066400000000000000000000251111510764045400154430ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.15) include(CheckSymbolExists) include(CheckIPOSupported) option(NINJA_BUILD_BINARY "Build ninja binary" ON) option(NINJA_FORCE_PSELECT "Use pselect() even on platforms that provide ppoll()" OFF) project(ninja CXX) # --- optional link-time optimization check_ipo_supported(RESULT lto_supported OUTPUT error) if(lto_supported) message(STATUS "IPO / LTO enabled") set(CMAKE_POLICY_DEFAULT_CMP0069 NEW) set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE TRUE) else() message(STATUS "IPO / LTO not supported: <${error}>") endif() # --- compiler flags if(MSVC) set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$:Debug>") string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) # Note that these settings are separately specified in configure.py, and # these lists should be kept in sync. add_compile_options(/W4 /wd4100 /wd4267 /wd4706 /wd4702 /wd4244 /GR- /Zc:__cplusplus) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) else() include(CheckCXXCompilerFlag) check_cxx_compiler_flag(-Wno-deprecated flag_no_deprecated) if(flag_no_deprecated) add_compile_options(-Wno-deprecated) endif() if(CMAKE_VERSION VERSION_LESS 3.24) check_cxx_compiler_flag(-fdiagnostics-color flag_color_diag) if(flag_color_diag) add_compile_options(-fdiagnostics-color) endif() elseif(NOT DEFINED ENV{CMAKE_COLOR_DIAGNOSTICS}) set(CMAKE_COLOR_DIAGNOSTICS ON) endif() if(NOT NINJA_FORCE_PSELECT) # Check whether ppoll() is usable on the target platform. # Set -DUSE_PPOLL=1 if this is the case. # # NOTE: Use check_cxx_symbol_exists() instead of check_symbol_exists() # because on Linux, only exposes the symbol when _GNU_SOURCE # is defined. 
# # Both g++ and clang++ define the symbol by default, because the C++ # standard library headers require it, but *not* gcc and clang, which # are used by check_symbol_exists(). include(CheckCXXSymbolExists) check_cxx_symbol_exists(ppoll poll.h HAVE_PPOLL) if(HAVE_PPOLL) add_compile_definitions(USE_PPOLL=1) endif() endif() endif() # --- optional re2c set(RE2C_MAJOR_VERSION 0) find_program(RE2C re2c) if(RE2C) execute_process(COMMAND "${RE2C}" --vernum OUTPUT_VARIABLE RE2C_RAW_VERSION) math(EXPR RE2C_MAJOR_VERSION "${RE2C_RAW_VERSION} / 10000") endif() if(${RE2C_MAJOR_VERSION} GREATER 1) # the depfile parser and ninja lexers are generated using re2c. function(re2c IN OUT) add_custom_command(DEPENDS ${IN} OUTPUT ${OUT} COMMAND ${RE2C} -b -i --no-generation-date --no-version -o ${OUT} ${IN} ) endfunction() re2c(${PROJECT_SOURCE_DIR}/src/depfile_parser.in.cc ${PROJECT_BINARY_DIR}/depfile_parser.cc) re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc) add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc) else() message(WARNING "re2c 2 or later was not found; changes to src/*.in.cc will not affect your build.") add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc) endif() target_include_directories(libninja-re2c PRIVATE src) # --- Check for 'browse' mode support function(check_platform_supports_browse_mode RESULT) # Make sure the inline.sh script works on this platform. # It uses the shell commands such as 'od', which may not be available. execute_process( COMMAND sh -c "echo 'TEST' | src/inline.sh var" WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} RESULT_VARIABLE inline_result OUTPUT_QUIET ERROR_QUIET ) if(NOT inline_result EQUAL "0") # The inline script failed, so browse mode is not supported. 
set(${RESULT} "0" PARENT_SCOPE) if(NOT WIN32) message(WARNING "browse feature omitted due to inline script failure") endif() return() endif() # Now check availability of the unistd header check_symbol_exists(fork "unistd.h" HAVE_FORK) check_symbol_exists(pipe "unistd.h" HAVE_PIPE) set(browse_supported 0) if (HAVE_FORK AND HAVE_PIPE) set(browse_supported 1) endif () set(${RESULT} "${browse_supported}" PARENT_SCOPE) if(NOT browse_supported) message(WARNING "browse feature omitted due to missing `fork` and `pipe` functions") endif() endfunction() set(NINJA_PYTHON "python" CACHE STRING "Python interpreter to use for the browse tool") check_platform_supports_browse_mode(platform_supports_ninja_browse) # Core source files all build into ninja library. add_library(libninja OBJECT src/build_log.cc src/build.cc src/clean.cc src/clparser.cc src/dyndep.cc src/dyndep_parser.cc src/debug_flags.cc src/deps_log.cc src/disk_interface.cc src/edit_distance.cc src/elide_middle.cc src/eval_env.cc src/graph.cc src/graphviz.cc src/jobserver.cc src/json.cc src/line_printer.cc src/manifest_parser.cc src/metrics.cc src/missing_deps.cc src/parser.cc src/real_command_runner.cc src/state.cc src/status_printer.cc src/string_piece_util.cc src/util.cc src/version.cc ) if(WIN32) target_sources(libninja PRIVATE src/subprocess-win32.cc src/includes_normalize-win32.cc src/jobserver-win32.cc src/msvc_helper-win32.cc src/msvc_helper_main-win32.cc src/getopt.c src/minidump-win32.cc ) # Build getopt.c, which can be compiled as either C or C++, as C++ # so that build environments which lack a C compiler, but have a C++ # compiler may build ninja. set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX) # windows.h defines min() and max() which conflict with std::min() # and std::max(), which both might be used in sources. Avoid compile # errors by telling windows.h to not define those two. 
add_compile_definitions(NOMINMAX) else() target_sources(libninja PRIVATE src/jobserver-posix.cc src/subprocess-posix.cc ) if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX") target_sources(libninja PRIVATE src/getopt.c) # Build getopt.c, which can be compiled as either C or C++, as C++ # so that build environments which lack a C compiler, but have a C++ # compiler may build ninja. set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX) endif() # Needed for perfstat_cpu_total if(CMAKE_SYSTEM_NAME STREQUAL "AIX") target_link_libraries(libninja PUBLIC "-lperfstat") endif() endif() target_compile_features(libninja PUBLIC cxx_std_11) target_compile_features(libninja-re2c PUBLIC cxx_std_11) #Fixes GetActiveProcessorCount on MinGW if(MINGW) target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 __USE_MINGW_ANSI_STDIO=1) endif() # On IBM i (identified as "OS400" for compatibility reasons) and AIX, this fixes missing # PRId64 (and others) at compile time in C++ sources if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX") add_compile_definitions(__STDC_FORMAT_MACROS) endif() # Main executable is library plus main() function. if(NINJA_BUILD_BINARY) add_executable(ninja src/ninja.cc) target_link_libraries(ninja PRIVATE libninja libninja-re2c) if(WIN32) target_sources(ninja PRIVATE windows/ninja.manifest) endif() option(NINJA_CLANG_TIDY "Run clang-tidy on source files" OFF) if(NINJA_CLANG_TIDY) set_target_properties(libninja PROPERTIES CXX_CLANG_TIDY "clang-tidy;--use-color") set_target_properties(ninja PROPERTIES CXX_CLANG_TIDY "clang-tidy;--use-color") endif() endif() # Adds browse mode into the ninja binary if it's supported by the host platform. 
if(platform_supports_ninja_browse) # Inlines src/browse.py into the browse_py.h header, so that it can be included # by src/browse.cc add_custom_command( OUTPUT build/browse_py.h MAIN_DEPENDENCY src/browse.py DEPENDS src/inline.sh COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/build COMMAND src/inline.sh kBrowsePy < src/browse.py > ${PROJECT_BINARY_DIR}/build/browse_py.h WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} VERBATIM ) if(NINJA_BUILD_BINARY) target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE) target_sources(ninja PRIVATE src/browse.cc) endif() set_source_files_properties(src/browse.cc PROPERTIES OBJECT_DEPENDS "${PROJECT_BINARY_DIR}/build/browse_py.h" INCLUDE_DIRECTORIES "${PROJECT_BINARY_DIR}" COMPILE_DEFINITIONS NINJA_PYTHON="${NINJA_PYTHON}" ) endif() include(CTest) if(BUILD_TESTING) # Can be removed if cmake min version is >=3.24 if (POLICY CMP0135) cmake_policy(SET CMP0135 NEW) endif() find_package(GTest) if(NOT GTest_FOUND) include(FetchContent) FetchContent_Declare( googletest # GoogleTest v1.17.0 requires at least C++17 and cmake 3.16 which are above ninja project minimum requirements. # GoogleTest v1.16.0 requires at least C++14 and cmake 3.13 which are within the ninja project minimum requirements. URL https://github.com/google/googletest/releases/download/v1.16.0/googletest-1.16.0.tar.gz URL_HASH SHA256=78c676fc63881529bf97bf9d45948d905a66833fbfa5318ea2cd7478cb98f399 ) FetchContent_MakeAvailable(googletest) endif() # Tests all build into ninja_test executable. 
add_executable(ninja_test src/build_log_test.cc src/build_test.cc src/clean_test.cc src/clparser_test.cc src/depfile_parser_test.cc src/deps_log_test.cc src/disk_interface_test.cc src/dyndep_parser_test.cc src/edit_distance_test.cc src/elide_middle_test.cc src/explanations_test.cc src/graph_test.cc src/jobserver_test.cc src/json_test.cc src/lexer_test.cc src/manifest_parser_test.cc src/missing_deps_test.cc src/ninja_test.cc src/state_test.cc src/string_piece_util_test.cc src/subprocess_test.cc src/test.cc src/util_test.cc ) if(WIN32) target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc windows/ninja.manifest) if(MSVC) # Silence warnings about using unlink rather than _unlink target_compile_definitions(ninja_test PRIVATE _CRT_NONSTDC_NO_DEPRECATE) endif() endif() find_package(Threads REQUIRED) target_link_libraries(ninja_test PRIVATE libninja libninja-re2c GTest::gtest Threads::Threads) foreach(perftest build_log_perftest canon_perftest clparser_perftest depfile_parser_perftest elide_middle_perftest hash_collision_bench manifest_parser_perftest ) add_executable(${perftest} src/${perftest}.cc) target_link_libraries(${perftest} PRIVATE libninja libninja-re2c) endforeach() if(CMAKE_SYSTEM_NAME STREQUAL "AIX" AND CMAKE_SIZEOF_VOID_P EQUAL 4) # These tests require more memory than will fit in the standard AIX shared stack/heap (256M) target_link_options(hash_collision_bench PRIVATE "-Wl,-bmaxdata:0x80000000") target_link_options(manifest_parser_perftest PRIVATE "-Wl,-bmaxdata:0x80000000") endif() add_test(NAME NinjaTest COMMAND ninja_test) endif() if(NINJA_BUILD_BINARY) install(TARGETS ninja) endif() ninja-1.13.2/CONTRIBUTING.md000066400000000000000000000026521510764045400151410ustar00rootroot00000000000000# How to successfully make changes to Ninja We're very wary of changes that increase the complexity of Ninja (in particular, new build file syntax or command-line flags) or increase the maintenance burden of Ninja. 
Ninja is already successfully used by hundreds of developers for large projects and it already achieves (most of) the goals we set out for it to do. It's probably best to discuss new feature ideas on the [mailing list](https://groups.google.com/forum/#!forum/ninja-build) or in an issue before creating a PR. ## Coding guidelines Generally it's the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with a few additions: * We have used `using namespace std;` a lot in the past. For new contributions, please try to avoid relying on it and instead whenever possible use `std::`. However, please do not change existing code simply to add `std::` unless your contribution already needs to change that line of code anyway. * Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to arguments). * It's not necessary to document each argument, especially when they're relatively self-evident (e.g. in `CanonicalizePath(string* path, string* err)`, the arguments are hopefully obvious). If you're unsure about code formatting, please use [clang-format](https://clang.llvm.org/docs/ClangFormat.html). However, please do not format code that is not otherwise part of your contribution. ninja-1.13.2/COPYING000066400000000000000000000261361510764045400137460ustar00rootroot00000000000000 Apache License Version 2.0, January 2010 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ninja-1.13.2/README.md000066400000000000000000000053351510764045400141700ustar00rootroot00000000000000# Ninja Ninja is a small build system with a focus on speed. https://ninja-build.org/ See [the manual](https://ninja-build.org/manual.html) or `doc/manual.asciidoc` included in the distribution for background and more details. Binaries for Linux, Mac and Windows are available on [GitHub](https://github.com/ninja-build/ninja/releases). Run `./ninja -h` for Ninja help. Installation is not necessary because the only required file is the resulting ninja binary. However, to enable features like Bash completion and Emacs and Vim editing modes, some files in misc/ must be copied to appropriate locations. If you're interested in making changes to Ninja, read [CONTRIBUTING.md](CONTRIBUTING.md) first. ## Building Ninja itself You can either build Ninja via the custom generator script written in Python or via CMake. For more details see [the wiki](https://github.com/ninja-build/ninja/wiki). ### Python ``` ./configure.py --bootstrap ``` This will generate the `ninja` binary and a `build.ninja` file you can now use to build Ninja with itself. 
If you have a GoogleTest source directory, you can build the tests by passing its path with `--gtest-source-dir=PATH` option, or the `GTEST_SOURCE_DIR` environment variable, e.g.: ``` ./configure.py --bootstrap --gtest-source-dir=/path/to/googletest ./ninja all # build ninja_test and other auxiliary binaries ./ninja_test` # run the unit-test suite. ``` Use the CMake build below if you want to use a preinstalled binary version of the library. ### CMake To build the ninja binary without building the unit tests, disable test building by setting `BUILD_TESTING` to `OFF`: ``` cmake -Bbuild-cmake -DBUILD_TESTING=OFF cmake --build build-cmake ``` The `ninja` binary will now be inside the `build-cmake` directory (you can choose any other name you like). To run the unit tests, omit the `-DBUILD_TESTING=OFF` option, and after building, run: ``` ./build-cmake/ninja_test ``` ## Generating documentation ### Ninja Manual You must have `asciidoc` and `xsltproc` in your PATH, then do: ``` ./configure.py ninja manual doc/manual.html ``` Which will generate `doc/manual.html`. To generate the PDF version of the manual, you must have `dblatext` in your PATH then do: ``` ./configure.py # only if you didn't do it previously. ninja doc/manual.pdf ``` Which will generate `doc/manual.pdf`. ### Doxygen documentation If you have `doxygen` installed, you can build documentation extracted from C++ declarations and comments to help you navigate the code. Note that Ninja is a standalone executable, not a library, so there is no public API, all details exposed here are internal. ``` ./configure.py # if needed ninja doxygen ``` Then open `doc/doxygen/html/index.html` in a browser to look at it. ninja-1.13.2/RELEASING.md000066400000000000000000000030131510764045400145330ustar00rootroot00000000000000Notes to myself on all the steps to make for a Ninja release. ### Push new release branch: 1. Run afl-fuzz for a day or so and run ninja_test 2. 
Consider sending a heads-up to the ninja-build mailing list first 3. Make sure branches 'master' and 'release' are synced up locally 4. Update src/version.cc with new version (with ".git"), then ``` git commit -am 'mark this 1.5.0.git' ``` 5. git checkout release; git merge master 6. Fix version number in src/version.cc (it will likely conflict in the above) 7. Fix version in doc/manual.asciidoc (exists only on release branch) 8. commit, tag, push (don't forget to push --tags) ``` git commit -am v1.5.0; git push origin release git tag v1.5.0; git push --tags # Push the 1.5.0.git change on master too: git checkout master; git push origin master ``` 9. Construct release notes from prior notes credits: `git shortlog -s --no-merges REV..` ### Release on GitHub: 1. Go to [Tags](https://github.com/ninja-build/ninja/tags) 2. Open the newly created tag and select "Create release from tag" 3. Create the release which will trigger a build which automatically attaches the binaries ### Make announcement on mailing list: 1. copy old mail ### Update website: 1. Make sure your ninja checkout is on the v1.5.0 tag 2. Clone https://github.com/ninja-build/ninja-build.github.io 3. In that repo, `./update-docs.sh` 4. Update index.html with newest version and link to release notes 5. `git commit -m 'run update-docs.sh, 1.5.0 release'` 6. 
`git push origin master` ninja-1.13.2/appveyor.yml000066400000000000000000000015231510764045400152740ustar00rootroot00000000000000version: 1.0.{build} image: - Visual Studio 2017 - Ubuntu2204 environment: CLICOLOR_FORCE: 1 CHERE_INVOKING: 1 # Tell Bash to inherit the current working directory matrix: - MSYSTEM: MINGW64 - MSYSTEM: LINUX matrix: exclude: - image: Visual Studio 2017 MSYSTEM: LINUX - image: Ubuntu2204 MSYSTEM: MINGW64 for: - matrix: only: - MSYSTEM: MINGW64 build_script: ps: "C:\\msys64\\usr\\bin\\bash -lc @\"\n pacman -S --quiet --noconfirm --needed re2c 2>&1\n ./configure.py --bootstrap --platform mingw 2>&1\n ./ninja all\n ./misc/ninja_syntax_test.py 2>&1\n\"@" - matrix: only: - image: Ubuntu2204 build_script: - ./configure.py --bootstrap - ./ninja all - misc/ninja_syntax_test.py - misc/output_test.py test: off ninja-1.13.2/configure.py000077500000000000000000000672751510764045400152620ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright 2001 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script that generates the build.ninja for ninja itself. 
Projects that use ninja themselves should either write a similar script or use a meta-build system that supports Ninja output.""" from optparse import OptionParser import os import shlex import subprocess import sys from typing import Optional, Union, Dict, List, Any, TYPE_CHECKING sourcedir = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(sourcedir, 'misc')) if TYPE_CHECKING: import misc.ninja_syntax as ninja_syntax else: import ninja_syntax class Platform(object): """Represents a host/target platform and its specific build attributes.""" def __init__(self, platform: Optional[str]) -> None: self._platform = platform if self._platform is not None: return self._platform = sys.platform if self._platform.startswith('linux'): self._platform = 'linux' elif self._platform.startswith('freebsd'): self._platform = 'freebsd' elif self._platform.startswith('gnukfreebsd'): self._platform = 'freebsd' elif self._platform.startswith('openbsd'): self._platform = 'openbsd' elif self._platform.startswith('solaris') or self._platform == 'sunos5': self._platform = 'solaris' elif self._platform.startswith('mingw'): self._platform = 'mingw' elif self._platform.startswith('win'): self._platform = 'msvc' elif self._platform.startswith('bitrig'): self._platform = 'bitrig' elif self._platform.startswith('netbsd'): self._platform = 'netbsd' elif self._platform.startswith('aix'): self._platform = 'aix' elif self._platform.startswith('os400'): self._platform = 'os400' elif self._platform.startswith('dragonfly'): self._platform = 'dragonfly' @staticmethod def known_platforms() -> List[str]: return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5', 'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix', 'dragonfly'] def platform(self) -> str: return self._platform # type: ignore # Incompatible return value type def is_linux(self) -> bool: return self._platform == 'linux' def is_mingw(self) -> bool: return self._platform == 'mingw' def is_msvc(self) -> 
bool: return self._platform == 'msvc' def msvc_needs_fs(self) -> bool: popen = subprocess.Popen(['cl', '/nologo', '/help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = popen.communicate() return b'/FS' in out def is_windows(self) -> bool: return self.is_mingw() or self.is_msvc() def is_solaris(self) -> bool: return self._platform == 'solaris' def is_aix(self) -> bool: return self._platform == 'aix' def is_os400_pase(self) -> bool: return self._platform == 'os400' or os.uname().sysname.startswith('OS400') # type: ignore # Module has no attribute "uname" def uses_usr_local(self) -> bool: return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd') def supports_ppoll(self) -> bool: return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig', 'dragonfly') def supports_ninja_browse(self) -> bool: return (not self.is_windows() and not self.is_solaris() and not self.is_aix()) def can_rebuild_in_place(self) -> bool: return not (self.is_windows() or self.is_aix()) class Bootstrap: """API shim for ninja_syntax.Writer that instead runs the commands. Used to bootstrap Ninja from scratch. In --bootstrap mode this class is used to execute all the commands to build an executable. It also proxies all calls to an underlying ninja_syntax.Writer, to behave like non-bootstrap mode. """ def __init__(self, writer: ninja_syntax.Writer, verbose: bool = False) -> None: self.writer = writer self.verbose = verbose # Map of variable name => expanded variable value. self.vars: Dict[str, str] = {} # Map of rule name => dict of rule attributes. self.rules: Dict[str, Dict[str, Any]] = { 'phony': {} } def comment(self, text: str) -> None: return self.writer.comment(text) def newline(self) -> None: return self.writer.newline() def variable(self, key: str, val: str) -> None: # In bootstrap mode, we have no ninja process to catch /showIncludes # output. 
self.vars[key] = self._expand(val).replace('/showIncludes', '') return self.writer.variable(key, val) def rule(self, name: str, **kwargs: Any) -> None: self.rules[name] = kwargs return self.writer.rule(name, **kwargs) def build( self, outputs: Union[str, List[str]], rule: str, inputs: Optional[Union[str, List[str]]] = None, **kwargs: Any ) -> List[str]: ruleattr = self.rules[rule] cmd = ruleattr.get('command') if cmd is None: # A phony rule, for example. return # type: ignore # Return value expected # Implement just enough of Ninja variable expansion etc. to # make the bootstrap build work. local_vars = { 'in': self._expand_paths(inputs), 'out': self._expand_paths(outputs) } for key, val in kwargs.get('variables', []): local_vars[key] = ' '.join(ninja_syntax.as_list(val)) self._run_command(self._expand(cmd, local_vars)) return self.writer.build(outputs, rule, inputs, **kwargs) def default(self, paths: Union[str, List[str]]) -> None: return self.writer.default(paths) def _expand_paths(self, paths: Optional[Union[str, List[str]]]) -> str: """Expand $vars in an array of paths, e.g. from a 'build' block.""" paths = ninja_syntax.as_list(paths) return ' '.join(map(self._shell_escape, (map(self._expand, paths)))) def _expand(self, str: str, local_vars: Dict[str, str] = {}) -> str: """Expand $vars in a string.""" return ninja_syntax.expand(str, self.vars, local_vars) def _shell_escape(self, path: str) -> str: """Quote paths containing spaces.""" return '"%s"' % path if ' ' in path else path def _run_command(self, cmdline: str) -> None: """Run a subcommand, quietly. 
Prints the full command on error.""" try: if self.verbose: print(cmdline) subprocess.check_call(cmdline, shell=True) except subprocess.CalledProcessError: print('when running: ', cmdline) raise parser = OptionParser() profilers = ['gmon', 'pprof'] parser.add_option('--bootstrap', action='store_true', help='bootstrap a ninja binary from nothing') parser.add_option('--verbose', action='store_true', help='enable verbose build') parser.add_option('--platform', help='target platform (' + '/'.join(Platform.known_platforms()) + ')', choices=Platform.known_platforms()) parser.add_option('--host', help='host platform (' + '/'.join(Platform.known_platforms()) + ')', choices=Platform.known_platforms()) parser.add_option('--debug', action='store_true', help='enable debugging extras',) parser.add_option('--warnings-as-errors', action='store_true', help='convert warnings into errors during build',) parser.add_option('--profile', metavar='TYPE', choices=profilers, help='enable profiling (' + '/'.join(profilers) + ')',) parser.add_option('--gtest-source-dir', metavar='PATH', help='Path to GoogleTest source directory. If not provided ' + 'GTEST_SOURCE_DIR will be probed in the environment. ' + 'Tests will not be built without a value.') parser.add_option('--with-python', metavar='EXE', help='use EXE as the Python interpreter', default=os.path.basename(sys.executable)) parser.add_option('--force-pselect', action='store_true', help='ppoll() is used by default where available, ' 'but some platforms may need to use pselect instead',) (options, args) = parser.parse_args() if args: print('ERROR: extra unparsed command-line arguments:', args) sys.exit(1) platform = Platform(options.platform) if options.host: host = Platform(options.host) else: host = platform BUILD_FILENAME = 'build.ninja' ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w')) n: Union[ninja_syntax.Writer, Bootstrap] = ninja_writer if options.bootstrap: # Make the build directory. 
try: os.mkdir('build') except OSError: pass # Wrap ninja_writer with the Bootstrapper, which also executes the # commands. print('bootstrapping ninja...') n = Bootstrap(n, verbose=options.verbose) # type: ignore # Incompatible types in assignment n.comment('This file is used to build ninja itself.') n.comment('It is generated by ' + os.path.basename(__file__) + '.') n.newline() n.variable('ninja_required_version', '1.3') n.newline() n.comment('The arguments passed to configure.py, for rerunning it.') configure_args = sys.argv[1:] if '--bootstrap' in configure_args: configure_args.remove('--bootstrap') n.variable('configure_args', ' '.join(configure_args)) env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']) configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys) if configure_env: config_str = ' '.join([k + '=' + shlex.quote(configure_env[k]) for k in configure_env]) n.variable('configure_env', config_str + '$ ') n.newline() CXX = configure_env.get('CXX', 'c++') objext = '.o' if platform.is_msvc(): CXX = 'cl' objext = '.obj' def src(filename: str) -> str: return os.path.join('$root', 'src', filename) def built(filename: str) -> str: return os.path.join('$builddir', filename) def doc(filename: str) -> str: return os.path.join('$root', 'doc', filename) def cc(name: str, **kwargs: Any) -> List[str]: return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs) def cxx(name: str, **kwargs: Any) -> List[str]: return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs) def binary(name: str) -> str: if platform.is_windows(): exe = name + '.exe' n.build(name, 'phony', exe) return exe return name root = sourcedir if root == os.getcwd(): # In the common case where we're building directly in the source # tree, simplify all the paths to just be cwd-relative. root = '.' 
n.variable('root', root) n.variable('builddir', 'build') n.variable('cxx', CXX) if platform.is_msvc(): n.variable('ar', 'link') else: n.variable('ar', configure_env.get('AR', 'ar')) def search_system_path(file_name: str) -> Optional[str]: # type: ignore # Missing return statement """Find a file in the system path.""" for dir in os.environ['path'].split(';'): path = os.path.join(dir, file_name) if os.path.exists(path): return path # Note that build settings are separately specified in CMakeLists.txt and # these lists should be kept in sync. if platform.is_msvc(): if not search_system_path('cl.exe'): raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS') cflags = ['/showIncludes', '/nologo', # Don't print startup banner. '/utf-8', '/Zi', # Create pdb with debug info. '/W4', # Highest warning level. '/WX', # Warnings as errors. '/wd4530', '/wd4100', '/wd4706', '/wd4244', '/wd4512', '/wd4800', '/wd4702', # Disable warnings about constant conditional expressions. '/wd4127', # Disable warnings about passing "this" during initialization. '/wd4355', # Disable warnings about ignored typedef in DbgHelp.h '/wd4091', '/GR-', # Disable RTTI. '/Zc:__cplusplus', # Disable size_t -> int truncation warning. # We never have strings or arrays larger than 2**31. 
'/wd4267', '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS', '/D_HAS_EXCEPTIONS=0', '/DNINJA_PYTHON="%s"' % options.with_python] if options.warnings_as_errors: cflags.append('/WX') if platform.msvc_needs_fs(): cflags.append('/FS') ldflags = ['/DEBUG', '/libpath:$builddir'] if not options.debug: cflags += ['/Ox', '/DNDEBUG', '/GL'] ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF'] else: cflags = ['-g', '-Wall', '-Wextra', '-Wno-deprecated', '-Wno-missing-field-initializers', '-Wno-unused-parameter', '-fno-rtti', '-fno-exceptions', '-std=c++14', '-fvisibility=hidden', '-pipe', '-DNINJA_PYTHON="%s"' % options.with_python] if options.warnings_as_errors: cflags += [ "-Werror" ] if options.debug: cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC'] cflags.remove('-fno-rtti') # Needed for above pedanticness. else: cflags += ['-O2', '-DNDEBUG'] try: proc = subprocess.Popen( [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null', '-o', '/dev/null'], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT) if proc.wait() == 0: cflags += ['-fdiagnostics-color'] except: pass if platform.is_mingw(): cflags += ['-D_WIN32_WINNT=0x0601', '-D__USE_MINGW_ANSI_STDIO=1'] ldflags = ['-L$builddir'] if platform.uses_usr_local(): cflags.append('-I/usr/local/include') ldflags.append('-L/usr/local/lib') if platform.is_aix(): # printf formats for int64_t, uint64_t; large file support cflags.append('-D__STDC_FORMAT_MACROS') cflags.append('-D_LARGE_FILES') libs = [] if platform.is_mingw(): cflags.remove('-fvisibility=hidden'); ldflags.append('-static') elif platform.is_solaris(): cflags.remove('-fvisibility=hidden') elif platform.is_aix(): cflags.remove('-fvisibility=hidden') elif platform.is_msvc(): pass else: if options.profile == 'gmon': cflags.append('-pg') ldflags.append('-pg') elif options.profile == 'pprof': cflags.append('-fno-omit-frame-pointer') libs.extend(['-Wl,--no-as-needed', '-lprofiler']) if platform.supports_ppoll() and not options.force_pselect: cflags.append('-DUSE_PPOLL') 
if platform.supports_ninja_browse(): cflags.append('-DNINJA_HAVE_BROWSE') # Search for generated headers relative to build dir. cflags.append('-I.') def shell_escape(str: str) -> str: """Escape str such that it's interpreted as a single argument by the shell.""" # This isn't complete, but it's just enough to make NINJA_PYTHON work. if platform.is_windows(): return str if '"' in str: return "'%s'" % str.replace("'", "\\'") return str if 'CFLAGS' in configure_env: cflags.append(configure_env['CFLAGS']) ldflags.append(configure_env['CFLAGS']) if 'CXXFLAGS' in configure_env: cflags.append(configure_env['CXXFLAGS']) ldflags.append(configure_env['CXXFLAGS']) n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags)) if 'LDFLAGS' in configure_env: ldflags.append(configure_env['LDFLAGS']) n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags)) n.newline() if platform.is_msvc(): n.rule('cxx', command='$cxx $cflags -c $in /Fo$out /Fd' + built('$pdb'), description='CXX $out', deps='msvc' # /showIncludes is included in $cflags. 
) else: n.rule('cxx', command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out', depfile='$out.d', deps='gcc', description='CXX $out') n.newline() if host.is_msvc(): n.rule('ar', command='lib /nologo /ltcg /out:$out $in', description='LIB $out') elif host.is_mingw(): n.rule('ar', command='$ar crs $out $in', description='AR $out') else: n.rule('ar', command='rm -f $out && $ar crs $out $in', description='AR $out') n.newline() if platform.is_msvc(): n.rule('link', command='$cxx $in $libs /nologo /link $ldflags /out:$out', description='LINK $out') else: n.rule('link', command='$cxx $ldflags -o $out $in $libs', description='LINK $out') n.newline() objs = [] if platform.supports_ninja_browse(): n.comment('browse_py.h is used to inline browse.py.') n.rule('inline', command='"%s"' % src('inline.sh') + ' $varname < $in > $out', description='INLINE $out') n.build(built('browse_py.h'), 'inline', src('browse.py'), implicit=src('inline.sh'), variables=[('varname', 'kBrowsePy')]) n.newline() objs += cxx('browse', order_only=built('browse_py.h')) n.newline() n.comment('the depfile parser and ninja lexers are generated using re2c.') def has_re2c() -> bool: try: proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE) return int(proc.communicate()[0], 10) >= 1503 except OSError: return False if has_re2c(): n.rule('re2c', command='re2c -b -i --no-generation-date --no-version -o $out $in', description='RE2C $out') # Generate the .cc files in the source directory so we can check them in. 
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc')) n.build(src('lexer.cc'), 're2c', src('lexer.in.cc')) else: print("warning: A compatible version of re2c (>= 0.15.3) was not found; " "changes to src/*.in.cc will not affect your build.") n.newline() cxxvariables = [] if platform.is_msvc(): cxxvariables = [('pdb', 'ninja.pdb')] n.comment('Generate a library for `ninja-re2c`.') re2c_objs = [] for name in ['depfile_parser', 'lexer']: re2c_objs += cxx(name, variables=cxxvariables) if platform.is_msvc(): n.build(built('ninja-re2c.lib'), 'ar', re2c_objs) else: n.build(built('libninja-re2c.a'), 'ar', re2c_objs) n.newline() n.comment('Core source files all build into ninja library.') objs.extend(re2c_objs) for name in ['build', 'build_log', 'clean', 'clparser', 'debug_flags', 'deps_log', 'disk_interface', 'dyndep', 'dyndep_parser', 'edit_distance', 'elide_middle', 'eval_env', 'graph', 'graphviz', 'jobserver', 'json', 'line_printer', 'manifest_parser', 'metrics', 'missing_deps', 'parser', 'real_command_runner', 'state', 'status_printer', 'string_piece_util', 'util', 'version']: objs += cxx(name, variables=cxxvariables) if platform.is_windows(): for name in ['subprocess-win32', 'includes_normalize-win32', 'jobserver-win32', 'msvc_helper-win32', 'msvc_helper_main-win32']: objs += cxx(name, variables=cxxvariables) if platform.is_msvc(): objs += cxx('minidump-win32', variables=cxxvariables) objs += cc('getopt') else: for name in ['jobserver-posix', 'subprocess-posix']: objs += cxx(name, variables=cxxvariables) if platform.is_aix(): objs += cc('getopt') if platform.is_msvc(): ninja_lib = n.build(built('ninja.lib'), 'ar', objs) else: ninja_lib = n.build(built('libninja.a'), 'ar', objs) n.newline() if platform.is_msvc(): libs.append('ninja.lib') else: libs.append('-lninja') if platform.is_aix() and not platform.is_os400_pase(): libs.append('-lperfstat') all_targets = [] n.comment('Main executable is library plus main() function.') objs = cxx('ninja', 
variables=cxxvariables) ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() all_targets += ninja if options.bootstrap: # We've built the ninja binary. Don't run any more commands # through the bootstrap executor, but continue writing the # build.ninja file. n = ninja_writer # Build the ninja_test executable only if the GTest source directory # is provided explicitly. Either from the environment with GTEST_SOURCE_DIR # or with the --gtest-source-dir command-line option. # # Do not try to look for an installed binary version, and link against it # because doing so properly is platform-specific (use the CMake build for # this). if options.gtest_source_dir: gtest_src_dir = options.gtest_source_dir else: gtest_src_dir = os.environ.get('GTEST_SOURCE_DIR') if gtest_src_dir: # Verify GoogleTest source directory, and add its include directory # to the global include search path (even for non-test sources) to # keep the build plan generation simple. gtest_all_cc = os.path.join(gtest_src_dir, 'googletest', 'src', 'gtest-all.cc') if not os.path.exists(gtest_all_cc): print('ERROR: Missing GoogleTest source file: %s' % gtest_all_cc) sys.exit(1) n.comment('Tests all build into ninja_test executable.') # Test-specific version of cflags, must include the GoogleTest # include directory. 
test_cflags = cflags.copy() test_cflags.append('-I' + os.path.join(gtest_src_dir, 'googletest', 'include')) test_variables = [('cflags', test_cflags)] if platform.is_msvc(): test_variables += [('pdb', 'ninja_test.pdb')] test_names = [ 'build_log_test', 'build_test', 'clean_test', 'clparser_test', 'depfile_parser_test', 'deps_log_test', 'disk_interface_test', 'dyndep_parser_test', 'edit_distance_test', 'elide_middle_test', 'explanations_test', 'graph_test', 'jobserver_test', 'json_test', 'lexer_test', 'manifest_parser_test', 'ninja_test', 'state_test', 'string_piece_util_test', 'subprocess_test', 'test', 'util_test', ] if platform.is_windows(): test_names += [ 'includes_normalize_test', 'msvc_helper_test', ] objs = [] for name in test_names: objs += cxx(name, variables=test_variables) # Build GTest as a monolithic source file. # This requires one extra include search path, so replace the # value of 'cflags' in our list. gtest_all_variables = test_variables[1:] + [ ('cflags', test_cflags + ['-I' + os.path.join(gtest_src_dir, 'googletest') ]), ] # Do not use cxx() directly to ensure the object file is under $builddir. 
objs += n.build(built('gtest_all' + objext), 'cxx', gtest_all_cc, variables=gtest_all_variables) ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() all_targets += ninja_test n.comment('Ancillary executables.') if platform.is_aix() and '-maix64' not in ldflags: # Both hash_collision_bench and manifest_parser_perftest require more # memory than will fit in the standard 32-bit AIX shared stack/heap (256M) libs.append('-Wl,-bmaxdata:0x80000000') for name in ['build_log_perftest', 'canon_perftest', 'elide_middle_perftest', 'depfile_parser_perftest', 'hash_collision_bench', 'manifest_parser_perftest', 'clparser_perftest']: if platform.is_msvc(): cxxvariables = [('pdb', name + '.pdb')] objs = cxx(name, variables=cxxvariables) all_targets += n.build(binary(name), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() n.comment('Generate a graph using the "graph" tool.') n.rule('gendot', command='./ninja -t graph all > $out') n.rule('gengraph', command='dot -Tpng $in > $out') dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja']) n.build('graph.png', 'gengraph', dot) n.newline() n.comment('Generate the manual using asciidoc.') n.rule('asciidoc', command='asciidoc -b docbook -d book -o $out $in', description='ASCIIDOC $out') n.rule('xsltproc', command='xsltproc --nonet doc/docbook.xsl $in > $out', description='XSLTPROC $out') docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc')) manual = n.build(doc('manual.html'), 'xsltproc', docbookxml, implicit=[doc('style.css'), doc('docbook.xsl')]) n.build('manual', 'phony', order_only=manual) n.newline() n.rule('dblatex', command='dblatex -q -o $out -p doc/dblatex.xsl $in', description='DBLATEX $out') n.build(doc('manual.pdf'), 'dblatex', docbookxml, implicit=[doc('dblatex.xsl')]) n.comment('Generate Doxygen.') n.rule('doxygen', command='doxygen $in', description='DOXYGEN $in') 
n.variable('doxygen_mainpage_generator', src('gen_doxygen_mainpage.sh')) n.rule('doxygen_mainpage', command='$doxygen_mainpage_generator $in > $out', description='DOXYGEN_MAINPAGE $out') mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage', ['README.md', 'COPYING'], implicit=['$doxygen_mainpage_generator']) n.build('doxygen', 'doxygen', doc('doxygen.config'), implicit=mainpage) n.newline() if not host.is_mingw(): n.comment('Regenerate build files if build script changes.') n.rule('configure', command='${configure_env}%s $root/configure.py $configure_args' % options.with_python, generator=True) n.build('build.ninja', 'configure', implicit=['$root/configure.py', os.path.normpath('$root/misc/ninja_syntax.py')]) n.newline() n.default(ninja) n.newline() if host.is_linux(): n.comment('Packaging') n.rule('rpmbuild', command="misc/packaging/rpmbuild.sh", description='Building rpms..') n.build('rpm', 'rpmbuild') n.newline() n.build('all', 'phony', all_targets) n.close() # type: ignore # Item "Bootstrap" of "Writer | Bootstrap" has no attribute "close" print('wrote %s.' % BUILD_FILENAME) if options.bootstrap: print('bootstrap complete. rebuilding...') rebuild_args = [] if platform.can_rebuild_in_place(): rebuild_args.append('./ninja') else: if platform.is_windows(): bootstrap_exe = 'ninja.bootstrap.exe' final_exe = 'ninja.exe' else: bootstrap_exe = './ninja.bootstrap' final_exe = './ninja' if os.path.exists(bootstrap_exe): os.unlink(bootstrap_exe) os.rename(final_exe, bootstrap_exe) rebuild_args.append(bootstrap_exe) if options.verbose: rebuild_args.append('-v') subprocess.check_call(rebuild_args) ninja-1.13.2/doc/000077500000000000000000000000001510764045400134505ustar00rootroot00000000000000ninja-1.13.2/doc/README.md000066400000000000000000000011101510764045400147200ustar00rootroot00000000000000This directory contains the Ninja manual and support files used in building it. Here's a brief overview of how it works. 
The source text, `manual.asciidoc`, is written in the AsciiDoc format. AsciiDoc can generate HTML but it doesn't look great; instead, we use AsciiDoc to generate the Docbook XML format and then provide our own Docbook XSL tweaks to produce HTML from that. In theory using AsciiDoc and DocBook allows us to produce nice PDF documentation etc. In reality it's not clear anyone wants that, but the build rules are in place to generate it if you install dblatex. ninja-1.13.2/doc/dblatex.xsl000066400000000000000000000006221510764045400156230ustar00rootroot00000000000000 0 0 ninja-1.13.2/doc/docbook.xsl000066400000000000000000000026641510764045400156300ustar00rootroot00000000000000 ]> book toc 0 1 ul ninja-1.13.2/doc/doxygen.config000066400000000000000000001436321510764045400163250ustar00rootroot00000000000000# Doxyfile 1.4.5 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = "Ninja" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. # PROJECT_NUMBER = "0" # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. 
If left blank the current directory will be used. OUTPUT_DIRECTORY = "doc/doxygen/" # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). # Obsolet option. #USE_WINDOWS_ENCODING = YES # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. 
BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = YES # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. 
# If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = src # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = src/ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = YES # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. # Has become obsolete. #DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. 
INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 2 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. 
# BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. 
EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. 
If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = YES # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. 
This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is YES. SHOW_DIRECTORIES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. 
FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text " # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. 
If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = src \ build/doxygen_mainpage # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.cc \ *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. 
Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = src # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = *.cpp \ *.cc \ *.h \ *.hh \ INSTALL DEPENDENCIES CHANGELOG LICENSE LGPL # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = YES # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = src # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. 
If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = NO # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. 
USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 2 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = YES # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. 
HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = YES # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. 
This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = YES # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
# This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. 
To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. 
GENERATE_TAGFILE = doc/doxygen/html/Ninja.TAGFILE # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = YES # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. 
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # UML_LOOK = YES # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. # Obsolet option. #MAX_DOT_GRAPH_WIDTH = 1280 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. # Obsolet option. #MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. # JW # DOT_MULTI_TARGETS = NO DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. # JW SEARCHENGINE = NO SEARCHENGINE = YES ninja-1.13.2/doc/manual.asciidoc000066400000000000000000001445731510764045400164430ustar00rootroot00000000000000The Ninja build system ====================== v1.13.2, Nov 2025 Introduction ------------ Ninja is yet another build system. 
It takes as input the interdependencies of files (typically source code and output executables) and orchestrates building them, _quickly_. Ninja joins a sea of other build systems. Its distinguishing goal is to be fast. It is born from http://neugierig.org/software/chromium/notes/2011/02/ninja.html[my work on the Chromium browser project], which has over 30,000 source files and whose other build systems (including one built from custom non-recursive Makefiles) would take ten seconds to start building after changing one file. Ninja is under a second. Philosophical overview ~~~~~~~~~~~~~~~~~~~~~~ Where other build systems are high-level languages, Ninja aims to be an assembler. Build systems get slow when they need to make decisions. When you are in an edit-compile cycle you want it to be as fast as possible -- you want the build system to do the minimum work necessary to figure out what needs to be built immediately. Ninja contains the barest functionality necessary to describe arbitrary dependency graphs. Its lack of syntax makes it impossible to express complex decisions. Instead, Ninja is intended to be used with a separate program generating its input files. The generator program (like the `./configure` found in autotools projects) can analyze system dependencies and make as many decisions as possible up front so that incremental builds stay fast. Going beyond autotools, even build-time decisions like "which compiler flags should I use?" or "should I build a debug or release-mode binary?" belong in the `.ninja` file generator. Design goals ~~~~~~~~~~~~ Here are the design goals of Ninja: * very fast (i.e., instant) incremental builds, even for very large projects. * very little policy about how code is built. Different projects and higher-level build systems have different opinions about how code should be built; for example, should built objects live alongside the sources or should all build output go into a separate directory? 
Is there a "package" rule that builds a distributable package of the project? Sidestep these decisions by trying to allow either to be implemented, rather than choosing, even if that results in more verbosity. * get dependencies correct, and in particular situations that are difficult to get right with Makefiles (e.g. outputs need an implicit dependency on the command line used to generate them; to build C source code you need to use gcc's `-M` flags for header dependencies). * when convenience and speed are in conflict, prefer speed. Some explicit _non-goals_: * convenient syntax for writing build files by hand. _You should generate your ninja files using another program_. This is how we can sidestep many policy decisions. * built-in rules. _Out of the box, Ninja has no rules for e.g. compiling C code._ * build-time customization of the build. _Options belong in the program that generates the ninja files_. * build-time decision-making ability such as conditionals or search paths. _Making decisions is slow._ To restate, Ninja is faster than other build systems because it is painfully simple. You must tell Ninja exactly what to do when you create your project's `.ninja` files. Comparison to Make ~~~~~~~~~~~~~~~~~~ Ninja is closest in spirit and functionality to Make, relying on simple dependencies between file timestamps. But fundamentally, make has a lot of _features_: suffix rules, functions, built-in rules that e.g. search for RCS files when building source. Make's language was designed to be written by humans. Many projects find make alone adequate for their build problems. In contrast, Ninja has almost no features; just those necessary to get builds correct while punting most complexity to generation of the ninja input files. Ninja by itself is unlikely to be useful for most projects. Here are some of the features Ninja adds to Make. (These sorts of features can often be implemented using more complicated Makefiles, but they are not part of make itself.) 
* Ninja has special support for discovering extra dependencies at build time, making it easy to get <> correct for C/C++ code. * A build edge may have multiple outputs. * Outputs implicitly depend on the command line that was used to generate them, which means that changing e.g. compilation flags will cause the outputs to rebuild. * Output directories are always implicitly created before running the command that relies on them. * Rules can provide shorter descriptions of the command being run, so you can print e.g. `CC foo.o` instead of a long command line while building. * Builds are always run in parallel, based by default on the number of CPUs your system has. Underspecified build dependencies will result in incorrect builds. * Command output is always buffered. This means commands running in parallel don't interleave their output, and when a command fails we can print its failure output next to the full command line that produced the failure. Using Ninja for your project ---------------------------- Ninja currently works on Unix-like systems and Windows. It's seen the most testing on Linux (and has the best performance there) but it runs fine on Mac OS X and FreeBSD. If your project is small, Ninja's speed impact is likely unnoticeable. (However, even for small projects it sometimes turns out that Ninja's limited syntax forces simpler build rules that result in faster builds.) Another way to say this is that if you're happy with the edit-compile cycle time of your project already then Ninja won't help. There are many other build systems that are more user-friendly or featureful than Ninja itself. For some recommendations: the Ninja author found http://gittup.org/tup/[the tup build system] influential in Ninja's design, and thinks https://github.com/apenwarr/redo[redo]'s design is quite clever. Ninja's benefit comes from using it in conjunction with a smarter meta-build system. 
https://gn.googlesource.com/gn/[gn]:: The meta-build system used to generate build files for Google Chrome and related projects (v8, node.js), as well as Google Fuchsia. gn can generate Ninja files for all platforms supported by Chrome. https://cmake.org/[CMake]:: A widely used meta-build system that can generate Ninja files on Linux as of CMake version 2.8.8. Newer versions of CMake support generating Ninja files on Windows and Mac OS X too. https://github.com/ninja-build/ninja/wiki/List-of-generators-producing-ninja-build-files[others]:: Ninja ought to fit perfectly into other meta-build software like https://premake.github.io/[premake]. If you do this work, please let us know! Running Ninja ~~~~~~~~~~~~~ Run `ninja`. By default, it looks for a file named `build.ninja` in the current directory and builds all out-of-date targets. You can specify which targets (files) to build as command line arguments. There is also a special syntax `target^` for specifying a target as the first output of some rule containing the source you put in the command line, if one exists. For example, if you specify target as `foo.c^` then `foo.o` will get built (assuming you have those targets in your build files). `ninja -h` prints help output. Many of Ninja's flags intentionally match those of Make; e.g `ninja -C build -j 20` changes into the `build` directory and runs 20 build commands in parallel. (Note that Ninja defaults to running commands in parallel anyway, so typically you don't need to pass `-j`.) GNU Jobserver support ~~~~~~~~~~~~~~~~~~~~~ Since version 1.13., Ninja builds can follow the https://https://www.gnu.org/software/make/manual/html_node/Job-Slots.html[GNU Make jobserver] client protocol. This is useful when Ninja is invoked as part of a larger build system controlled by a top-level GNU Make instance, or any other jobserver pool implementation, as it allows better coordination between concurrent build tasks. 
This feature is automatically enabled under the following conditions: - Dry-run (i.e. `-n` or `--dry-run`) is not enabled. - No explicit job count (e.g. `-j`) is passed on the command line. - The `MAKEFLAGS` environment variable is defined and describes a valid jobserver mode using `--jobserver-auth=SEMAPHORE_NAME` on Windows, or `--jobserver-auth=fifo:PATH` on Posix. In this case, Ninja will use the jobserver pool of job slots to control parallelism, instead of its default parallel implementation. Note that load-average limitations (i.e. when using `-l`) are still being enforced in this mode. IMPORTANT: On Posix, only the FIFO-based version of the protocol, which is implemented by GNU Make 4.4 and higher, is supported. Ninja will detect when a pipe-based jobserver is being used (i.e. when `MAKEFLAGS` contains `--jobserver-auth=,`) and will print a warning, but will otherwise ignore it. Environment variables ~~~~~~~~~~~~~~~~~~~~~ Ninja supports two environment variables to control its behavior: `NINJA_STATUS`, the progress status printed before the rule being run. Several placeholders are available: `%s`:: The number of started edges. `%t`:: The total number of edges that must be run to complete the build. `%p`:: The percentage of finished edges. `%r`:: The number of currently running edges. `%u`:: The number of remaining edges to start. `%f`:: The number of finished edges. `%o`:: Overall rate of finished edges per second `%c`:: Current rate of finished edges per second (average over builds specified by `-j` or its default) `%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_ `%E`:: Remaining time (ETA) in seconds. _(Available since Ninja 1.12.)_ `%w`:: Elapsed time in [h:]mm:ss format. _(Available since Ninja 1.12.)_ `%W`:: Remaining time (ETA) in [h:]mm:ss format. _(Available since Ninja 1.12.)_ `%P`:: The percentage (in ppp% format) of time elapsed out of predicted total runtime. _(Available since Ninja 1.12.)_ `%%`:: A plain `%` character. 
The default progress status is `"[%f/%t] "` (note the trailing space to separate from the build rule). Another example of possible progress status could be `"[%u/%r/%f] "`. If `MAKEFLAGS` is defined in the environment, if may alter how Ninja dispatches parallel build commands. See the GNU Jobserver support section for details. Extra tools ~~~~~~~~~~~ The `-t` flag on the Ninja command line runs some tools that we have found useful during Ninja's development. The current tools are: [horizontal] `query`:: dump the inputs and outputs of a given target. `browse`:: browse the dependency graph in a web browser. Clicking a file focuses the view on that file, showing inputs and outputs. This feature requires a Python installation. By default, port 8000 is used and a web browser will be opened. This can be changed as follows: + ---- ninja -t browse --port=8000 --no-browser mytarget ---- + `graph`:: output a file in the syntax used by `graphviz`, an automatic graph layout tool. Use it like: + ---- ninja -t graph mytarget | dot -Tpng -ograph.png ---- + In the Ninja source tree, `ninja graph.png` generates an image for Ninja itself. If no target is given generate a graph for all root targets. `targets`:: output a list of targets either by rule or by depth. If used like +ninja -t targets rule _name_+ it prints the list of targets using the given rule to be built. If no rule is given, it prints the source files (the leaves of the graph). If used like +ninja -t targets depth _digit_+ it prints the list of targets in a depth-first manner starting by the root targets (the ones with no outputs). Indentation is used to mark dependencies. If the depth is zero it prints all targets. If no arguments are provided +ninja -t targets depth 1+ is assumed. In this mode targets may be listed several times. If used like this +ninja -t targets all+ it prints all the targets available without indentation and it is faster than the _depth_ mode. 
`commands`:: given a list of targets, print a list of commands which, if executed in order, may be used to rebuild those targets, assuming that all output files are out of date. `inputs`:: given a list of targets, print a list of all inputs used to rebuild those targets. _Available since Ninja 1.11._ `multi-inputs`:: print one or more sets of inputs required to build targets. Each line will consist of a target, a delimiter, an input and a terminator character. The list produced by the tool can be helpful if one would like to know which targets that are affected by a certain input. + The output will be a series of lines with the following elements: + ---- ---- + The default `` is a single TAB character. The delimiter can be modified to any string using the `--delimiter` argument. + The default `` is a line terminator (i.e. `\n` on Posix and `\r\n` on Windows). The terminator can be changed to `\0` by using the `--print0` argument. + ---- ---- + Example usage of the `multi-inputs` tool: + ---- ninja -t multi-inputs target1 target2 target3 ---- + Example of produced output from the `multi-inputs` tool: + ---- target1 file1.c target2 file1.c target2 file2.c target3 file1.c target3 file2.c target3 file3.c ---- + _Note that a given input may appear for several targets if it is used by more than one targets._ _Available since Ninja 1.13._ `clean`:: remove built files. By default, it removes all built files except for those created by the generator. Adding the `-g` flag also removes built files created by the generator (see <>). Additional arguments are targets, which removes the given targets and recursively all files built for them. + If used like +ninja -t clean -r _rules_+ it removes all files built using the given rules. + Files created but not referenced in the graph are not removed. This tool takes in account the +-v+ and the +-n+ options (note that +-n+ implies +-v+). `cleandead`:: remove files produced by previous builds that are no longer in the build file. 
_Available since Ninja 1.10._ `compdb`:: given a list of rules, each of which is expected to be a C family language compiler rule whose first input is the name of the source file, prints on standard output a compilation database in the http://clang.llvm.org/docs/JSONCompilationDatabase.html[JSON format] expected by the Clang tooling interface. _Available since Ninja 1.2._ `compdb-targets`:: like `compdb`, but takes a list of targets instead of rules, and expects at least one target. The resulting compilation database contains all commands required to build the indicated targets, and _only_ those commands. `deps`:: show all dependencies stored in the `.ninja_deps` file. When given a target, show just the target's dependencies. _Available since Ninja 1.4._ `missingdeps`:: given a list of targets, look for targets that depend on a generated file, but do not have a properly (possibly transitive) dependency on the generator. Such targets may cause build flakiness on clean builds. + The broken targets can be found assuming deps log / depfile dependency information is correct. Any target that depends on a generated file (output of a generator-target) implicitly, but does not have an explicit or order-only dependency path to the generator-target, is considered broken. + The tool's findings can be verified by trying to build the listed targets in a clean outdir without building any other targets. The build should fail for each of them with a missing include error or equivalent pointing to the generated file. _Available since Ninja 1.11._ `recompact`:: recompact the `.ninja_deps` file. _Available since Ninja 1.4._ `restat`:: updates all recorded file modification timestamps in the `.ninja_log` file. _Available since Ninja 1.10._ `rules`:: output the list of all rules. It can be used to know which rule name to pass to +ninja -t targets rule _name_+ or +ninja -t compdb+. Adding the `-d` flag also prints the description of the rules. `msvc`:: Available on Windows hosts only. 
Helper tool to invoke the `cl.exe` compiler with a pre-defined set of environment variables, as in: + ---- ninja -t msvc -e ENVFILE -- cl.exe ---- + Where `ENVFILE` is a binary file that contains an environment block suitable for CreateProcessA() on Windows (i.e. a series of zero-terminated strings that look like NAME=VALUE, followed by an extra zero terminator). Note that this uses the local codepage encoding. + This tool also supports a deprecated way of parsing the compiler's output when the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it: + ---- ninja -t msvc -o DEPFILE [-p STRING] -- cl.exe /showIncludes ---- + When using this option, `-p STRING` can be used to pass the localized line prefix that `cl.exe` uses to output dependency information. For English-speaking regions this is `"Note: including file: "` without the double quotes, but will be different for other regions. + Note that Ninja supports this natively now, with the use of `deps = msvc` and `msvc_deps_prefix` in Ninja files. Native support also avoids launching an extra tool process each time the compiler must be called, which can speed up builds noticeably on Windows. `wincodepage`:: Available on Windows hosts (_since Ninja 1.11_). Prints the Windows code page whose encoding is expected in the build file. The output has the form: + ---- Build file encoding: <codepage> ---- + Additional lines may be added in future versions of Ninja. + The `<codepage>` is one of: `UTF-8`::: Encode as UTF-8. `ANSI`::: Encode to the system-wide ANSI code page. Writing your own Ninja files ---------------------------- The remainder of this manual is only useful if you are constructing Ninja files yourself: for example, if you're writing a meta-build system or supporting a new language. Conceptual overview ~~~~~~~~~~~~~~~~~~~ Ninja evaluates a graph of dependencies between files, and runs whichever commands are necessary to make your build target up to date as determined by file modification times. 
If you are familiar with Make, Ninja is very similar. A build file (default name: `build.ninja`) provides a list of _rules_ -- short names for longer commands, like how to run the compiler -- along with a list of _build_ statements saying how to build files using the rules -- which rule to apply to which inputs to produce which outputs. Conceptually, `build` statements describe the dependency graph of your project, while `rule` statements describe how to generate the files along a given edge of the graph. Syntax example ~~~~~~~~~~~~~~ Here's a basic `.ninja` file that demonstrates most of the syntax. It will be used as an example for the following sections. --------------------------------- cflags = -Wall rule cc command = gcc $cflags -c $in -o $out build foo.o: cc foo.c --------------------------------- Variables ~~~~~~~~~ Despite the non-goal of being convenient to write by hand, to keep build files readable (debuggable), Ninja supports declaring shorter reusable names for strings. A declaration like the following ---------------- cflags = -g ---------------- can be used on the right side of an equals sign, dereferencing it with a dollar sign, like this: ---------------- rule cc command = gcc $cflags -c $in -o $out ---------------- Variables can also be referenced using curly braces like `${in}`. Variables might better be called "bindings", in that a given variable cannot be changed, only shadowed. There is more on how shadowing works later in this document. Rules ~~~~~ Rules declare a short name for a command line. They begin with a line consisting of the `rule` keyword and a name for the rule. Then follows an indented set of `variable = value` lines. The basic example above declares a new rule named `cc`, along with the command to run. In the context of a rule, the `command` variable defines the command to run, `$in` expands to the list of input files (`foo.c`), and `$out` to the output files (`foo.o`) for the command. 
A full list of special variables is provided in <>. Build statements ~~~~~~~~~~~~~~~~ Build statements declare a relationship between input and output files. They begin with the `build` keyword, and have the format +build _outputs_: _rulename_ _inputs_+. Such a declaration says that all of the output files are derived from the input files. When the output files are missing or when the inputs change, Ninja will run the rule to regenerate the outputs. The basic example above describes how to build `foo.o`, using the `cc` rule. In the scope of a `build` block (including in the evaluation of its associated `rule`), the variable `$in` is the list of inputs and the variable `$out` is the list of outputs. A build statement may be followed by an indented set of `key = value` pairs, much like a rule. These variables will shadow any variables when evaluating the variables in the command. For example: ---------------- cflags = -Wall -Werror rule cc command = gcc $cflags -c $in -o $out # If left unspecified, builds get the outer $cflags. build foo.o: cc foo.c # But you can shadow variables like cflags for a particular build. build special.o: cc special.c cflags = -Wall # The variable was only shadowed for the scope of special.o; # Subsequent build lines get the outer (original) cflags. build bar.o: cc bar.c ---------------- For more discussion of how scoping works, consult <>. If you need more complicated information passed from the build statement to the rule (for example, if the rule needs "the file extension of the first input"), pass that through as an extra variable, like how `cflags` is passed above. If the top-level Ninja file is specified as an output of any build statement and it is out of date, Ninja will rebuild and reload it before building the targets requested by the user. Generating Ninja files from code ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `misc/ninja_syntax.py` in the Ninja distribution is a tiny Python module to facilitate generating Ninja files. 
It allows you to make Python calls like `ninja.rule(name='foo', command='bar', depfile='$out.d')` and it will generate the appropriate syntax. Feel free to just inline it into your project's build system if it's useful. More details ------------ The `phony` rule ~~~~~~~~~~~~~~~~ The special rule name `phony` can be used to create aliases for other targets. For example: ---------------- build foo: phony some/file/in/a/faraway/subdir/foo ---------------- This makes `ninja foo` build the longer path. Semantically, the `phony` rule is equivalent to a plain rule where the `command` does nothing, but phony rules are handled specially in that they aren't printed when run, logged (see below), nor do they contribute to the command count printed as part of the build process. When a `phony` target is used as an input to another build rule, the other build rule will, semantically, consider the inputs of the `phony` rule as its own. Therefore, `phony` rules can be used to group inputs, e.g. header files. `phony` can also be used to create dummy targets for files which may not exist at build time. If a phony build statement is written without any dependencies, the target will be considered out of date if it does not exist. Without a phony build statement, Ninja will report an error if the file does not exist and is required by the build. To create a rule that never rebuilds, use a build rule without any input: ---------------- rule touch command = touch $out build file_that_always_exists.dummy: touch build dummy_target_to_follow_a_pattern: phony file_that_always_exists.dummy ---------------- Default target statements ~~~~~~~~~~~~~~~~~~~~~~~~~ By default, if no targets are specified on the command line, Ninja will build every output that is not named as an input elsewhere. You can override this behavior using a default target statement. A default target statement causes Ninja to build only a given subset of output files if none are specified on the command line. 
Default target statements begin with the `default` keyword, and have the format +default _targets_+. A default target statement must appear after the build statement that declares the target as an output file. They are cumulative, so multiple statements may be used to extend the list of default targets. For example: ---------------- default foo bar default baz ---------------- This causes Ninja to build the `foo`, `bar` and `baz` targets by default. [[ref_log]] The Ninja log ~~~~~~~~~~~~~ For each built file, Ninja keeps a log of the command used to build it. Using this log Ninja can know when an existing output was built with a different command line than the build files specify (i.e., the command line changed) and knows to rebuild the file. The log file is kept in the build root in a file called `.ninja_log`. If you provide a variable named `builddir` in the outermost scope, `.ninja_log` will be kept in that directory instead. [[ref_versioning]] Version compatibility ~~~~~~~~~~~~~~~~~~~~~ _Available since Ninja 1.2._ Ninja version labels follow the standard major.minor.patch format, where the major version is increased on backwards-incompatible syntax/behavioral changes and the minor version is increased on new behaviors. Your `build.ninja` may declare a variable named `ninja_required_version` that asserts the minimum Ninja version required to use the generated file. For example, ----- ninja_required_version = 1.1 ----- declares that the build file relies on some feature that was introduced in Ninja 1.1 (perhaps the `pool` syntax), and that Ninja 1.1 or greater must be used to build. Unlike other Ninja variables, this version requirement is checked immediately when the variable is encountered in parsing, so it's best to put it at the top of the build file. Ninja always warns if the major versions of Ninja and the `ninja_required_version` don't match; a major version change hasn't come up yet so it's difficult to predict what behavior might be required. 
[[ref_headers]] C/C++ header dependencies ~~~~~~~~~~~~~~~~~~~~~~~~~ To get C/C++ header dependencies (or any other build dependency that works in a similar way) correct Ninja has some extra functionality. The problem with headers is that the full list of files that a given source file depends on can only be discovered by the compiler: different preprocessor defines and include paths cause different files to be used. Some compilers can emit this information while building, and Ninja can use that to get its dependencies perfect. Consider: if the file has never been compiled, it must be built anyway, generating the header dependencies as a side effect. If any file is later modified (even in a way that changes which headers it depends on) the modification will cause a rebuild as well, keeping the dependencies up to date. When loading these special dependencies, Ninja implicitly adds extra build edges such that it is not an error if the listed dependency is missing. This allows you to delete a header file and rebuild without the build aborting due to a missing input. depfile ^^^^^^^ `gcc` (and other compilers like `clang`) support emitting dependency information in the syntax of a Makefile. (Any command that can write dependencies in this form can be used, not just `gcc`.) To bring this information into Ninja requires cooperation. On the Ninja side, the `depfile` attribute on the `build` must point to a path where this data is written. (Ninja only supports the limited subset of the Makefile syntax emitted by compilers.) Then the command must know to write dependencies into the `depfile` path. Use it like in the following example: ---- rule cc depfile = $out.d command = gcc -MD -MF $out.d [other gcc flags here] ---- The `-MD` flag to `gcc` tells it to output header dependencies, and the `-MF` flag tells it where to write them. 
deps ^^^^ _(Available since Ninja 1.3.)_ It turns out that for large projects (and particularly on Windows, where the file system is slow) loading these dependency files on startup is slow. Ninja 1.3 can instead process dependencies just after they're generated and save a compacted form of the same information in a Ninja-internal database. Ninja supports this processing in two forms. 1. `deps = gcc` specifies that the tool outputs `gcc`-style dependencies in the form of Makefiles. Adding this to the above example will cause Ninja to process the `depfile` immediately after the compilation finishes, then delete the `.d` file (which is only used as a temporary). 2. `deps = msvc` specifies that the tool outputs header dependencies in the form produced by the Visual Studio compiler's http://msdn.microsoft.com/en-us/library/hdkef6tk(v=vs.90).aspx[`/showIncludes` flag]. Briefly, this means the tool outputs specially-formatted lines to its stdout. Ninja then filters these lines from the displayed output. No `depfile` attribute is necessary, but the localized string in front of the header file path should be globally defined. For instance, `msvc_deps_prefix = Note: including file:` for an English Visual Studio (the default). + ---- msvc_deps_prefix = Note: including file: rule cc deps = msvc command = cl /showIncludes -c $in /Fo$out ---- If the include directory directives are using absolute paths, your depfile may result in a mixture of relative and absolute paths. Paths used by other build rules need to match exactly. Therefore, it is recommended to use relative paths in these cases. [[ref_pool]] Pools ~~~~~ _Available since Ninja 1.1._ Pools allow you to allocate one or more rules or edges a finite number of concurrent jobs which is more tightly restricted than the default parallelism. 
This can be useful, for example, to restrict a particular expensive rule (like link steps for huge executables), or to restrict particular build statements which you know perform poorly when run concurrently. Each pool has a `depth` variable which is specified in the build file. The pool is then referred to with the `pool` variable on either a rule or a build statement. No matter what pools you specify, ninja will never run more concurrent jobs than the default parallelism, or the number of jobs specified on the command line (with `-j`). ---------------- # No more than 4 links at a time. pool link_pool depth = 4 # No more than 1 heavy object at a time. pool heavy_object_pool depth = 1 rule link ... pool = link_pool rule cc ... # The link_pool is used here. Only 4 links will run concurrently. build foo.exe: link input.obj # A build statement can be exempted from its rule's pool by setting an # empty pool. This effectively puts the build statement back into the default # pool, which has infinite depth. build other.exe: link input.obj pool = # A build statement can specify a pool directly. # Only one of these builds will run at a time. build heavy_object1.obj: cc heavy_obj1.cc pool = heavy_object_pool build heavy_object2.obj: cc heavy_obj2.cc pool = heavy_object_pool ---------------- The `console` pool ^^^^^^^^^^^^^^^^^^ _Available since Ninja 1.5._ There exists a pre-defined pool named `console` with a depth of 1. It has the special property that any task in the pool has direct access to the standard input, output and error streams provided to Ninja, which are normally connected to the user's console (hence the name) but could be redirected. This can be useful for interactive tasks or long-running tasks which produce status updates on the console (such as test suites). While a task in the `console` pool is running, Ninja's regular output (such as progress status and output from concurrent tasks) is buffered until it completes. 
[[ref_ninja_file]] Ninja file reference -------------------- A file is a series of declarations. A declaration can be one of: 1. A rule declaration, which begins with +rule _rulename_+, and then has a series of indented lines defining variables. 2. A build edge, which looks like +build _output1_ _output2_: _rulename_ _input1_ _input2_+. + Implicit dependencies may be tacked on the end with +| _dependency1_ _dependency2_+. + Order-only dependencies may be tacked on the end with +|| _dependency1_ _dependency2_+. (See <>.) Validations may be tacked on the end with +|@ _validation1_ _validation2_+. (See <>.) + Implicit outputs _(available since Ninja 1.7)_ may be added before the `:` with +| _output1_ _output2_+ and do not appear in `$out`. (See <>.) 3. Variable declarations, which look like +_variable_ = _value_+. 4. Default target statements, which look like +default _target1_ _target2_+. 5. References to more files, which look like +subninja _path_+ or +include _path_+. The difference between these is explained below <>. 6. A pool declaration, which looks like +pool _poolname_+. Pools are explained <>. [[ref_lexer]] Lexical syntax ~~~~~~~~~~~~~~ Ninja is mostly encoding agnostic, as long as the bytes Ninja cares about (like slashes in paths) are ASCII. This means e.g. UTF-8 or ISO-8859-1 input files ought to work. Comments begin with `#` and extend to the end of the line. Newlines are significant. Statements like `build foo bar` are a set of space-separated tokens that end at the newline. Newlines and spaces within a token must be escaped. There is only one escape character, `$`, and it has the following behaviors: `$` followed by a newline:: escape the newline (continue the current line across a line break). `$` followed by text:: a variable reference. `${varname}`:: alternate syntax for `$varname`. `$` followed by space:: a space. (This is only necessary in lists of paths, where a space would otherwise separate filenames. See below.) `$:` :: a colon. 
(This is only necessary in `build` lines, where a colon would otherwise terminate the list of outputs.) `$$`:: a literal `$`. A `build` or `default` statement is first parsed as a space-separated list of filenames and then each name is expanded. This means that spaces within a variable will result in spaces in the expanded filename. ---- spaced = foo bar build $spaced/baz other$ file: ... # The above build line has two outputs: "foo bar/baz" and "other file". ---- In a `name = value` statement, whitespace at the beginning of a value is always stripped. Whitespace at the beginning of a line after a line continuation is also stripped. ---- two_words_with_one_space = foo $ bar one_word_with_no_space = foo$ bar ---- Other whitespace is only significant if it's at the beginning of a line. If a line is indented more than the previous one, it's considered part of its parent's scope; if it is indented less than the previous one, it closes the previous scope. [[ref_toplevel]] Top-level variables ~~~~~~~~~~~~~~~~~~~ Two variables are significant when declared in the outermost file scope. `builddir`:: a directory for some Ninja output files. See <>. (You can also store other build output in this directory.) `ninja_required_version`:: the minimum version of Ninja required to process the build correctly. See <>. [[ref_rule]] Rule variables ~~~~~~~~~~~~~~ A `rule` block contains a list of `key = value` declarations that affect the processing of the rule. Here is a full list of special keys. `command` (_required_):: the command line to run. Each `rule` may have only one `command` declaration. See <> for more details on quoting and executing multiple commands. `depfile`:: path to an optional `Makefile` that contains extra _implicit dependencies_ (see <>). This is explicitly to support C/C++ header dependencies; see <>. `deps`:: _(Available since Ninja 1.3.)_ if present, must be one of `gcc` or `msvc` to specify special dependency processing. See <>. 
The generated database is stored as `.ninja_deps` in the `builddir`, see <>. `msvc_deps_prefix`:: _(Available since Ninja 1.5.)_ defines the string which should be stripped from msvc's /showIncludes output. Only needed when `deps = msvc` and no English Visual Studio version is used. `description`:: a short description of the command, used to pretty-print the command as it's running. The `-v` flag controls whether to print the full command or its description; if a command fails, the full command line will always be printed before the command's output. `dyndep`:: _(Available since Ninja 1.10.)_ Used only on build statements. If present, must name one of the build statement inputs. Dynamically discovered dependency information will be loaded from the file. See the <> section for details. `generator`:: if present, specifies that this rule is used to re-invoke the generator program. Files built using `generator` rules are treated specially in two ways: firstly, they will not be rebuilt if the command line changes; and secondly, they are not cleaned by default. `in`:: the space-separated list of files provided as inputs to the build line referencing this `rule`, shell-quoted if it appears in commands. (`$in` is provided solely for convenience; if you need some subset or variant of this list of files, just construct a new variable with that list and use that instead.) `in_newline`:: the same as `$in` except that multiple inputs are separated by newlines rather than spaces. (For use with `$rspfile_content`; this works around a bug in the MSVC linker where it uses a fixed-size buffer for processing input.) `out`:: the space-separated list of files provided as outputs to the build line referencing this `rule`, shell-quoted if it appears in commands. `restat`:: if present, causes Ninja to re-stat the command's outputs after execution of the command. Each output whose modification time the command did not change will be treated as though it had never needed to be built. 
This may cause the output's reverse dependencies to be removed from the list of pending build actions. `rspfile`, `rspfile_content`:: if present (both), Ninja will use a response file for the given command, i.e. write the selected string (`rspfile_content`) to the given file (`rspfile`) before calling the command and delete the file after successful execution of the command. + This is particularly useful on Windows OS, where the maximal length of a command line is limited and response files must be used instead. + Use it like in the following example: + ---- rule link command = link.exe /OUT$out [usual link flags here] @$out.rsp rspfile = $out.rsp rspfile_content = $in build myapp.exe: link a.obj b.obj [possibly many other .obj files] ---- [[ref_rule_command]] Interpretation of the `command` variable ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Fundamentally, command lines behave differently on Unixes and Windows. On Unixes, commands are arrays of arguments. The Ninja `command` variable is passed directly to `sh -c`, which is then responsible for interpreting that string into an argv array. Therefore, the quoting rules are those of the shell, and you can use all the normal shell operators, like `&&` to chain multiple commands, or `VAR=value cmd` to set environment variables. On Windows, commands are strings, so Ninja passes the `command` string directly to `CreateProcess`. (In the common case of simply executing a compiler this means there is less overhead.) Consequently, the quoting rules are determined by the called program, which on Windows are usually provided by the C library. If you need shell interpretation of the command (such as the use of `&&` to chain multiple commands), make the command execute the Windows shell by prefixing the command with `cmd /c`. Ninja may error with "invalid parameter" which usually indicates that the command line length has been exceeded. 
[[ref_outputs]] Build outputs ~~~~~~~~~~~~~ There are two types of build outputs which are subtly different. 1. _Explicit outputs_, as listed in a build line. These are available as the `$out` variable in the rule. + This is the standard form of output to be used for e.g. the object file of a compile command. 2. _Implicit outputs_, as listed in a build line with the syntax +| _out1_ _out2_+ before the `:` of a build line _(available since Ninja 1.7)_. The semantics are identical to explicit outputs, the only difference is that implicit outputs don't show up in the `$out` variable. + This is for expressing outputs that don't show up on the command line of the command. [[ref_dependencies]] Build dependencies ~~~~~~~~~~~~~~~~~~ There are three types of build dependencies which are subtly different. 1. _Explicit dependencies_, as listed in a build line. These are available as the `$in` variable in the rule. Changes in these files cause the output to be rebuilt; if these files are missing and Ninja doesn't know how to build them, the build is aborted. + This is the standard form of dependency to be used e.g. for the source file of a compile command. 2. _Implicit dependencies_, either as picked up from a `depfile` attribute on a rule or from the syntax +| _dep1_ _dep2_+ on the end of a build line. The semantics are identical to explicit dependencies, the only difference is that implicit dependencies don't show up in the `$in` variable. + This is for expressing dependencies that don't show up on the command line of the command; for example, for a rule that runs a script that reads a hardcoded file, the hardcoded file should be an implicit dependency, as changes to the file should cause the output to rebuild, even though it doesn't show up in the arguments. + Note that dependencies as loaded through depfiles have slightly different semantics, as described in the <>. 3. _Order-only dependencies_, expressed with the syntax +|| _dep1_ _dep2_+ on the end of a build line. 
When these are out of date, the output is not rebuilt until they are built, but changes in order-only dependencies alone do not cause the output to be rebuilt. + Order-only dependencies can be useful for bootstrapping dependencies that are only discovered during build time: for example, to generate a header file before starting a subsequent compilation step. (Once the header is used in compilation, a generated dependency file will then express the implicit dependency.) File paths are compared as is, which means that an absolute path and a relative path, pointing to the same file, are considered different by Ninja. [[validations]] Validations ~~~~~~~~~~~ _Available since Ninja 1.11._ Validations listed on the build line cause the specified files to be added to the top level of the build graph (as if they were specified on the Ninja command line) whenever the build line is a transitive dependency of one of the targets specified on the command line or a default target. Validations are added to the build graph regardless of whether the output files of the build statement are dirty or not, and the dirty state of the build statement that outputs the file being used as a validation has no effect on the dirty state of the build statement that requested it. A build edge can list another build edge as a validation even if the second edge depends on the first. Validations are designed to handle rules that perform error checking but don't produce any artifacts needed by the build, for example, static analysis tools. Marking the static analysis rule as an implicit input of the main build rule of the source files or of the rules that depend on the main build rule would slow down the critical path of the build, but using a validation would allow the build to proceed in parallel with the static analysis rule once the main build rule is complete. 
Variable expansion ~~~~~~~~~~~~~~~~~~ Variables are expanded in paths (in a `build` or `default` statement) and on the right side of a `name = value` statement. When a `name = value` statement is evaluated, its right-hand side is expanded immediately (according to the below scoping rules), and from then on `$name` expands to the static string as the result of the expansion. It is never the case that you'll need to "double-escape" a value to prevent it from getting expanded twice. All variables are expanded immediately as they're encountered in parsing, with one important exception: variables in `rule` blocks are expanded when the rule is _used_, not when it is declared. In the following example, the `demo` rule prints "this is a demo of bar". ---- rule demo command = echo "this is a demo of $foo" build out: demo foo = bar ---- [[ref_scope]] Evaluation and scoping ~~~~~~~~~~~~~~~~~~~~~~ Top-level variable declarations are scoped to the file they occur in. Rule declarations are also scoped to the file they occur in. _(Available since Ninja 1.6)_ The `subninja` keyword, used to include another `.ninja` file, introduces a new scope. The included `subninja` file may use the variables and rules from the parent file, and shadow their values for the file's scope, but it won't affect values of the variables in the parent. To include another `.ninja` file in the current scope, much like a C `#include` statement, use `include` instead of `subninja`. Variable declarations indented in a `build` block are scoped to the `build` block. The full lookup order for a variable expanded in a `build` block (or the `rule` it uses) is: 1. Special built-in variables (`$in`, `$out`). 2. Build-level variables from the `build` block. 3. Rule-level variables from the `rule` block (i.e. `$command`). (Note from the above discussion on expansion that these are expanded "late", and may make use of in-scope bindings like `$in`.) 4. File-level variables from the file that the `build` line was in. 5. 
Variables from the file that included that file using the `subninja` keyword. [[ref_dyndep]] Dynamic Dependencies -------------------- _Available since Ninja 1.10._ Some use cases require implicit dependency information to be dynamically discovered from source file content _during the build_ in order to build correctly on the first run (e.g. Fortran module dependencies). This is unlike <> which are only needed on the second run and later to rebuild correctly. A build statement may have a `dyndep` binding naming one of its inputs to specify that dynamic dependency information must be loaded from the file. For example: ---- build out: ... || foo dyndep = foo build foo: ... ---- This specifies that file `foo` is a dyndep file. Since it is an input, the build statement for `out` can never be executed before `foo` is built. As soon as `foo` is finished Ninja will read it to load dynamically discovered dependency information for `out`. This may include additional implicit inputs and/or outputs. Ninja will update the build graph accordingly and the build will proceed as if the information was known originally. Dyndep file reference ~~~~~~~~~~~~~~~~~~~~~ Files specified by `dyndep` bindings use the same <> as <> and have the following layout. 1. A version number in the form `<major>[.<minor>][<suffix>]`: + ---- ninja_dyndep_version = 1 ---- + Currently the version number must always be `1` or `1.0` but may have an arbitrary suffix. 2. One or more build statements of the form: + ---- build out | imp-outs... : dyndep | imp-ins... ---- + Every statement must specify exactly one explicit output and must use the rule name `dyndep`. The `| imp-outs...` and `| imp-ins...` portions are optional. 3. An optional `restat` <> on each build statement. The build statements in a dyndep file must have a one-to-one correspondence to build statements in the <> that name the dyndep file in a `dyndep` binding. No dyndep build statement may be omitted and no extra build statements may be specified. 
Dyndep Examples ~~~~~~~~~~~~~~~ Fortran Modules ^^^^^^^^^^^^^^^ Consider a Fortran source file `foo.f90` that provides a module `foo.mod` (an implicit output of compilation) and another source file `bar.f90` that uses the module (an implicit input of compilation). This implicit dependency must be discovered before we compile either source in order to ensure that `bar.f90` never compiles before `foo.f90`, and that `bar.f90` recompiles when `foo.mod` changes. We can achieve this as follows: ---- rule f95 command = f95 -o $out -c $in rule fscan command = fscan -o $out $in build foobar.dd: fscan foo.f90 bar.f90 build foo.o: f95 foo.f90 || foobar.dd dyndep = foobar.dd build bar.o: f95 bar.f90 || foobar.dd dyndep = foobar.dd ---- In this example the order-only dependencies ensure that `foobar.dd` is generated before either source compiles. The hypothetical `fscan` tool scans the source files, assumes each will be compiled to a `.o` of the same name, and writes `foobar.dd` with content such as: ---- ninja_dyndep_version = 1 build foo.o | foo.mod: dyndep build bar.o: dyndep | foo.mod ---- Ninja will load this file to add `foo.mod` as an implicit output of `foo.o` and implicit input of `bar.o`. This ensures that the Fortran sources are always compiled in the proper order and recompiled when needed. Tarball Extraction ^^^^^^^^^^^^^^^^^^ Consider a tarball `foo.tar` that we want to extract. The extraction time can be recorded with a `foo.tar.stamp` file so that extraction repeats if the tarball changes, but we also would like to re-extract if any of the outputs is missing. However, the list of outputs depends on the content of the tarball and cannot be spelled out explicitly in the ninja build file. 
We can achieve this as follows: ---- rule untar command = tar xf $in && touch $out rule scantar command = scantar --stamp=$stamp --dd=$out $in build foo.tar.dd: scantar foo.tar stamp = foo.tar.stamp build foo.tar.stamp: untar foo.tar || foo.tar.dd dyndep = foo.tar.dd ---- In this example the order-only dependency ensures that `foo.tar.dd` is built before the tarball extracts. The hypothetical `scantar` tool will read the tarball (e.g. via `tar tf`) and write `foo.tar.dd` with content such as: ---- ninja_dyndep_version = 1 build foo.tar.stamp | file1.txt file2.txt : dyndep restat = 1 ---- Ninja will load this file to add `file1.txt` and `file2.txt` as implicit outputs of `foo.tar.stamp`, and to mark the build statement for `restat`. On future builds, if any implicit output is missing the tarball will be extracted again. The `restat` binding tells Ninja to tolerate the fact that the implicit outputs may not have modification times newer than the tarball itself (avoiding re-extraction on every build). 
ninja-1.13.2/doc/style.css000066400000000000000000000021411510764045400153200ustar00rootroot00000000000000:root { color-scheme: light dark; } body { margin: 5ex 10ex; max-width: 80ex; line-height: 1.5; font-family: sans-serif; } h1, h2, h3 { font-weight: normal; } pre, code { font-family: x, monospace; } pre { padding: 1ex; background: #eee; border: solid 1px #ddd; min-width: 0; font-size: 90%; } @media (prefers-color-scheme: dark) { pre { background: #333; border: solid 1px #444; } } code { color: #007; } @media (prefers-color-scheme: dark) { code { color: #a7cec8; } } div.chapter { margin-top: 4em; border-top: solid 2px black; } @media (prefers-color-scheme: dark) { div.chapter { border-top: solid 2px white; } } p { margin-top: 0; } /* The following applies to the left column of a [horizontal] labeled list: */ table.horizontal > tbody > tr > td:nth-child(1) { /* prevent the insertion of a line-break in the middle of a label: */ white-space: nowrap; /* insert a little horizontal padding between the two columns: */ padding-right: 1.5em; /* right-justify labels: */ text-align: end; } 
ninja-1.13.2/misc/000077500000000000000000000000001510764045400136365ustar00rootroot00000000000000ninja-1.13.2/misc/afl-fuzz-tokens/000077500000000000000000000000001510764045400166755ustar00rootroot00000000000000ninja-1.13.2/misc/afl-fuzz-tokens/kw_build000066400000000000000000000000051510764045400204130ustar00rootroot00000000000000buildninja-1.13.2/misc/afl-fuzz-tokens/kw_default000066400000000000000000000000071510764045400207420ustar00rootroot00000000000000defaultninja-1.13.2/misc/afl-fuzz-tokens/kw_include000066400000000000000000000000071510764045400207410ustar00rootroot00000000000000includeninja-1.13.2/misc/afl-fuzz-tokens/kw_pool000066400000000000000000000000041510764045400202640ustar00rootroot00000000000000poolninja-1.13.2/misc/afl-fuzz-tokens/kw_rule000066400000000000000000000000041510764045400202620ustar00rootroot00000000000000ruleninja-1.13.2/misc/afl-fuzz-tokens/kw_subninja000066400000000000000000000000101510764045400211210ustar00rootroot00000000000000subninjaninja-1.13.2/misc/afl-fuzz-tokens/misc_a000066400000000000000000000000011510764045400200420ustar00rootroot00000000000000aninja-1.13.2/misc/afl-fuzz-tokens/misc_b000066400000000000000000000000011510764045400200430ustar00rootroot00000000000000bninja-1.13.2/misc/afl-fuzz-tokens/misc_colon000066400000000000000000000000011510764045400207340ustar00rootroot00000000000000:ninja-1.13.2/misc/afl-fuzz-tokens/misc_cont000066400000000000000000000000021510764045400205660ustar00rootroot00000000000000$ ninja-1.13.2/misc/afl-fuzz-tokens/misc_dollar000066400000000000000000000000011510764045400210770ustar00rootroot00000000000000$ninja-1.13.2/misc/afl-fuzz-tokens/misc_eq000066400000000000000000000000011510764045400202270ustar00rootroot00000000000000=ninja-1.13.2/misc/afl-fuzz-tokens/misc_indent000066400000000000000000000000021510764045400211040ustar00rootroot00000000000000 
ninja-1.13.2/misc/afl-fuzz-tokens/misc_pipe000066400000000000000000000000011510764045400205570ustar00rootroot00000000000000|ninja-1.13.2/misc/afl-fuzz-tokens/misc_pipepipe000066400000000000000000000000021510764045400214360ustar00rootroot00000000000000||ninja-1.13.2/misc/afl-fuzz-tokens/misc_space000066400000000000000000000000011510764045400207150ustar00rootroot00000000000000 ninja-1.13.2/misc/afl-fuzz/000077500000000000000000000000001510764045400153745ustar00rootroot00000000000000ninja-1.13.2/misc/afl-fuzz/build.ninja000066400000000000000000000001501510764045400175100ustar00rootroot00000000000000rule b command = clang -MMD -MF $out.d -o $out -c $in description = building $out build a.o: b a.c ninja-1.13.2/misc/bash-completion000066400000000000000000000036061510764045400166520ustar00rootroot00000000000000# Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Add the following to your .bashrc to tab-complete ninja targets # . 
path/to/ninja/misc/bash-completion _ninja_target() { local cur prev targets dir line targets_command OPTIND # When available, use bash_completion to: # 1) Complete words when the cursor is in the middle of the word # 2) Complete paths with files or directories, as appropriate if _get_comp_words_by_ref cur prev &>/dev/null ; then case $prev in -f) _filedir return 0 ;; -C) _filedir -d return 0 ;; esac else cur="${COMP_WORDS[COMP_CWORD]}" fi if [[ "$cur" == "--"* ]]; then # there is currently only one argument that takes -- COMPREPLY=($(compgen -P '--' -W 'version' -- "${cur:2}")) else dir="." line=$(echo ${COMP_LINE} | cut -d" " -f 2-) # filter out all non relevant arguments but keep C for dirs while getopts :C:f:j:l:k:nvd:t: opt $line; do case $opt in # eval for tilde expansion C) eval dir="$OPTARG" ;; esac done; targets_command="eval ninja -C \"${dir}\" -t targets all 2>/dev/null | cut -d: -f1" COMPREPLY=($(compgen -W '`${targets_command}`' -- "$cur")) fi return } complete -F _ninja_target ninja ninja-1.13.2/misc/ci.py000077500000000000000000000021011510764045400146000ustar00rootroot00000000000000#!/usr/bin/env python3 import os ignores = [ '.git/', 'misc/afl-fuzz-tokens/', 'src/depfile_parser.cc', 'src/lexer.cc', ] error_count = 0 def error(path: str, msg: str) -> None: global error_count error_count += 1 print('\x1b[1;31m{}\x1b[0;31m{}\x1b[0m'.format(path, msg)) try: import git repo = git.Repo('.') except: repo = None for root, directory, filenames in os.walk('.'): for filename in filenames: path = os.path.join(root, filename)[2:] if any([path.startswith(x) for x in ignores]) or (repo is not None and repo.ignored(path)): continue with open(path, 'rb') as file: line_nr = 1 try: for line in [x.decode() for x in file.readlines()]: if len(line) == 0 or line[-1] != '\n': error(path, ' missing newline at end of file.') if len(line) > 1: if line[-2] == '\r': error(path, ' has Windows line endings.') break if line[-2] == ' ' or line[-2] == '\t': error(path, ':{} has 
trailing whitespace.'.format(line_nr)) line_nr += 1 except UnicodeError: pass # binary file exit(error_count) ninja-1.13.2/misc/inherited-fds.ninja000066400000000000000000000010411510764045400174000ustar00rootroot00000000000000# This build file prints out a list of open file descriptors in # Ninja subprocesses, to help verify we don't accidentally leak # any. # Because one fd leak was in the code managing multiple subprocesses, # this test brings up multiple subprocesses and then dumps the fd # table of the last one. # Use like: ./ninja -f misc/inherited-fds.ninja rule sleep command = sleep 10000 rule dump command = sleep 1; ls -l /proc/self/fd; exit 1 build all: phony a b c d e build a: sleep build b: sleep build c: sleep build d: sleep build e: dump ninja-1.13.2/misc/jobserver_pool.py000077500000000000000000000264551510764045400172610ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright 2024 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup a GNU Make Jobserver jobs pool then launch a command with it. On Windows, this only supports the semaphore-based scheme. On Posix, this uses a fifo by default, use --pipe for pipe mode. On exit, this script verifies that all job slots were returned to the pool, and will print an error message if this is not the case. This is useful to catch the use of broken protocol clients. Use the `--no-check` flag to disable this. See --help-usage for usage examples. 
""" import argparse import os import platform import subprocess import sys import typing as T # Technical note about the MAKEFLAGS values set by this script. # # All the MAKEFLAGS values created by this script begin # with " -j{count} ", i.e. an initial space, the "-j" characters # followed by a job slot count then another space. # # The initial space is only there to mimic what GNU Make 4.3 # does. Other pool implementations do not use one and thus # clients should not expect it (even GNU Make doesn't seem # to care when used as a jobserver client). # # The {count} value is also not available in many pool # implementations, but is useful to better debug multi-builds # using this script (i.e. to verify that the pool has the # expected size). Protocol clients should not depend on it # though. _DEFAULT_NAME = "jobserver_pool" _IS_WINDOWS = sys.platform in ("win32", "cygwin") if _IS_WINDOWS: try: # This requires pywin32 to be installed. import pywintypes import win32event import win32api except ModuleNotFoundError: print( "\nERROR: Could not import Win32 API, please install pywin32, e.g. `python -m pip install pywin32`.\n", file=sys.stderr, ) raise # It seems impossible to import a proper mypy-compatible type definition for PyHANDLE # 'from pywintypes import PyHANDLE' fails stating there is no such name. # 'from pywintypes import HANDLE as PyHANDLE' fails because HANDLE is a function, not a type. PyHandle: T.TypeAlias = T.Any def create_sem( sem_name: str, jobs_count: int ) -> T.Tuple[PyHandle, T.Dict[str, str]]: """Create and initialize Win32 semaphore.""" assert jobs_count > 0, f"Jobs count must be strictly positive" # The win32event documentation states that the first argument to CreateSemaphore() # can be None to indicate default security attributes, but mypy only wants # a PySECURITY_ATTRIBUTES for some reason. 
handle = win32event.CreateSemaphore( None, # type: ignore jobs_count - 1, jobs_count - 1, sem_name, ) assert bool(handle), f"Error creating Win32 semaphore {win32api.GetLastError()}" # See technical note above about MAKEFLAGS format. env = dict(os.environ) env["MAKEFLAGS"] = f" -j{jobs_count} --jobserver-auth=" + sem_name return handle, env def check_sem_count(handle: PyHandle, jobs_count: int) -> int: if jobs_count <= 1: # Nothing to check here. return 0 expected_count = jobs_count - 1 read_count = win32event.ReleaseSemaphore(handle, 1) if read_count < expected_count: print( f"ERROR: {expected_count - read_count} were missing from the jobs pool (got {read_count}, expected {expected_count})", file=sys.stderr, ) return 1 if read_count > expected_count: print( f"ERROR: {read_count - expected_count} extra tokens were released to the jobs pool (got {read_count}, expected {expected_count})", file=sys.stderr, ) return 1 return 0 def print_usage() -> int: print( r"""Example usage: # Start after setting the server to provide as many jobs # as available CPUs (the default) python \path\to\jobserver_pool.py # Start with a fixed number of job slots. python \path\to\jobserver_pool.py -j10 # Disable the feature with a non-positive count. This is equivalent # to running directly. python \path\to\jobserver_pool.py -j0 # Use a specific semaphore name python \path\to\jobserver_pool.py --name=my_build_jobs # Setup jobserver then start new interactive PowerShell # session, print MAKEFLAGS value, build stuff, then exit. python \path\to\jobserver_pool.py powershell.exe $env:MAKEFLAGS ... build stuff ... 
exit """ ) return 0 else: # !_IS_WINDOWS def create_pipe(jobs_count: int) -> T.Tuple[int, int, T.Dict[str, str]]: """Create and fill Posix PIPE.""" read_fd, write_fd = os.pipe() os.set_inheritable(read_fd, True) os.set_inheritable(write_fd, True) assert jobs_count > 0, f"Token count must be strictly positive" os.write(write_fd, (jobs_count - 1) * b"x") # See technical note above about MAKEFLAGS format. env = dict(os.environ) env["MAKEFLAGS"] = ( f" -j{jobs_count} --jobserver-fds={read_fd},{write_fd} --jobserver-auth={read_fd},{write_fd}" ) return read_fd, write_fd, env def create_fifo(path: str, jobs_count: int) -> T.Tuple[int, int, T.Dict[str, str]]: """Create and fill Posix FIFO.""" if os.path.exists(path): os.remove(path) # mypy complains that this does not exit on Windows. os.mkfifo(path) # type: ignore read_fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK) write_fd = os.open(path, os.O_WRONLY | os.O_NONBLOCK) assert jobs_count > 0, f"Token count must be strictly positive" os.write(write_fd, (jobs_count - 1) * b"x") # See technical note above about MAKEFLAGS format. env = dict(os.environ) env["MAKEFLAGS"] = f" -j{jobs_count} --jobserver-auth=fifo:" + path return read_fd, write_fd, env def print_usage() -> int: print( r"""Example usage: # Start after setting the job pool to provide as many jobs # as available CPUs (the default) /path/to/jobserver_pool.py # Start with a fixed number of jobs /path/to/jobserver_pool.py -j10 # Disable the feature with a non-positive count. This is equivalent # to running directly. /path/to/jobserver_pool.py -j0 # Use a specific FIFO path /path/to/jobserver_pool.py --fifo=/tmp/my_build_jobs # Setup jobserver then start new interactive Bash shell # session, print MAKEFLAGS value, build stuff, then exit. /path/to/jobserver_pool.py bash -i echo "$MAKEFLAGS" ... build stuff ... 
exit """ ) return 0 def check_pipe_tokens(read_fd: int, jobs_count: int) -> int: if jobs_count <= 1: # Nothing to check return 0 # Remove implicit token from the expected count. expected_count = jobs_count - 1 os.set_blocking(read_fd, False) read_count = 0 while True: try: token = os.read(read_fd, 1) if len(token) == 0: # End of pipe? break read_count += 1 except BlockingIOError: break if read_count < expected_count: print( f"ERROR: {expected_count - read_count} tokens were missing from the jobs pool (got {read_count}, expected {expected_count})", file=sys.stderr, ) return 1 if read_count > expected_count: print( f"ERROR: {read_count - expected_count} extra tokens were released to the jobs pool (got {read_count}, expected {expected_count})", file=sys.stderr, ) return 1 return 0 def main() -> int: parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter ) if _IS_WINDOWS: parser.add_argument( "--name", help=f"Specify semaphore name, default is {_DEFAULT_NAME}", default=_DEFAULT_NAME, ) else: mutex_group = parser.add_mutually_exclusive_group() mutex_group.add_argument( "--pipe", action="store_true", help="Implement the pool with a Unix pipe (default is FIFO).", ) mutex_group.add_argument( "--fifo", default=_DEFAULT_NAME, help=f"Specify pool FIFO file path (default ./{_DEFAULT_NAME})", ) parser.add_argument( "--no-check", action="store_true", help="Disable the final check that verifies that all job slots were returned to the pool on exit.", ) parser.add_argument( "--help-usage", action="store_true", help="Print usage examples." 
) parser.add_argument( "-j", "--jobs", action="store", metavar="COUNT", dest="jobs_count", type=int, default=os.cpu_count(), help="Set job slots ccount, default is available CPUs count", ) parser.add_argument("command", nargs=argparse.REMAINDER, help="Command to run.") args = parser.parse_args() if args.help_usage: return print_usage() if not args.command: parser.error("This script requires at least one command argument!") jobs_count = args.jobs_count if jobs_count <= 0: # Disable the feature. ret = subprocess.run(args.command) exit_code = ret.returncode elif _IS_WINDOWS: # Run with a Window semaphore. try: handle, env = create_sem(args.name, jobs_count) ret = subprocess.run(args.command, env=env) exit_code = ret.returncode if exit_code == 0 and not args.no_check: exit_code = check_sem_count(handle, jobs_count) finally: win32api.CloseHandle(handle) else: # Run with pipe descriptors by default, or a FIFO if --fifo is used. exit_code = 0 fifo_path = "" try: if not args.pipe: fifo_path = os.path.abspath(args.fifo) read_fd, write_fd, env = create_fifo(fifo_path, args.jobs_count) ret = subprocess.run(args.command, env=env) else: read_fd, write_fd, env = create_pipe(args.jobs_count) ret = subprocess.run( args.command, env=env, pass_fds=(read_fd, write_fd) ) exit_code = ret.returncode if exit_code == 0 and not args.no_check: exit_code = check_pipe_tokens(read_fd, jobs_count) finally: os.close(read_fd) os.close(write_fd) if fifo_path: os.remove(fifo_path) return exit_code if __name__ == "__main__": sys.exit(main()) ninja-1.13.2/misc/jobserver_pool_test.py000077500000000000000000000132721510764045400203110ustar00rootroot00000000000000#!/usr/bin/env python3 """Regression tests for the jobserver_pool.py script.""" import os import re import platform import subprocess import sys import tempfile import unittest import typing as T _SCRIPT_DIR = os.path.dirname(__file__) _JOBSERVER_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_pool.py") _JOBSERVER_CMD = [sys.executable, 
_JOBSERVER_SCRIPT] _IS_WINDOWS = sys.platform == "win32" # This is only here to avoid depending on the non-standard # scanf package which does the job properly :-) def _simple_scanf(pattern: str, input: str) -> T.Sequence[T.Any]: """Extract values from input using a scanf-like pattern. This is very basic and only used to avoid depending on the non-standard scanf package which does the job properly. Only supports %d, %s and %%, does not support any fancy escaping. """ re_pattern = "" groups = "" from_pos = 0 # Just in case. assert "." not in pattern, f"Dots in pattern not supported." assert "?" not in pattern, f"Question marks in pattern not supported." while True: next_percent = pattern.find("%", from_pos) if next_percent < 0 or next_percent + 1 >= len(pattern): re_pattern += pattern[from_pos:] break re_pattern += pattern[from_pos:next_percent] from_pos = next_percent + 2 formatter = pattern[next_percent + 1] if formatter == "%": re_pattern += "%" elif formatter == "d": groups += formatter re_pattern += "(\\d+)" elif formatter == "s": groups += formatter re_pattern += "(\\S+)" else: assert False, f"Unsupported scanf formatter: %{formatter}" m = re.match(re_pattern, input) if not m: return None result = [] for group_index, formatter in enumerate(groups, start=1): if formatter == "d": result.append(int(m.group(group_index))) elif formatter == "s": result.append(m.group(group_index)) else: assert False, f"Unsupported formatter {formatter}" return result class JobserverPool(unittest.TestCase): def _run_jobserver_echo_MAKEFLAGS( self, cmd_args_prefix ) -> "subprocess.CompletedProcess[str]": if _IS_WINDOWS: cmd_args = cmd_args_prefix + ["cmd.exe", "/c", "echo %MAKEFLAGS%"] else: cmd_args = cmd_args_prefix + ["sh", "-c", 'echo "$MAKEFLAGS"'] ret = subprocess.run( cmd_args, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) ret.check_returncode() return ret def _test_echo_MAKEFLAGS(self, cmd_args_prefix, expected_core_count: int): ret = 
self._run_jobserver_echo_MAKEFLAGS(cmd_args_prefix) makeflags = ret.stdout.rstrip() if expected_core_count == 0: if _IS_WINDOWS: # On Windows, echo %FOO% prints "%FOO%" if FOO is not defined! self.assertEqual(makeflags.strip(), "%MAKEFLAGS%") else: self.assertEqual(makeflags.strip(), "") else: # expected_core_count > 0 if _IS_WINDOWS: expected_format = " -j%d --jobserver-auth=%s" else: expected_format = " -j%d --jobserver-auth=fifo:%s" m = _simple_scanf(expected_format, makeflags) self.assertTrue( m, f"Invalid MAKEFLAGS value, expected format [{expected_format}], got: [{makeflags}]", ) if _IS_WINDOWS: sem_name = m[1] self.assertEqual( sem_name, "jobserver_pool", f"Invalid semaphore name in MAKEFLAGS value [{makeflags}]", ) else: fifo_name = os.path.basename(m[1]) self.assertEqual( fifo_name, "jobserver_pool", f"Invalid fifo name in MAKEFLAGS value [{makeflags}]", ) core_count = m[0] self.assertEqual( core_count, expected_core_count, f"Invalid core count {core_count}, expected {expected_core_count}", ) def test_MAKEFLAGS_default(self): self._test_echo_MAKEFLAGS(_JOBSERVER_CMD, os.cpu_count()) def test_MAKEFLAGS_with_10_jobs(self): self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["-j10"], 10) self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs=10"], 10) self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs", "10"], 10) def test_MAKEFLAGS_with_no_jobs(self): self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["-j0"], 0) self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs=0"], 0) self._test_echo_MAKEFLAGS(_JOBSERVER_CMD + ["--jobs", "0"], 0) @unittest.skipIf(_IS_WINDOWS, "--fifo is not supported on Windows") def test_MAKEFLAGS_with_fifo(self): fifo_name = "test_fifo" fifo_path = os.path.abspath(fifo_name) ret = self._run_jobserver_echo_MAKEFLAGS( _JOBSERVER_CMD + ["-j10", "--fifo", fifo_name] ) makeflags = ret.stdout.rstrip() self.assertEqual(makeflags, " -j10 --jobserver-auth=fifo:" + fifo_path) @unittest.skipIf(not _IS_WINDOWS, "--name is not supported on Posix") def 
test_MAKEFLAGS_with_name(self): sem_name = "test_semaphore" ret = self._run_jobserver_echo_MAKEFLAGS( _JOBSERVER_CMD + ["-j10", "--name", sem_name] ) makeflags = ret.stdout.rstrip() self.assertEqual(makeflags, " -j10 --jobserver-auth=" + sem_name) if __name__ == "__main__": unittest.main() ninja-1.13.2/misc/jobserver_test.py000077500000000000000000000270761510764045400172670ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright 2024 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from textwrap import dedent import os import platform import subprocess import tempfile import typing as T import shlex import sys import unittest _SCRIPT_DIR = os.path.realpath(os.path.dirname(__file__)) _JOBSERVER_POOL_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_pool.py") _JOBSERVER_TEST_HELPER_SCRIPT = os.path.join(_SCRIPT_DIR, "jobserver_test_helper.py") _PLATFORM_IS_WINDOWS = platform.system() == "Windows" # Set this to True to debug command invocations. 
_DEBUG = False default_env = dict(os.environ) default_env.pop("NINJA_STATUS", None) default_env.pop("MAKEFLAGS", None) default_env["TERM"] = "dumb" NINJA_PATH = os.path.abspath("./ninja") class BuildDir: def __init__(self, build_ninja: str): self.build_ninja = dedent(build_ninja) self.d: T.Optional[tempfile.TemporaryDirectory] = None def __enter__(self): self.d = tempfile.TemporaryDirectory() with open(os.path.join(self.d.name, "build.ninja"), "w") as f: f.write(self.build_ninja) return self def __exit__(self, exc_type, exc_val, exc_tb): self.d.cleanup() @property def path(self) -> str: assert self.d return self.d.name def run( self, cmd_flags: T.Sequence[str] = [], env: T.Dict[str, str] = default_env, ) -> None: """Run a command, raise exception on error. Do not capture outputs.""" ret = subprocess.run(cmd_flags, env=env) ret.check_returncode() def ninja_run( self, ninja_args: T.List[str], prefix_args: T.List[str] = [], extra_env: T.Dict[str, str] = {}, ) -> "subprocess.CompletedProcess[str]": ret = self.ninja_spawn( ninja_args, prefix_args=prefix_args, extra_env=extra_env, capture_output=False, ) ret.check_returncode() return ret def ninja_clean(self) -> None: self.ninja_run(["-t", "clean"]) def ninja_spawn( self, ninja_args: T.List[str], prefix_args: T.List[str] = [], extra_env: T.Dict[str, str] = {}, capture_output: bool = True, ) -> "subprocess.CompletedProcess[str]": """Run Ninja command and capture outputs.""" cmd_args = prefix_args + [NINJA_PATH, "-C", self.path] + ninja_args if _DEBUG: cmd_str = " ".join(shlex.quote(c) for c in cmd_args) print(f"CMD [{cmd_str}]", file=sys.stderr) return subprocess.run( cmd_args, text=True, stdout=subprocess.PIPE if capture_output else None, stderr=subprocess.PIPE if capture_output else None, env={**default_env, **extra_env}, ) def span_output_file(span_n: int) -> str: return "out%02d" % span_n def generate_build_plan(command_count: int) -> str: """Generate a Ninja build plan for |command_count| parallel tasks. 
Each task calls the test helper script which waits for 50ms then writes its own start and end time to its output file. """ result = f""" rule span command = {sys.executable} -S {_JOBSERVER_TEST_HELPER_SCRIPT} --duration-ms=50 $out """ for n in range(command_count): result += "build %s: span\n" % span_output_file(n) result += "build all: phony %s\n" % " ".join( [span_output_file(n) for n in range(command_count)] ) return result def compute_max_overlapped_spans(build_dir: str, command_count: int) -> int: """Compute the maximum number of overlapped spanned tasks. This reads the output files from |build_dir| and look at their start and end times to compute the maximum number of tasks that were run in parallel. """ # Read the output files. if command_count < 2: return 0 spans: T.List[T.Tuple[int, int]] = [] for n in range(command_count): with open(os.path.join(build_dir, span_output_file(n)), "rb") as f: content = f.read().decode("utf-8") lines = content.splitlines() assert len(lines) == 2, f"Unexpected output file content: [{content}]" spans.append((int(lines[0]), int(lines[1]))) # Stupid but simple, for each span, count the number of other spans that overlap it. 
max_overlaps = 1 for n in range(command_count): cur_start, cur_end = spans[n] cur_overlaps = 1 for m in range(command_count): other_start, other_end = spans[m] if n != m and other_end > cur_start and other_start < cur_end: cur_overlaps += 1 if cur_overlaps > max_overlaps: max_overlaps = cur_overlaps return max_overlaps class JobserverTest(unittest.TestCase): def test_no_jobserver_client(self): task_count = 4 build_plan = generate_build_plan(task_count) with BuildDir(build_plan) as b: output = b.run([NINJA_PATH, "-C", b.path, f"-j{task_count}", "all"]) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, task_count) b.ninja_clean() output = b.run([NINJA_PATH, "-C", b.path, "-j1", "all"]) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 1) def _run_client_test(self, jobserver_args: T.List[str]) -> None: task_count = 4 build_plan = generate_build_plan(task_count) with BuildDir(build_plan) as b: # First, run the full tasks with with {task_count} tokens, this should allow all # tasks to run in parallel. ret = b.ninja_run( ninja_args=["all"], prefix_args=jobserver_args + [f"--jobs={task_count}"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, task_count) # Second, use 2 tokens only, and verify that this was enforced by Ninja. b.ninja_clean() b.ninja_run( ["all"], prefix_args=jobserver_args + ["--jobs=2"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 2) # Third, verify that --jobs=1 serializes all tasks. b.ninja_clean() b.ninja_run( ["all"], prefix_args=jobserver_args + ["--jobs=1"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 1) # Finally, verify that -j1 overrides the pool. 
b.ninja_clean() b.ninja_run( ["-j1", "all"], prefix_args=jobserver_args + [f"--jobs={task_count}"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 1) # On Linux, use taskset to limit the number of available cores to 1 # and verify that the jobserver overrides the default Ninja parallelism # and that {task_count} tasks are still spawned in parallel. if platform.system() == "Linux": # First, run without a jobserver, with a single CPU, Ninja will # use a parallelism of 2 in this case (GuessParallelism() in ninja.cc) b.ninja_clean() b.ninja_run( ["all"], prefix_args=["taskset", "-c", "0"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 2) # Now with a jobserver with {task_count} tasks. b.ninja_clean() b.ninja_run( ["all"], prefix_args=jobserver_args + [f"--jobs={task_count}"] + ["taskset", "-c", "0"], ) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, task_count) @unittest.skipIf(_PLATFORM_IS_WINDOWS, "These test methods do not work on Windows") def test_jobserver_client_with_posix_fifo(self): self._run_client_test([sys.executable, "-S", _JOBSERVER_POOL_SCRIPT]) @unittest.skipIf(_PLATFORM_IS_WINDOWS, "These test methods do not work on Windows") def test_jobserver_client_with_posix_pipe(self): # Verify that setting up a --pipe server does not make Ninja exit with an error. # Instead, a warning is printed. 
task_count = 4 build_plan = generate_build_plan(task_count) with BuildDir(build_plan) as b: prefix_args = [ sys.executable, "-S", _JOBSERVER_POOL_SCRIPT, "--pipe", f"--jobs={task_count}", ] def run_ninja_with_jobserver_pipe(args): ret = b.ninja_spawn(args, prefix_args=prefix_args) ret.check_returncode() return ret.stdout, ret.stderr output, error = run_ninja_with_jobserver_pipe(["all"]) if _DEBUG: print(f"OUTPUT [{output}]\nERROR [{error}]\n", file=sys.stderr) self.assertTrue(error.find("Pipe-based protocol is not supported!") >= 0) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, task_count) # Using an explicit -j ignores the jobserver pool. b.ninja_clean() output, error = run_ninja_with_jobserver_pipe(["-j1", "all"]) if _DEBUG: print(f"OUTPUT [{output}]\nERROR [{error}]\n", file=sys.stderr) self.assertFalse(error.find("Pipe-based protocol is not supported!") >= 0) max_overlaps = compute_max_overlapped_spans(b.path, task_count) self.assertEqual(max_overlaps, 1) def _test_MAKEFLAGS_value( self, ninja_args: T.List[str] = [], prefix_args: T.List[str] = [] ): build_plan = r""" rule print command = echo MAKEFLAGS="[$$MAKEFLAGS]" build all: print """ with BuildDir(build_plan) as b: ret = b.ninja_spawn( ninja_args + ["--quiet", "all"], prefix_args=prefix_args ) self.assertEqual(ret.returncode, 0) output = ret.stdout.strip() pos = output.find("MAKEFLAGS=[") self.assertNotEqual(pos, -1, "Could not find MAKEFLAGS in output!") makeflags, sep, _ = output[pos + len("MAKEFLAGS=[") :].partition("]") self.assertEqual(sep, "]", "Missing ] in output!: " + output) self.assertTrue( "--jobserver-auth=" in makeflags, f"Missing --jobserver-auth from MAKEFLAGS [{makeflags}]\nSTDOUT [{ret.stdout}]\nSTDERR [{ret.stderr}]", ) def test_client_passes_MAKEFLAGS(self): self._test_MAKEFLAGS_value( prefix_args=[sys.executable, "-S", _JOBSERVER_POOL_SCRIPT] ) if __name__ == "__main__": unittest.main() 
ninja-1.13.2/misc/jobserver_test_helper.py000077500000000000000000000024661510764045400206220ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright 2024 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple utility used by the jobserver test. Wait for specific time, then write start/stop times to output file.""" import argparse import time import sys from pathlib import Path def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "--duration-ms", default="50", help="sleep duration in milliseconds (default 50)", ) parser.add_argument("output_file", type=Path, help="output file name.") args = parser.parse_args() now_time_ns = time.time_ns() time.sleep(int(args.duration_ms) / 1000.0) args.output_file.write_text(f"{now_time_ns}\n{time.time_ns()}\n") return 0 if __name__ == "__main__": sys.exit(main()) ninja-1.13.2/misc/long-slow-build.ninja000066400000000000000000000014311510764045400176740ustar00rootroot00000000000000# An input file for running a "slow" build. 
# Use like: ninja -f misc/long-slow-build.ninja all rule sleep command = sleep 1 description = SLEEP $out build 0: sleep README build 1: sleep README build 2: sleep README build 3: sleep README build 4: sleep README build 5: sleep README build 6: sleep README build 7: sleep README build 8: sleep README build 9: sleep README build 10: sleep 0 build 11: sleep 1 build 12: sleep 2 build 13: sleep 3 build 14: sleep 4 build 15: sleep 5 build 16: sleep 6 build 17: sleep 7 build 18: sleep 8 build 19: sleep 9 build 20: sleep 10 build 21: sleep 11 build 22: sleep 12 build 23: sleep 13 build 24: sleep 14 build 25: sleep 15 build 26: sleep 16 build 27: sleep 17 build 28: sleep 18 build 29: sleep 19 build all: phony 20 21 22 23 24 25 26 27 28 29 ninja-1.13.2/misc/manifest_fuzzer.cc000066400000000000000000000022761510764045400173670ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "stdint.h" #include #include "disk_interface.h" #include "state.h" #include "manifest_parser.h" #include extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { char build_file[256]; sprintf(build_file, "/tmp/build.ninja"); FILE *fp = fopen(build_file, "wb"); if (!fp) return 0; fwrite(data, size, 1, fp); fclose(fp); std::string err; RealDiskInterface disk_interface; State state; ManifestParser parser(&state, &disk_interface); parser.Load("/tmp/build.ninja", &err); std::__fs::filesystem::remove_all("/tmp/build.ninja"); return 0; } ninja-1.13.2/misc/measure.py000077500000000000000000000034661510764045400156650ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """measure the runtime of a command by repeatedly running it. """ import time import subprocess import sys from typing import Union, List devnull = open('/dev/null', 'w') def run(cmd: Union[str, List[str]], repeat: int = 10) -> None: print('sampling:', end=' ') sys.stdout.flush() samples = [] for _ in range(repeat): start = time.time() subprocess.call(cmd, stdout=devnull, stderr=devnull) end = time.time() dt = (end - start) * 1000 print('%dms' % int(dt), end=' ') sys.stdout.flush() samples.append(dt) print() # We're interested in the 'pure' runtime of the code, which is # conceptually the smallest time we'd see if we ran it enough times # such that it got the perfect time slices / disk cache hits. 
best = min(samples) # Also print how varied the outputs were in an attempt to make it # more obvious if something has gone terribly wrong. err = sum(s - best for s in samples) / float(len(samples)) print('estimate: %dms (mean err %.1fms)' % (best, err)) if __name__ == '__main__': if len(sys.argv) < 2: print('usage: measure.py command args...') sys.exit(1) run(cmd=sys.argv[1:]) ninja-1.13.2/misc/ninja.vim000066400000000000000000000055111510764045400154540ustar00rootroot00000000000000" ninja build file syntax. " Language: ninja build file as described at " http://ninja-build.org/manual.html " Version: 1.5 " Last Change: 2018/04/05 " Maintainer: Nicolas Weber " Version 1.4 of this script is in the upstream vim repository and will be " included in the next vim release. If you change this, please send your change " upstream. " ninja lexer and parser are at " https://github.com/ninja-build/ninja/blob/master/src/lexer.in.cc " https://github.com/ninja-build/ninja/blob/master/src/manifest_parser.cc if exists("b:current_syntax") finish endif let s:cpo_save = &cpo set cpo&vim syn case match " Comments are only matched when the # is at the beginning of the line (with " optional whitespace), as long as the prior line didn't end with a $ " continuation. syn match ninjaComment /\(\$\n\)\@" syn match ninjaKeyword "^rule\>" syn match ninjaKeyword "^pool\>" syn match ninjaKeyword "^default\>" syn match ninjaKeyword "^include\>" syn match ninjaKeyword "^subninja\>" " Both 'build' and 'rule' begin a variable scope that ends " on the first line without indent. 'rule' allows only a " limited set of magic variables, 'build' allows general " let assignments. 
" manifest_parser.cc, ParseRule() syn region ninjaRule start="^rule" end="^\ze\S" contains=TOP transparent syn keyword ninjaRuleCommand contained containedin=ninjaRule command \ deps depfile description generator \ pool restat rspfile rspfile_content syn region ninjaPool start="^pool" end="^\ze\S" contains=TOP transparent syn keyword ninjaPoolCommand contained containedin=ninjaPool depth " Strings are parsed as follows: " lexer.in.cc, ReadEvalString() " simple_varname = [a-zA-Z0-9_-]+; " varname = [a-zA-Z0-9_.-]+; " $$ -> $ " $\n -> line continuation " '$ ' -> escaped space " $simple_varname -> variable " ${varname} -> variable syn match ninjaDollar "\$\$" syn match ninjaWrapLineOperator "\$$" syn match ninjaSimpleVar "\$[a-zA-Z0-9_-]\+" syn match ninjaVar "\${[a-zA-Z0-9_.-]\+}" " operators are: " variable assignment = " rule definition : " implicit dependency | " order-only dependency || syn match ninjaOperator "\(=\|:\||\|||\)\ze\s" hi def link ninjaComment Comment hi def link ninjaKeyword Keyword hi def link ninjaRuleCommand Statement hi def link ninjaPoolCommand Statement hi def link ninjaDollar ninjaOperator hi def link ninjaWrapLineOperator ninjaOperator hi def link ninjaOperator Operator hi def link ninjaSimpleVar ninjaVar hi def link ninjaVar Identifier let b:current_syntax = "ninja" let &cpo = s:cpo_save unlet s:cpo_save ninja-1.13.2/misc/ninja_syntax.py000066400000000000000000000177241510764045400167300ustar00rootroot00000000000000#!/usr/bin/python # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Python module for generating .ninja files. Note that this is emphatically not a required piece of Ninja; it's just a helpful utility for build-file-generation systems that already use Python. """ import re import textwrap from io import TextIOWrapper from typing import Dict, List, Match, Optional, Tuple, Union def escape_path(word: str) -> str: return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:') class Writer(object): def __init__(self, output: TextIOWrapper, width: int = 78) -> None: self.output = output self.width = width def newline(self) -> None: self.output.write('\n') def comment(self, text: str) -> None: for line in textwrap.wrap(text, self.width - 2, break_long_words=False, break_on_hyphens=False): self.output.write('# ' + line + '\n') def variable( self, key: str, value: Optional[Union[bool, int, float, str, List[str]]], indent: int = 0, ) -> None: if value is None: return if isinstance(value, list): value = ' '.join(filter(None, value)) # Filter out empty strings. 
self._line('%s = %s' % (key, value), indent) def pool(self, name: str, depth: int) -> None: self._line('pool %s' % name) self.variable('depth', depth, indent=1) def rule( self, name: str, command: str, description: Optional[str] = None, depfile: Optional[str] = None, generator: bool = False, pool: Optional[str] = None, restat: bool = False, rspfile: Optional[str] = None, rspfile_content: Optional[str] = None, deps: Optional[Union[str, List[str]]] = None, ) -> None: self._line('rule %s' % name) self.variable('command', command, indent=1) if description: self.variable('description', description, indent=1) if depfile: self.variable('depfile', depfile, indent=1) if generator: self.variable('generator', '1', indent=1) if pool: self.variable('pool', pool, indent=1) if restat: self.variable('restat', '1', indent=1) if rspfile: self.variable('rspfile', rspfile, indent=1) if rspfile_content: self.variable('rspfile_content', rspfile_content, indent=1) if deps: self.variable('deps', deps, indent=1) def build( self, outputs: Union[str, List[str]], rule: str, inputs: Optional[Union[str, List[str]]] = None, implicit: Optional[Union[str, List[str]]] = None, order_only: Optional[Union[str, List[str]]] = None, variables: Optional[ Union[ List[Tuple[str, Optional[Union[str, List[str]]]]], Dict[str, Optional[Union[str, List[str]]]], ] ] = None, implicit_outputs: Optional[Union[str, List[str]]] = None, pool: Optional[str] = None, dyndep: Optional[str] = None, ) -> List[str]: outputs = as_list(outputs) out_outputs = [escape_path(x) for x in outputs] all_inputs = [escape_path(x) for x in as_list(inputs)] if implicit: implicit = [escape_path(x) for x in as_list(implicit)] all_inputs.append('|') all_inputs.extend(implicit) if order_only: order_only = [escape_path(x) for x in as_list(order_only)] all_inputs.append('||') all_inputs.extend(order_only) if implicit_outputs: implicit_outputs = [escape_path(x) for x in as_list(implicit_outputs)] out_outputs.append('|') 
out_outputs.extend(implicit_outputs) self._line('build %s: %s' % (' '.join(out_outputs), ' '.join([rule] + all_inputs))) if pool is not None: self._line(' pool = %s' % pool) if dyndep is not None: self._line(' dyndep = %s' % dyndep) if variables: if isinstance(variables, dict): iterator = iter(variables.items()) else: iterator = iter(variables) for key, val in iterator: self.variable(key, val, indent=1) return outputs def include(self, path: str) -> None: self._line('include %s' % path) def subninja(self, path: str) -> None: self._line('subninja %s' % path) def default(self, paths: Union[str, List[str]]) -> None: self._line('default %s' % ' '.join(as_list(paths))) def _count_dollars_before_index(self, s: str, i: int) -> int: """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 while dollar_index > 0 and s[dollar_index] == '$': dollar_count += 1 dollar_index -= 1 return dollar_count def _line(self, text: str, indent: int = 0) -> None: """Write 'text' word-wrapped at self.width characters.""" leading_space = ' ' * indent while len(leading_space) + len(text) > self.width: # The text is too wide; wrap if possible. # Find the rightmost space that would obey our width constraint and # that's not an escaped space. available_space = self.width - len(leading_space) - len(' $') space = available_space while True: space = text.rfind(' ', 0, space) if (space < 0 or self._count_dollars_before_index(text, space) % 2 == 0): break if space < 0: # No such space; just use the first unescaped space we can find. space = available_space - 1 while True: space = text.find(' ', space + 1) if (space < 0 or self._count_dollars_before_index(text, space) % 2 == 0): break if space < 0: # Give up on breaking. break self.output.write(leading_space + text[0:space] + ' $\n') text = text[space+1:] # Subsequent lines are continuations, so indent them. 
leading_space = ' ' * (indent+2) self.output.write(leading_space + text + '\n') def close(self) -> None: self.output.close() def as_list(input: Optional[Union[str, List[str]]]) -> List[str]: if input is None: return [] if isinstance(input, list): return input return [input] def escape(string: str) -> str: """Escape a string such that it can be embedded into a Ninja file without further interpretation.""" assert '\n' not in string, 'Ninja syntax does not allow newlines' # We only have one special metacharacter: '$'. return string.replace('$', '$$') def expand(string: str, vars: Dict[str, str], local_vars: Dict[str, str] = {}) -> str: """Expand a string containing $vars as Ninja would. Note: doesn't handle the full Ninja variable syntax, but it's enough to make configure.py's use of it work. """ def exp(m: Match[str]) -> str: var = m.group(1) if var == '$': return '$' return local_vars.get(var, vars.get(var, '')) return re.sub(r'\$(\$|\w*)', exp, string) ninja-1.13.2/misc/ninja_syntax_test.py000077500000000000000000000152571510764045400177710ustar00rootroot00000000000000#!/usr/bin/env python3 # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import unittest from typing import Dict try: from StringIO import StringIO except ImportError: from io import StringIO import ninja_syntax LONGWORD = 'a' * 10 LONGWORDWITHSPACES = 'a'*5 + '$ ' + 'a'*5 INDENT = ' ' class TestLineWordWrap(unittest.TestCase): def setUp(self) -> None: self.out = StringIO() self.n = ninja_syntax.Writer(self.out, width=8) def test_single_long_word(self) -> None: # We shouldn't wrap a single long word. self.n._line(LONGWORD) self.assertEqual(LONGWORD + '\n', self.out.getvalue()) def test_few_long_words(self) -> None: # We should wrap a line where the second word is overlong. self.n._line(' '.join(['x', LONGWORD, 'y'])) self.assertEqual(' $\n'.join(['x', INDENT + LONGWORD, INDENT + 'y']) + '\n', self.out.getvalue()) def test_comment_wrap(self) -> None: # Filenames should not be wrapped self.n.comment('Hello /usr/local/build-tools/bin') self.assertEqual('# Hello\n# /usr/local/build-tools/bin\n', self.out.getvalue()) def test_short_words_indented(self) -> None: # Test that indent is taking into account when breaking subsequent lines. # The second line should not be ' to tree', as that's longer than the # test layout width of 8. self.n._line('line_one to tree') self.assertEqual('''\ line_one $ to $ tree ''', self.out.getvalue()) def test_few_long_words_indented(self) -> None: # Check wrapping in the presence of indenting. 
self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1) self.assertEqual(' $\n'.join([' ' + 'x', ' ' + INDENT + LONGWORD, ' ' + INDENT + 'y']) + '\n', self.out.getvalue()) def test_escaped_spaces(self) -> None: self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y'])) self.assertEqual(' $\n'.join(['x', INDENT + LONGWORDWITHSPACES, INDENT + 'y']) + '\n', self.out.getvalue()) def test_fit_many_words(self) -> None: self.n = ninja_syntax.Writer(self.out, width=78) self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1) self.assertEqual('''\ command = cd ../../chrome; python ../tools/grit/grit/format/repack.py $ ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak $ ../out/Debug/gen/chrome/theme_resources_large.pak ''', self.out.getvalue()) def test_leading_space(self) -> None: self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping self.n.variable('foo', ['', '-bar', '-somethinglong'], 0) self.assertEqual('''\ foo = -bar $ -somethinglong ''', self.out.getvalue()) def test_embedded_dollar_dollar(self) -> None: self.n = ninja_syntax.Writer(self.out, width=15) # force wrapping self.n.variable('foo', ['a$$b', '-somethinglong'], 0) self.assertEqual('''\ foo = a$$b $ -somethinglong ''', self.out.getvalue()) def test_two_embedded_dollar_dollars(self) -> None: self.n = ninja_syntax.Writer(self.out, width=17) # force wrapping self.n.variable('foo', ['a$$b', '-somethinglong'], 0) self.assertEqual('''\ foo = a$$b $ -somethinglong ''', self.out.getvalue()) def test_leading_dollar_dollar(self) -> None: self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping self.n.variable('foo', ['$$b', '-somethinglong'], 0) self.assertEqual('''\ foo = $$b $ -somethinglong ''', self.out.getvalue()) def test_trailing_dollar_dollar(self) -> None: self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping 
self.n.variable('foo', ['a$$', '-somethinglong'], 0) self.assertEqual('''\ foo = a$$ $ -somethinglong ''', self.out.getvalue()) class TestBuild(unittest.TestCase): def setUp(self) -> None: self.out = StringIO() self.n = ninja_syntax.Writer(self.out) def test_variables_dict(self) -> None: self.n.build('out', 'cc', 'in', variables={'name': 'value'}) self.assertEqual('''\ build out: cc in name = value ''', self.out.getvalue()) def test_variables_list(self) -> None: self.n.build('out', 'cc', 'in', variables=[('name', 'value')]) self.assertEqual('''\ build out: cc in name = value ''', self.out.getvalue()) def test_implicit_outputs(self) -> None: self.n.build('o', 'cc', 'i', implicit_outputs='io') self.assertEqual('''\ build o | io: cc i ''', self.out.getvalue()) class TestExpand(unittest.TestCase): def test_basic(self) -> None: vars = {'x': 'X'} self.assertEqual('foo', ninja_syntax.expand('foo', vars)) def test_var(self) -> None: vars = {'xyz': 'XYZ'} self.assertEqual('fooXYZ', ninja_syntax.expand('foo$xyz', vars)) def test_vars(self) -> None: vars = {'x': 'X', 'y': 'YYY'} self.assertEqual('XYYY', ninja_syntax.expand('$x$y', vars)) def test_space(self) -> None: vars: Dict[str, str] = {} self.assertEqual('x y z', ninja_syntax.expand('x$ y$ z', vars)) def test_locals(self) -> None: vars = {'x': 'a'} local_vars = {'x': 'b'} self.assertEqual('a', ninja_syntax.expand('$x', vars)) self.assertEqual('b', ninja_syntax.expand('$x', vars, local_vars)) def test_double(self) -> None: self.assertEqual('a b$c', ninja_syntax.expand('a$ b$$c', {})) if __name__ == '__main__': unittest.main() ninja-1.13.2/misc/oss-fuzz/000077500000000000000000000000001510764045400154365ustar00rootroot00000000000000ninja-1.13.2/misc/oss-fuzz/build.sh000066400000000000000000000017641510764045400171010ustar00rootroot00000000000000#!/bin/bash -eu # Copyright 2020 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ################################################################################ cmake -Bbuild-cmake -H. cmake --build build-cmake cd $SRC/ninja/misc $CXX $CXXFLAGS -fdiagnostics-color -I/src/ninja/src -o fuzzer.o -c manifest_fuzzer.cc find .. -name "*.o" -exec ar rcs fuzz_lib.a {} \; $CXX $CXXFLAGS $LIB_FUZZING_ENGINE fuzzer.o -o $OUT/fuzzer fuzz_lib.a zip $OUT/fuzzer_seed_corpus.zip $SRC/sample_ninja_build ninja-1.13.2/misc/oss-fuzz/sample_ninja_build000066400000000000000000000003301510764045400211740ustar00rootroot00000000000000# build.ninja cc = clang cflags = -Weverything rule compile command = $cc $cflags -c $in -o $out rule link command = $cc $in -o $out build hello.o: compile hello.c build hello: link hello.o default hello ninja-1.13.2/misc/output_test.py000077500000000000000000000461311510764045400166170ustar00rootroot00000000000000#!/usr/bin/env python3 """Runs ./ninja and checks if the output is correct. In order to simulate a smart terminal it uses the 'script' command. """ import os import platform import signal import subprocess import sys import tempfile import time import unittest from textwrap import dedent import typing as T default_env = dict(os.environ) default_env.pop('NINJA_STATUS', None) default_env.pop('CLICOLOR_FORCE', None) default_env['TERM'] = '' NINJA_PATH = os.path.abspath('./ninja') def remove_non_visible_lines(raw_output: bytes) -> str: # When running in a smart terminal, Ninja uses CR (\r) to # return the cursor to the start of the current line, prints # something, then uses `\x1b[K` to clear everything until # the end of the line. 
# # Thus printing 'FOO', 'BAR', 'ZOO' on the same line, then # jumping to the next one results in the following output # on Posix: # # '\rFOO\x1b[K\rBAR\x1b[K\rZOO\x1b[K\r\n' # # The following splits the output at both \r, \n and \r\n # boundaries, which gives: # # [ '\r', 'FOO\x1b[K\r', 'BAR\x1b[K\r', 'ZOO\x1b[K\r\n' ] # decoded_lines = raw_output.decode('utf-8').splitlines(True) # Remove any item that ends with a '\r' as this means its # content will be overwritten by the next item in the list. # For the previous example, this gives: # # [ 'ZOO\x1b[K\r\n' ] # final_lines = [ l for l in decoded_lines if not l.endswith('\r') ] # Return a single string that concatenates all filtered lines # while removing any remaining \r in it. Needed to transform # \r\n into \n. # # "ZOO\x1b[K\n' # return ''.join(final_lines).replace('\r', '') class BuildDir: def __init__(self, build_ninja: str): self.build_ninja = dedent(build_ninja) self.d = None def __enter__(self): self.d = tempfile.TemporaryDirectory() with open(os.path.join(self.d.name, 'build.ninja'), 'w') as f: f.write(self.build_ninja) f.flush() return self def __exit__(self, exc_type, exc_val, exc_tb): self.d.cleanup() @property def path(self) -> str: return os.path.realpath(self.d.name) def run( self, flags: T.Optional[str] = None, pipe: bool = False, raw_output: bool = False, env: T.Dict[str, str] = default_env, print_err_output = True, ) -> str: """Run Ninja command, and get filtered output. Args: flags: Extra arguments passed to Ninja. pipe: set to True to run Ninja in a non-interactive terminal. If False (the default), this runs Ninja in a pty to simulate a smart terminal (this feature cannot work on Windows!). raw_output: set to True to return the raw, unfiltered command output. env: Optional environment dictionary to run the command in. print_err_output: set to False if the test expects ninja to print something to stderr. (Otherwise, an error message from Ninja probably represents a failed test.) 
Returns: A UTF-8 string corresponding to the output (stdout only) of the Ninja command. By default, partial lines that were overwritten are removed according to the rules described in the comments below. """ ninja_cmd = '{} {}'.format(NINJA_PATH, flags if flags else '') try: if pipe: output = subprocess.check_output( [ninja_cmd], shell=True, cwd=self.d.name, env=env) elif platform.system() == 'Darwin': output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd], cwd=self.d.name, env=env) else: output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'], cwd=self.d.name, env=env) except subprocess.CalledProcessError as err: if print_err_output: sys.stdout.buffer.write(err.output) err.cooked_output = remove_non_visible_lines(err.output) raise err if raw_output: return output.decode('utf-8') return remove_non_visible_lines(output) def run( build_ninja: str, flags: T.Optional[str] = None, pipe: bool = False, raw_output: bool = False, env: T.Dict[str, str] = default_env, print_err_output = True, ) -> str: """Run Ninja with a given build plan in a temporary directory. """ with BuildDir(build_ninja) as b: return b.run(flags, pipe, raw_output, env, print_err_output) @unittest.skipIf(platform.system() == 'Windows', 'These test methods do not work on Windows') class Output(unittest.TestCase): BUILD_SIMPLE_ECHO = '\n'.join(( 'rule echo', ' command = printf "do thing"', ' description = echo $out', '', 'build a: echo', '', )) def _test_expected_error(self, plan: str, flags: T.Optional[str],expected: str, *args, exit_code: T.Optional[int]=None, **kwargs)->None: """Run Ninja with a given plan and flags, and verify its cooked output against an expected content. 
All *args and **kwargs are passed to the `run` function """ actual = '' kwargs['print_err_output'] = False with self.assertRaises(subprocess.CalledProcessError) as cm: run(plan, flags, *args, **kwargs) actual = cm.exception.cooked_output if exit_code is not None: self.assertEqual(cm.exception.returncode, exit_code) self.assertEqual(expected, actual) def test_issue_1418(self) -> None: self.assertEqual(run( '''rule echo command = sleep $delay && echo $out description = echo $out build a: echo delay = 3 build b: echo delay = 2 build c: echo delay = 1 ''', '-j3'), '''[1/3] echo c\x1b[K c [2/3] echo b\x1b[K b [3/3] echo a\x1b[K a ''') def test_issue_1214(self) -> None: print_red = '''rule echo command = printf '\x1b[31mred\x1b[0m' description = echo $out build a: echo ''' # Only strip color when ninja's output is piped. self.assertEqual(run(print_red), '''[1/1] echo a\x1b[K \x1b[31mred\x1b[0m ''') self.assertEqual(run(print_red, pipe=True), '''[1/1] echo a red ''') # Even in verbose mode, colors should still only be stripped when piped. self.assertEqual(run(print_red, flags='-v'), '''[1/1] printf '\x1b[31mred\x1b[0m' \x1b[31mred\x1b[0m ''') self.assertEqual(run(print_red, flags='-v', pipe=True), '''[1/1] printf '\x1b[31mred\x1b[0m' red ''') # CLICOLOR_FORCE=1 can be used to disable escape code stripping. env = default_env.copy() env['CLICOLOR_FORCE'] = '1' self.assertEqual(run(print_red, pipe=True, env=env), '''[1/1] echo a \x1b[31mred\x1b[0m ''') def test_issue_1966(self) -> None: self.assertEqual(run( '''rule cat command = cat $rspfile $rspfile > $out rspfile = cat.rsp rspfile_content = a b c build a: cat ''', '-j3'), '''[1/1] cat cat.rsp cat.rsp > a\x1b[K ''') def test_issue_2499(self) -> None: # This verifies that Ninja prints its status line updates on a single # line when running in a smart terminal, and when commands do not have # any output. Get the raw command output which includes CR (\r) codes # and all content that was printed by Ninja. 
self.assertEqual(run( '''rule touch command = touch $out build foo: touch build bar: touch foo build zoo: touch bar ''', flags='-j1 zoo', raw_output=True).split('\r'), [ '', '[0/3] touch foo\x1b[K', '[1/3] touch foo\x1b[K', '[1/3] touch bar\x1b[K', '[2/3] touch bar\x1b[K', '[2/3] touch zoo\x1b[K', '[3/3] touch zoo\x1b[K', '\n', ]) def test_pr_1685(self) -> None: # Running those tools without .ninja_deps and .ninja_log shouldn't fail. self.assertEqual(run('', flags='-t recompact'), '') self.assertEqual(run('', flags='-t restat'), '') def test_issue_2048(self) -> None: with tempfile.TemporaryDirectory() as d: with open(os.path.join(d, 'build.ninja'), 'w'): pass with open(os.path.join(d, '.ninja_log'), 'w') as f: f.write('# ninja log v4\n') try: output = subprocess.check_output([NINJA_PATH, '-t', 'recompact'], cwd=d, env=default_env, stderr=subprocess.STDOUT, text=True ) self.assertEqual( output.strip(), "ninja: warning: build log version is too old; starting over" ) except subprocess.CalledProcessError as err: self.fail("non-zero exit code with: " + err.output) def test_pr_2540(self)->None: py = sys.executable plan = f'''\ rule CUSTOM_COMMAND command = $COMMAND build 124: CUSTOM_COMMAND COMMAND = {py} -c 'exit(124)' build 127: CUSTOM_COMMAND COMMAND = {py} -c 'exit(127)' build 130: CUSTOM_COMMAND COMMAND = {py} -c 'exit(130)' build 137: CUSTOM_COMMAND COMMAND = {py} -c 'exit(137)' build success: CUSTOM_COMMAND COMMAND = sleep 0.3; echo success ''' # Disable colors env = default_env.copy() env['TERM'] = 'dumb' self._test_expected_error( plan, '124', f'''[1/1] {py} -c 'exit(124)' FAILED: [code=124] 124 \n{py} -c 'exit(124)' ninja: build stopped: subcommand failed. ''', exit_code=124, env=env, ) self._test_expected_error( plan, '127', f'''[1/1] {py} -c 'exit(127)' FAILED: [code=127] 127 \n{py} -c 'exit(127)' ninja: build stopped: subcommand failed. 
''', exit_code=127, env=env, ) self._test_expected_error( plan, '130', 'ninja: build stopped: interrupted by user.\n', exit_code=130, env=env, ) self._test_expected_error( plan, '137', f'''[1/1] {py} -c 'exit(137)' FAILED: [code=137] 137 \n{py} -c 'exit(137)' ninja: build stopped: subcommand failed. ''', exit_code=137, env=env, ) self._test_expected_error( plan, 'non-existent-target', "ninja: error: unknown target 'non-existent-target'\n", exit_code=1, env=env, ) self._test_expected_error( plan, '-j2 success 127', f'''[1/2] {py} -c 'exit(127)' FAILED: [code=127] 127 \n{py} -c 'exit(127)' [2/2] sleep 0.3; echo success success ninja: build stopped: subcommand failed. ''', exit_code=127, env=env, ) def test_depfile_directory_creation(self) -> None: b = BuildDir('''\ rule touch command = touch $out && echo "$out: extra" > $depfile build somewhere/out: touch depfile = somewhere_else/out.d ''') with b: self.assertEqual(b.run('', pipe=True), dedent('''\ [1/1] touch somewhere/out && echo "somewhere/out: extra" > somewhere_else/out.d ''')) self.assertTrue(os.path.isfile(os.path.join(b.d.name, "somewhere", "out"))) self.assertTrue(os.path.isfile(os.path.join(b.d.name, "somewhere_else", "out.d"))) def test_status(self) -> None: self.assertEqual(run(''), 'ninja: no work to do.\n') self.assertEqual(run('', pipe=True), 'ninja: no work to do.\n') self.assertEqual(run('', flags='--quiet'), '') def test_ninja_status_default(self) -> None: 'Do we show the default status by default?' self.assertEqual(run(Output.BUILD_SIMPLE_ECHO), '[1/1] echo a\x1b[K\ndo thing\n') def test_ninja_status_quiet(self) -> None: 'Do we suppress the status information when --quiet is specified?' 
output = run(Output.BUILD_SIMPLE_ECHO, flags='--quiet') self.assertEqual(output, 'do thing\n') def test_entering_directory_on_stdout(self) -> None: output = run(Output.BUILD_SIMPLE_ECHO, flags='-C$PWD', pipe=True) self.assertEqual(output.splitlines()[0][:25], "ninja: Entering directory") def test_tool_inputs(self) -> None: plan = ''' rule cat command = cat $in $out build out1 : cat in1 build out2 : cat in2 out1 build out3 : cat out2 out1 | implicit || order_only ''' self.assertEqual(run(plan, flags='-t inputs out3'), '''implicit in1 in2 order_only out1 out2 ''') self.assertEqual(run(plan, flags='-t inputs --dependency-order out3'), '''in2 in1 out1 out2 implicit order_only ''') # Verify that results are shell-escaped by default, unless --no-shell-escape # is used. Also verify that phony outputs are never part of the results. quote = '"' if platform.system() == "Windows" else "'" plan = ''' rule cat command = cat $in $out build out1 : cat in1 build out$ 2 : cat out1 build out$ 3 : phony out$ 2 build all: phony out$ 3 ''' # Quoting changes the order of results when sorting alphabetically. self.assertEqual(run(plan, flags='-t inputs all'), f'''{quote}out 2{quote} in1 out1 ''') self.assertEqual(run(plan, flags='-t inputs --no-shell-escape all'), '''in1 out 2 out1 ''') # But not when doing dependency order. 
self.assertEqual( run( plan, flags='-t inputs --dependency-order all' ), f'''in1 out1 {quote}out 2{quote} ''') self.assertEqual( run( plan, flags='-t inputs --dependency-order --no-shell-escape all' ), f'''in1 out1 out 2 ''') self.assertEqual( run( plan, flags='-t inputs --dependency-order --no-shell-escape --print0 all' ), f'''in1\0out1\0out 2\0''' ) def test_tool_compdb_targets(self) -> None: plan = ''' rule cat command = cat $in $out build out1 : cat in1 build out2 : cat in2 out1 build out3 : cat out2 out1 build out4 : cat in4 ''' self._test_expected_error(plan, '-t compdb-targets', '''ninja: error: compdb-targets expects the name of at least one target usage: ninja -t compdb [-hx] target [targets] options: -h display this help message -x expand @rspfile style response file invocations ''') self._test_expected_error(plan, '-t compdb-targets in1', "ninja: fatal: 'in1' is not a target (i.e. it is not an output of any `build` statement)\n") self._test_expected_error(plan, '-t compdb-targets nonexistent_target', "ninja: fatal: unknown target 'nonexistent_target'\n") with BuildDir(plan) as b: actual = b.run(flags='-t compdb-targets out3') expected = f'''[ {{ "directory": "{b.path}", "command": "cat in1 out1", "file": "in1", "output": "out1" }}, {{ "directory": "{b.path}", "command": "cat in2 out1 out2", "file": "in2", "output": "out2" }}, {{ "directory": "{b.path}", "command": "cat out2 out1 out3", "file": "out2", "output": "out3" }} ] ''' self.assertEqual(expected, actual) def test_tool_multi_inputs(self) -> None: plan = ''' rule cat command = cat $in $out build out1 : cat in1 build out2 : cat in1 in2 build out3 : cat in1 in2 in3 ''' self.assertEqual(run(plan, flags='-t multi-inputs out1'), '''out1in1 '''.replace("", "\t")) self.assertEqual(run(plan, flags='-t multi-inputs out1 out2 out3'), '''out1in1 out2in1 out2in2 out3in1 out3in2 out3in3 '''.replace("", "\t")) self.assertEqual(run(plan, flags='-t multi-inputs -d: out1'), '''out1:in1 ''') self.assertEqual( run( 
plan, flags='-t multi-inputs -d, --print0 out1 out2' ), '''out1,in1\0out2,in1\0out2,in2\0''' ) def test_explain_output(self): b = BuildDir('''\ build .FORCE: phony rule create_if_non_exist command = [ -e $out ] || touch $out restat = true rule write command = cp $in $out build input : create_if_non_exist .FORCE build mid : write input build output : write mid default output ''') with b: # The explain output is shown just before the relevant build: self.assertEqual(b.run('-v -d explain'), dedent('''\ ninja explain: .FORCE is dirty [1/3] [ -e input ] || touch input ninja explain: input is dirty [2/3] cp input mid ninja explain: mid is dirty [3/3] cp mid output ''')) # Don't print "ninja explain: XXX is dirty" for inputs that are # pruned from the graph by an earlier restat. self.assertEqual(b.run('-v -d explain'), dedent('''\ ninja explain: .FORCE is dirty [1/3] [ -e input ] || touch input ''')) def test_issue_2586(self): """This shouldn't hang""" plan = '''rule echo command = echo echo build dep: echo build console1: echo dep pool = console build console2: echo pool = console build all: phony console1 console2 default all ''' self.assertEqual(run(plan, flags='-j2', env={'NINJA_STATUS':''}), '''echo echo echo echo echo echo echo echo echo ''') def test_issue_2621(self): """Should result in "multiple rules generate" error""" plan = r"""rule dd command = printf 'ninja_dyndep_version = 1\nbuild stamp-$n | out: dyndep\n' > $out rule touch command = touch stamp-$n out dyndep = dd-$n build dd-1: dd n = 1 build dd-2: dd n = 2 build stamp-1: touch || dd-1 n = 1 build stamp-2: touch || dd-2 n = 2 """ self._test_expected_error( plan, "-v", r"""[1/4] printf 'ninja_dyndep_version = 1\nbuild stamp-1 | out: dyndep\n' > dd-1 [2/4] printf 'ninja_dyndep_version = 1\nbuild stamp-2 | out: dyndep\n' > dd-2 ninja: build stopped: multiple rules generate out. 
""", ) def test_issue_2681(self): """Ninja should return a status code of 130 when interrupted.""" plan = r"""rule sleep command = sleep 10 build foo: sleep """ with BuildDir(plan) as b: for signum in (signal.SIGINT, signal.SIGHUP, signal.SIGTERM): proc = subprocess.Popen([NINJA_PATH, "foo"], cwd=b.path, env=default_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Sleep a bit to let Ninja start the build, otherwise the signal could be received # before it, and returncode will be -2. time.sleep(0.2) os.kill(proc.pid, signum) proc.wait() self.assertEqual(proc.returncode, 130, msg=f"For signal {signum}") if __name__ == '__main__': unittest.main() ninja-1.13.2/misc/packaging/000077500000000000000000000000001510764045400155625ustar00rootroot00000000000000ninja-1.13.2/misc/packaging/ninja.spec000066400000000000000000000023361510764045400175410ustar00rootroot00000000000000Summary: Ninja is a small build system with a focus on speed. Name: ninja Version: %{ver} Release: %{rel}%{?dist} Group: Development/Tools License: Apache 2.0 URL: https://github.com/ninja-build/ninja Source0: %{name}-%{version}-%{rel}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{rel} BuildRequires: asciidoc %description Ninja is yet another build system. It takes as input the interdependencies of files (typically source code and output executables) and orchestrates building them, quickly. Ninja joins a sea of other build systems. Its distinguishing goal is to be fast. It is born from my work on the Chromium browser project, which has over 30,000 source files and whose other build systems (including one built from custom non-recursive Makefiles) can take ten seconds to start building after changing one file. Ninja is under a second. %prep %setup -q -n %{name}-%{version}-%{rel} %build echo Building.. 
./configure.py --bootstrap ./ninja manual %install mkdir -p %{buildroot}%{_bindir} %{buildroot}%{_docdir} cp -p ninja %{buildroot}%{_bindir}/ %files %defattr(-, root, root) %doc COPYING README.md doc/manual.html %{_bindir}/* %clean rm -rf %{buildroot} #The changelog is built automatically from Git history %changelog ninja-1.13.2/misc/packaging/rpmbuild.sh000077500000000000000000000020361510764045400177400ustar00rootroot00000000000000#!/bin/bash echo Building ninja RPMs.. GITROOT=$(git rev-parse --show-toplevel) cd $GITROOT VER=1.0 REL=$(git rev-parse --short HEAD)git RPMTOPDIR=$GITROOT/rpm-build echo "Ver: $VER, Release: $REL" # Create tarball mkdir -p $RPMTOPDIR/{SOURCES,SPECS} git archive --format=tar --prefix=ninja-${VER}-${REL}/ HEAD | gzip -c > $RPMTOPDIR/SOURCES/ninja-${VER}-${REL}.tar.gz # Convert git log to RPM's ChangeLog format (shown with rpm -qp --changelog ) sed -e "s/%{ver}/$VER/" -e "s/%{rel}/$REL/" misc/packaging/ninja.spec > $RPMTOPDIR/SPECS/ninja.spec git log --format="* %cd %aN%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> $RPMTOPDIR/SPECS/ninja.spec # Build SRC and binary RPMs rpmbuild --quiet \ --define "_topdir $RPMTOPDIR" \ --define "_rpmdir $PWD" \ --define "_srcrpmdir $PWD" \ --define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \ -ba $RPMTOPDIR/SPECS/ninja.spec && rm -rf $RPMTOPDIR && echo Done ninja-1.13.2/misc/write_fake_manifests.py000077500000000000000000000242551510764045400204140ustar00rootroot00000000000000#!/usr/bin/env python3 """Writes large manifest files, for manifest parser performance testing. The generated manifest files are (eerily) similar in appearance and size to the ones used in the Chromium project. Usage: python misc/write_fake_manifests.py outdir # Will run for about 5s. The program contains a hardcoded random seed, so it will generate the same output every time it runs. By changing the seed, it's easy to generate many different sets of manifest files. 
""" import argparse import contextlib import os import random import sys from typing import Generator, Optional, Tuple, List, Set import ninja_syntax def paretoint(avg: float, alpha: float) -> int: """Returns a random integer that's avg on average, following a power law. alpha determines the shape of the power curve. alpha has to be larger than 1. The closer alpha is to 1, the higher the variation of the returned numbers.""" return int(random.paretovariate(alpha) * avg / (alpha / (alpha - 1))) # Based on http://neugierig.org/software/chromium/class-name-generator.html def moar(avg_options: float, p_suffix: float) -> str: kStart = ['render', 'web', 'browser', 'tab', 'content', 'extension', 'url', 'file', 'sync', 'content', 'http', 'profile'] kOption = ['view', 'host', 'holder', 'container', 'impl', 'ref', 'delegate', 'widget', 'proxy', 'stub', 'context', 'manager', 'master', 'watcher', 'service', 'file', 'data', 'resource', 'device', 'info', 'provider', 'internals', 'tracker', 'api', 'layer'] kOS = ['win', 'mac', 'aura', 'linux', 'android', 'unittest', 'browsertest'] num_options = min(paretoint(avg_options, alpha=4), 5) # The original allows kOption to repeat as long as no consecutive options # repeat. This version doesn't allow any option repetition. 
name = [random.choice(kStart)] + random.sample(kOption, num_options) if random.random() < p_suffix: name.append(random.choice(kOS)) return '_'.join(name) class GenRandom(object): def __init__(self, src_dir: str) -> None: self.seen_names: Set[Optional[str]] = set([None]) self.seen_defines: Set[Optional[str]] = set([None]) self.src_dir = src_dir def _unique_string(self, seen: Set[Optional[str]], avg_options: float = 1.3, p_suffix: float = 0.1) -> str: s = None while s in seen: s = moar(avg_options, p_suffix) seen.add(s) return s # type: ignore # Incompatible return value type def _n_unique_strings(self, n: int) -> List[str]: seen: Set[Optional[str]] = set([None]) return [self._unique_string(seen, avg_options=3, p_suffix=0.4) for _ in range(n)] def target_name(self) -> str: return self._unique_string(p_suffix=0, seen=self.seen_names) def path(self) -> str: return os.path.sep.join([ self._unique_string(self.seen_names, avg_options=1, p_suffix=0) for _ in range(1 + paretoint(0.6, alpha=4))]) def src_obj_pairs(self, path: str, name: str) -> List[Tuple[str, str]]: num_sources = paretoint(55, alpha=2) + 1 return [(os.path.join(self.src_dir, path, s + '.cc'), os.path.join('obj', path, '%s.%s.o' % (name, s))) for s in self._n_unique_strings(num_sources)] def defines(self) -> List[str]: return [ '-DENABLE_' + self._unique_string(self.seen_defines).upper() for _ in range(paretoint(20, alpha=3))] LIB, EXE = 0, 1 class Target(object): def __init__(self, gen: GenRandom, kind: int) -> None: self.name = gen.target_name() self.dir_path = gen.path() self.ninja_file_path = os.path.join( 'obj', self.dir_path, self.name + '.ninja') self.src_obj_pairs = gen.src_obj_pairs(self.dir_path, self.name) if kind == LIB: self.output = os.path.join('lib' + self.name + '.a') elif kind == EXE: self.output = os.path.join(self.name) self.defines = gen.defines() self.deps: List[Target] = [] self.kind = kind self.has_compile_depends = random.random() < 0.4 def write_target_ninja(ninja: 
ninja_syntax.Writer, target: Target, src_dir: str) -> None: compile_depends = None if target.has_compile_depends: compile_depends = os.path.join( 'obj', target.dir_path, target.name + '.stamp') ninja.build(compile_depends, 'stamp', target.src_obj_pairs[0][0]) ninja.newline() ninja.variable('defines', target.defines) ninja.variable('includes', '-I' + src_dir) ninja.variable('cflags', ['-Wall', '-fno-rtti', '-fno-exceptions']) ninja.newline() for src, obj in target.src_obj_pairs: ninja.build(obj, 'cxx', src, implicit=compile_depends) ninja.newline() deps = [dep.output for dep in target.deps] libs = [dep.output for dep in target.deps if dep.kind == LIB] if target.kind == EXE: ninja.variable('libs', libs) if sys.platform == "darwin": ninja.variable('ldflags', '-Wl,-pie') link = { LIB: 'alink', EXE: 'link'}[target.kind] ninja.build(target.output, link, [obj for _, obj in target.src_obj_pairs], implicit=deps) def write_sources(target: Target, root_dir: str) -> None: need_main = target.kind == EXE includes = [] # Include siblings. for cc_filename, _ in target.src_obj_pairs: h_filename = os.path.basename(os.path.splitext(cc_filename)[0] + '.h') includes.append(h_filename) # Include deps. 
for dep in target.deps: for cc_filename, _ in dep.src_obj_pairs: h_filename = os.path.basename( os.path.splitext(cc_filename)[0] + '.h') includes.append("%s/%s" % (dep.dir_path, h_filename)) for cc_filename, _ in target.src_obj_pairs: cc_path = os.path.join(root_dir, cc_filename) h_path = os.path.splitext(cc_path)[0] + '.h' namespace = os.path.basename(target.dir_path) class_ = os.path.splitext(os.path.basename(cc_filename))[0] try: os.makedirs(os.path.dirname(cc_path)) except OSError: pass with open(h_path, 'w') as f: f.write('namespace %s { struct %s { %s(); }; }' % (namespace, class_, class_)) with open(cc_path, 'w') as f: for include in includes: f.write('#include "%s"\n' % include) f.write('\n') f.write('namespace %s { %s::%s() {} }' % (namespace, class_, class_)) if need_main: f.write('int main(int argc, char **argv) {}\n') need_main = False def write_master_ninja(master_ninja: ninja_syntax.Writer, targets: List[Target]) -> None: """Writes master build.ninja file, referencing all given subninjas.""" master_ninja.variable('cxx', 'c++') master_ninja.variable('ld', '$cxx') if sys.platform == 'darwin': master_ninja.variable('alink', 'libtool -static') else: master_ninja.variable('alink', 'ar rcs') master_ninja.newline() master_ninja.pool('link_pool', depth=4) master_ninja.newline() master_ninja.rule('cxx', description='CXX $out', command='$cxx -MMD -MF $out.d $defines $includes $cflags -c $in -o $out', depfile='$out.d', deps='gcc') master_ninja.rule('alink', description='ARCHIVE $out', command='rm -f $out && $alink -o $out $in') master_ninja.rule('link', description='LINK $out', pool='link_pool', command='$ld $ldflags -o $out $in $libs') master_ninja.rule('stamp', description='STAMP $out', command='touch $out') master_ninja.newline() for target in targets: master_ninja.subninja(target.ninja_file_path) master_ninja.newline() master_ninja.comment('Short names for targets.') for target in targets: if target.name != target.output: master_ninja.build(target.name, 
'phony', target.output) master_ninja.newline() master_ninja.build('all', 'phony', [target.output for target in targets]) master_ninja.default('all') @contextlib.contextmanager def FileWriter(path: str) -> Generator[ninja_syntax.Writer, None, None]: """Context manager for a ninja_syntax object writing to a file.""" try: os.makedirs(os.path.dirname(path)) except OSError: pass f = open(path, 'w') yield ninja_syntax.Writer(f) f.close() def random_targets(num_targets: int, src_dir: str) -> List[Target]: gen = GenRandom(src_dir) # N-1 static libraries, and 1 executable depending on all of them. targets = [Target(gen, LIB) for i in range(num_targets - 1)] for i in range(len(targets)): targets[i].deps = [t for t in targets[0:i] if random.random() < 0.05] last_target = Target(gen, EXE) last_target.deps = targets[:] last_target.src_obj_pairs = last_target.src_obj_pairs[0:10] # Trim. targets.append(last_target) return targets def main() -> None: parser = argparse.ArgumentParser() parser.add_argument('-s', '--sources', nargs="?", const="src", help='write sources to directory (relative to output directory)') parser.add_argument('-t', '--targets', type=int, default=1500, help='number of targets (default: 1500)') parser.add_argument('-S', '--seed', type=int, help='random seed', default=12345) parser.add_argument('outdir', help='output directory') args = parser.parse_args() root_dir = args.outdir random.seed(args.seed) do_write_sources = args.sources is not None src_dir = args.sources if do_write_sources else "src" targets = random_targets(args.targets, src_dir) for target in targets: with FileWriter(os.path.join(root_dir, target.ninja_file_path)) as n: write_target_ninja(n, target, src_dir) if do_write_sources: write_sources(target, root_dir) with FileWriter(os.path.join(root_dir, 'build.ninja')) as master_ninja: master_ninja.width = 120 write_master_ninja(master_ninja, targets) if __name__ == '__main__': sys.exit(main()) # type: ignore # "main" does not return a value 
ninja-1.13.2/misc/zsh-completion000066400000000000000000000054471510764045400165460ustar00rootroot00000000000000#compdef ninja # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Add the following to your .zshrc to tab-complete ninja targets # fpath=(path/to/ninja/misc/zsh-completion $fpath) (( $+functions[_ninja-get-targets] )) || _ninja-get-targets() { dir="." if [ -n "${opt_args[-C]}" ]; then eval dir="${opt_args[-C]}" fi file="build.ninja" if [ -n "${opt_args[-f]}" ]; then eval file="${opt_args[-f]}" fi targets_command="ninja -f \"${file}\" -C \"${dir}\" -t targets all" eval ${targets_command} 2>/dev/null | cut -d: -f1 } (( $+functions[_ninja-get-tools] )) || _ninja-get-tools() { # remove the first line; remove the leading spaces; replace spaces with colon ninja -t list 2> /dev/null | sed -e '1d;s/^ *//;s/ \+/:/' } (( $+functions[_ninja-get-modes] )) || _ninja-get-modes() { # remove the first line; remove the last line; remove the leading spaces; replace spaces with colon ninja -d list 2> /dev/null | sed -e '1d;$d;s/^ *//;s/ \+/:/' } (( $+functions[_ninja-modes] )) || _ninja-modes() { local -a modes modes=(${(fo)"$(_ninja-get-modes)"}) _describe 'modes' modes } (( $+functions[_ninja-tools] )) || _ninja-tools() { local -a tools tools=(${(fo)"$(_ninja-get-tools)"}) _describe 'tools' tools } (( $+functions[_ninja-targets] )) || _ninja-targets() { local -a targets targets=(${(fo)"$(_ninja-get-targets)"}) _describe 'targets' targets } 
_arguments \ '(- *)'{-h,--help}'[Show help]' \ '(- *)--version[Print ninja version]' \ '-C+[Change to directory before doing anything else]:directories:_directories' \ '-f+[Specify input build file (default=build.ninja)]:files:_files' \ '-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \ '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \ '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \ '-n[Dry run (do not run commands but act like they succeeded)]' \ '(-v --verbose --quiet)'{-v,--verbose}'[Show all command lines while building]' \ "(-v --verbose --quiet)--quiet[Don't show progress status, just command output]" \ '-d+[Enable debugging (use -d list to list modes)]:modes:_ninja-modes' \ '-t+[Run a subtool (use -t list to list subtools)]:tools:_ninja-tools' \ '*::targets:_ninja-targets' ninja-1.13.2/src/000077500000000000000000000000001510764045400134725ustar00rootroot00000000000000ninja-1.13.2/src/browse.cc000066400000000000000000000047651510764045400153160ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "browse.h" #include #include #include #include #include #include "build/browse_py.h" using namespace std; void RunBrowsePython(State* state, const char* ninja_command, const char* input_file, int argc, char* argv[]) { // Fork off a Python process and have it run our code via its stdin. 
// (Actually the Python process becomes the parent.) int pipefd[2]; if (pipe(pipefd) < 0) { perror("ninja: pipe"); return; } pid_t pid = fork(); if (pid < 0) { perror("ninja: fork"); return; } if (pid > 0) { // Parent. close(pipefd[1]); do { if (dup2(pipefd[0], 0) < 0) { perror("ninja: dup2"); break; } std::vector command; command.push_back(NINJA_PYTHON); command.push_back("-"); command.push_back("--ninja-command"); command.push_back(ninja_command); command.push_back("-f"); command.push_back(input_file); for (int i = 0; i < argc; i++) { command.push_back(argv[i]); } command.push_back(NULL); execvp(command[0], const_cast(&command[0])); if (errno == ENOENT) { printf("ninja: %s is required for the browse tool\n", NINJA_PYTHON); } else { perror("ninja: execvp"); } } while (false); _exit(1); } else { // Child. close(pipefd[0]); // Write the script file into the stdin of the Python process. // Only write n - 1 bytes, because Python 3.11 does not allow null // bytes in source code anymore, so avoid writing the null string // terminator. // See https://github.com/python/cpython/issues/96670 auto kBrowsePyLength = sizeof(kBrowsePy) - 1; ssize_t len = write(pipefd[1], kBrowsePy, kBrowsePyLength); if (len < (ssize_t)kBrowsePyLength) perror("ninja: write"); close(pipefd[1]); exit(0); } } ninja-1.13.2/src/browse.h000066400000000000000000000021711510764045400151450ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_BROWSE_H_ #define NINJA_BROWSE_H_ struct State; /// Run in "browse" mode, which execs a Python webserver. /// \a ninja_command is the command used to invoke ninja. /// \a args are the number of arguments to be passed to the Python script. /// \a argv are arguments to be passed to the Python script. /// This function does not return if it runs successfully. void RunBrowsePython(State* state, const char* ninja_command, const char* input_file, int argc, char* argv[]); #endif // NINJA_BROWSE_H_ ninja-1.13.2/src/browse.py000077500000000000000000000171131510764045400153530ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright 2001 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple web server for browsing dependency graph data. This script is inlined into the final executable and spawned by it when needed. 
""" try: import http.server as httpserver import socketserver except ImportError: import BaseHTTPServer as httpserver # type: ignore # Name "httpserver" already defined import SocketServer as socketserver # type: ignore # Name "socketserver" already defined import argparse import os import socket import subprocess import sys import webbrowser if sys.version_info >= (3, 2): from html import escape else: from cgi import escape try: from urllib.request import unquote # type: ignore # Module "urllib.request" has no attribute "unquote" except ImportError: from urllib2 import unquote from collections import namedtuple from typing import Tuple, Any Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs']) # Ideally we'd allow you to navigate to a build edge or a build node, # with appropriate views for each. But there's no way to *name* a build # edge so we can only display nodes. # # For a given node, it has at most one input edge, which has n # different inputs. This becomes node.inputs. (We leave out the # outputs of the input edge due to what follows.) The node can have # multiple dependent output edges. Rather than attempting to display # those, they are summarized by taking the union of all their outputs. # # This means there's no single view that shows you all inputs and outputs # of an edge. But I think it's less confusing than alternatives. 
def match_strip(line: str, prefix: str) -> Tuple[bool, str]: if not line.startswith(prefix): return (False, line) return (True, line[len(prefix):]) def html_escape(text: str) -> str: return escape(text, quote=True) def parse(text: str) -> Node: lines = iter(text.split('\n')) target = None rule = None inputs = [] outputs = [] try: target = next(lines)[:-1] # strip trailing colon line = next(lines) (match, rule) = match_strip(line, ' input: ') if match: (match, line) = match_strip(next(lines), ' ') while match: type = "" (match, line) = match_strip(line, '| ') if match: type = 'implicit' (match, line) = match_strip(line, '|| ') if match: type = 'order-only' inputs.append((line, type)) (match, line) = match_strip(next(lines), ' ') match, _ = match_strip(line, ' outputs:') if match: (match, line) = match_strip(next(lines), ' ') while match: outputs.append(line) (match, line) = match_strip(next(lines), ' ') except StopIteration: pass return Node(inputs, rule, target, outputs) def create_page(body: str) -> str: return ''' ''' + body def generate_html(node: Node) -> str: document = ['

%s

' % html_escape(node.target)] if node.inputs: document.append('

target is built using rule %s of

' % html_escape(node.rule)) if len(node.inputs) > 0: document.append('
') for input, type in sorted(node.inputs): extra = '' if type: extra = ' (%s)' % html_escape(type) document.append('%s%s
' % (html_escape(input), html_escape(input), extra)) document.append('
') if node.outputs: document.append('

dependent edges build:

') document.append('
') for output in sorted(node.outputs): document.append('%s
' % (html_escape(output), html_escape(output))) document.append('
') return '\n'.join(document) def ninja_dump(target: str) -> Tuple[str, str, int]: cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) return proc.communicate() + (proc.returncode,) class RequestHandler(httpserver.BaseHTTPRequestHandler): def do_GET(self) -> None: assert self.path[0] == '/' target = unquote(self.path[1:]) if target == '': self.send_response(302) self.send_header('Location', '?' + args.initial_target) self.end_headers() return if not target.startswith('?'): self.send_response(404) self.end_headers() return target = target[1:] ninja_output, ninja_error, exit_code = ninja_dump(target) if exit_code == 0: page_body = generate_html(parse(ninja_output.strip())) else: # Relay ninja's error message. page_body = '

%s

' % html_escape(ninja_error) self.send_response(200) self.end_headers() self.wfile.write(create_page(page_body).encode('utf-8')) def log_message(self, format: str, *args: Any) -> None: pass # Swallow console spam. parser = argparse.ArgumentParser(prog='ninja -t browse') parser.add_argument('--port', '-p', default=8000, type=int, help='Port number to use (default %(default)d)') parser.add_argument('--hostname', '-a', default='localhost', type=str, help='Hostname to bind to (default %(default)s)') parser.add_argument('--no-browser', action='store_true', help='Do not open a webbrowser on startup.') parser.add_argument('--ninja-command', default='ninja', help='Path to ninja binary (default %(default)s)') parser.add_argument('-f', default='build.ninja', help='Path to build.ninja file (default %(default)s)') parser.add_argument('initial_target', default='all', nargs='?', help='Initial target to show (default %(default)s)') class HTTPServer(socketserver.ThreadingMixIn, httpserver.HTTPServer): # terminate server immediately when Python exits. daemon_threads = True args = parser.parse_args() port = args.port hostname = args.hostname httpd = HTTPServer((hostname,port), RequestHandler) try: if hostname == "": hostname = socket.gethostname() print('Web server running on %s:%d, ctl-C to abort...' % (hostname,port) ) print('Web server pid %d' % os.getpid(), file=sys.stderr ) if not args.no_browser: webbrowser.open_new('http://%s:%s' % (hostname, port) ) httpd.serve_forever() except KeyboardInterrupt: print() pass # Swallow console spam. ninja-1.13.2/src/build.cc000066400000000000000000001017301510764045400151020ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "build.h" #include #include #include #include #include #include #include #if defined(__SVR4) && defined(__sun) #include #endif #include "build_log.h" #include "clparser.h" #include "debug_flags.h" #include "depfile_parser.h" #include "deps_log.h" #include "disk_interface.h" #include "exit_status.h" #include "explanations.h" #include "graph.h" #include "jobserver.h" #include "metrics.h" #include "state.h" #include "status.h" #include "util.h" using namespace std; namespace { /// A CommandRunner that doesn't actually run the commands. struct DryRunCommandRunner : public CommandRunner { // Overridden from CommandRunner: size_t CanRunMore() const override; bool StartCommand(Edge* edge) override; bool WaitForCommand(Result* result) override; private: queue finished_; }; size_t DryRunCommandRunner::CanRunMore() const { return SIZE_MAX; } bool DryRunCommandRunner::StartCommand(Edge* edge) { finished_.push(edge); return true; } bool DryRunCommandRunner::WaitForCommand(Result* result) { if (finished_.empty()) return false; result->status = ExitSuccess; result->edge = finished_.front(); finished_.pop(); return true; } } // namespace Plan::Plan(Builder* builder) : builder_(builder) , command_edges_(0) , wanted_edges_(0) {} void Plan::Reset() { command_edges_ = 0; wanted_edges_ = 0; ready_.clear(); want_.clear(); } bool Plan::AddTarget(const Node* target, string* err) { targets_.push_back(target); return AddSubTarget(target, NULL, err, NULL); } bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err, set* dyndep_walk) { Edge* edge = 
node->in_edge(); if (!edge) { // Leaf node, this can be either a regular input from the manifest // (e.g. a source file), or an implicit input from a depfile or dyndep // file. In the first case, a dirty flag means the file is missing, // and the build should stop. In the second, do not do anything here // since there is no producing edge to add to the plan. if (node->dirty() && !node->generated_by_dep_loader()) { string referenced; if (dependent) referenced = ", needed by '" + dependent->path() + "',"; *err = "'" + node->path() + "'" + referenced + " missing and no known rule to make it"; } return false; } if (edge->outputs_ready()) return false; // Don't need to do anything. // If an entry in want_ does not already exist for edge, create an entry which // maps to kWantNothing, indicating that we do not want to build this entry itself. pair::iterator, bool> want_ins = want_.insert(make_pair(edge, kWantNothing)); Want& want = want_ins.first->second; if (dyndep_walk && want == kWantToFinish) return false; // Don't need to do anything with already-scheduled edge. // If we do need to build edge and we haven't already marked it as wanted, // mark it now. if (node->dirty() && want == kWantNothing) { want = kWantToStart; EdgeWanted(edge); } if (dyndep_walk) dyndep_walk->insert(edge); if (!want_ins.second) return true; // We've already processed the inputs. for (vector::iterator i = edge->inputs_.begin(); i != edge->inputs_.end(); ++i) { if (!AddSubTarget(*i, node, err, dyndep_walk) && !err->empty()) return false; } return true; } void Plan::EdgeWanted(const Edge* edge) { ++wanted_edges_; if (!edge->is_phony()) { ++command_edges_; if (builder_) builder_->status_->EdgeAddedToPlan(edge); } } Edge* Plan::FindWork() { if (ready_.empty()) return NULL; Edge* work = ready_.top(); // If jobserver mode is enabled, try to acquire a token first, // and return null in case of failure. 
if (builder_ && builder_->jobserver_.get()) { work->job_slot_ = builder_->jobserver_->TryAcquire(); if (!work->job_slot_.IsValid()) return nullptr; } ready_.pop(); return work; } void Plan::ScheduleWork(map::iterator want_e) { if (want_e->second == kWantToFinish) { // This edge has already been scheduled. We can get here again if an edge // and one of its dependencies share an order-only input, or if a node // duplicates an out edge (see https://github.com/ninja-build/ninja/pull/519). // Avoid scheduling the work again. return; } assert(want_e->second == kWantToStart); want_e->second = kWantToFinish; Edge* edge = want_e->first; Pool* pool = edge->pool(); if (pool->ShouldDelayEdge()) { pool->DelayEdge(edge); pool->RetrieveReadyEdges(&ready_); } else { pool->EdgeScheduled(*edge); ready_.push(edge); } } bool Plan::EdgeFinished(Edge* edge, EdgeResult result, string* err) { map::iterator e = want_.find(edge); assert(e != want_.end()); bool directly_wanted = e->second != kWantNothing; // See if this job frees up any delayed jobs. if (directly_wanted) edge->pool()->EdgeFinished(*edge); edge->pool()->RetrieveReadyEdges(&ready_); // Release job slot if needed. if (builder_ && builder_->jobserver_.get()) builder_->jobserver_->Release(std::move(edge->job_slot_)); // The rest of this function only applies to successful commands. if (result != kEdgeSucceeded) return true; if (directly_wanted) --wanted_edges_; want_.erase(e); edge->outputs_ready_ = true; // Check off any nodes we were waiting for with this edge. for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { if (!NodeFinished(*o, err)) return false; } return true; } bool Plan::NodeFinished(Node* node, string* err) { // If this node provides dyndep info, load it now. if (node->dyndep_pending()) { assert(builder_ && "dyndep requires Plan to have a Builder"); // Load the now-clean dyndep file. This will also update the // build plan and schedule any new work that is ready. 
return builder_->LoadDyndeps(node, err); } // See if we we want any edges from this node. for (vector::const_iterator oe = node->out_edges().begin(); oe != node->out_edges().end(); ++oe) { map::iterator want_e = want_.find(*oe); if (want_e == want_.end()) continue; // See if the edge is now ready. if (!EdgeMaybeReady(want_e, err)) return false; } return true; } bool Plan::EdgeMaybeReady(map::iterator want_e, string* err) { Edge* edge = want_e->first; if (edge->AllInputsReady()) { if (want_e->second != kWantNothing) { ScheduleWork(want_e); } else { // We do not need to build this edge, but we might need to build one of // its dependents. if (!EdgeFinished(edge, kEdgeSucceeded, err)) return false; } } return true; } bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) { node->set_dirty(false); for (vector::const_iterator oe = node->out_edges().begin(); oe != node->out_edges().end(); ++oe) { // Don't process edges that we don't actually want. map::iterator want_e = want_.find(*oe); if (want_e == want_.end() || want_e->second == kWantNothing) continue; // Don't attempt to clean an edge if it failed to load deps. if ((*oe)->deps_missing_) continue; // If all non-order-only inputs for this edge are now clean, // we might have changed the dirty state of the outputs. vector::iterator begin = (*oe)->inputs_.begin(), end = (*oe)->inputs_.end() - (*oe)->order_only_deps_; #if __cplusplus < 201703L #define MEM_FN mem_fun #else #define MEM_FN mem_fn // mem_fun was removed in C++17. #endif if (find_if(begin, end, MEM_FN(&Node::dirty)) == end) { // Recompute most_recent_input. Node* most_recent_input = NULL; for (vector::iterator i = begin; i != end; ++i) { if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) most_recent_input = *i; } // Now, this edge is dirty if any of the outputs are dirty. // If the edge isn't dirty, clean the outputs and mark the edge as not // wanted. 
      bool outputs_dirty = false;
      if (!scan->RecomputeOutputsDirty(*oe, most_recent_input, &outputs_dirty,
                                       err)) {
        return false;
      }
      if (!outputs_dirty) {
        // Recursively propagate the clean state through the outputs.
        for (vector::iterator o = (*oe)->outputs_.begin();
             o != (*oe)->outputs_.end(); ++o) {
          if (!CleanNode(scan, *o, err))
            return false;
        }

        want_e->second = kWantNothing;
        --wanted_edges_;
        if (!(*oe)->is_phony()) {
          --command_edges_;
          if (builder_)
            builder_->status_->EdgeRemovedFromPlan(*oe);
        }
      }
    }
  }
  return true;
}

bool Plan::DyndepsLoaded(DependencyScan* scan, const Node* node,
                         const DyndepFile& ddf, string* err) {
  // Recompute the dirty state of all our direct and indirect dependents now
  // that our dyndep information has been loaded.
  if (!RefreshDyndepDependents(scan, node, err))
    return false;

  // We loaded dyndep information for those out_edges of the dyndep node that
  // specify the node in a dyndep binding, but they may not be in the plan.
  // Starting with those already in the plan, walk newly-reachable portion
  // of the graph through the dyndep-discovered dependencies.

  // Find edges in the build plan for which we have new dyndep info.
  std::vector dyndep_roots;
  for (DyndepFile::const_iterator oe = ddf.begin(); oe != ddf.end(); ++oe) {
    Edge* edge = oe->first;

    // If the edge outputs are ready we do not need to consider it here.
    if (edge->outputs_ready())
      continue;

    map::iterator want_e = want_.find(edge);

    // If the edge has not been encountered before then nothing already in the
    // plan depends on it so we do not need to consider the edge yet either.
    if (want_e == want_.end())
      continue;

    // This edge is already in the plan so queue it for the walk.
    dyndep_roots.push_back(oe);
  }

  // Walk dyndep-discovered portion of the graph to add it to the build plan.
  std::set dyndep_walk;
  for (std::vector::iterator oei = dyndep_roots.begin();
       oei != dyndep_roots.end(); ++oei) {
    DyndepFile::const_iterator oe = *oei;
    for (vector::const_iterator i = oe->second.implicit_inputs_.begin();
         i != oe->second.implicit_inputs_.end(); ++i) {
      if (!AddSubTarget(*i, oe->first->outputs_[0], err, &dyndep_walk) &&
          !err->empty())
        return false;
    }
  }

  // Add out edges from this node that are in the plan (just as
  // Plan::NodeFinished would have without taking the dyndep code path).
  for (vector::const_iterator oe = node->out_edges().begin();
       oe != node->out_edges().end(); ++oe) {
    map::iterator want_e = want_.find(*oe);
    if (want_e == want_.end())
      continue;
    dyndep_walk.insert(want_e->first);
  }

  // See if any encountered edges are now ready.
  for (set::iterator wi = dyndep_walk.begin();
       wi != dyndep_walk.end(); ++wi) {
    map::iterator want_e = want_.find(*wi);
    if (want_e == want_.end())
      continue;
    if (!EdgeMaybeReady(want_e, err))
      return false;
  }
  return true;
}

// Re-scan the transitive dependents of |node| after its dyndep info changed,
// marking newly-dirty edges as wanted.
bool Plan::RefreshDyndepDependents(DependencyScan* scan, const Node* node,
                                   string* err) {
  // Collect the transitive closure of dependents and mark their edges
  // as not yet visited by RecomputeDirty.
  set dependents;
  UnmarkDependents(node, &dependents);

  // Update the dirty state of all dependents and check if their edges
  // have become wanted.
  for (set::iterator i = dependents.begin(); i != dependents.end(); ++i) {
    Node* n = *i;

    // Check if this dependent node is now dirty.  Also checks for new cycles.
    std::vector validation_nodes;
    if (!scan->RecomputeDirty(n, &validation_nodes, err))
      return false;

    // Add any validation nodes found during RecomputeDirty as new top level
    // targets.
    for (std::vector::iterator v = validation_nodes.begin();
         v != validation_nodes.end(); ++v) {
      if (Edge* in_edge = (*v)->in_edge()) {
        if (!in_edge->outputs_ready() && !AddTarget(*v, err)) {
          return false;
        }
      }
    }
    if (!n->dirty())
      continue;

    // This edge was encountered before.
    // However, we may not have wanted to
    // build it if the outputs were not known to be dirty.  With dyndep
    // information an output is now known to be dirty, so we want the edge.
    Edge* edge = n->in_edge();
    assert(edge && !edge->outputs_ready());
    map::iterator want_e = want_.find(edge);
    assert(want_e != want_.end());
    if (want_e->second == kWantNothing) {
      want_e->second = kWantToStart;
      EdgeWanted(edge);
    }
  }
  return true;
}

// Walk the transitive dependents of |node| that are in the plan, clearing
// each edge's visit mark (so RecomputeDirty will revisit it) and collecting
// the dependent nodes into |dependents|.
void Plan::UnmarkDependents(const Node* node, set* dependents) {
  for (vector::const_iterator oe = node->out_edges().begin();
       oe != node->out_edges().end(); ++oe) {
    Edge* edge = *oe;

    map::iterator want_e = want_.find(edge);
    if (want_e == want_.end())
      continue;

    if (edge->mark_ != Edge::VisitNone) {
      edge->mark_ = Edge::VisitNone;
      for (vector::iterator o = edge->outputs_.begin();
           o != edge->outputs_.end(); ++o) {
        // Only recurse the first time a node is inserted.
        if (dependents->insert(*o).second)
          UnmarkDependents(*o, dependents);
      }
    }
  }
}

namespace {

// Heuristic for edge priority weighting.
// Phony edges are free (0 cost), all other edges are weighted equally.
int64_t EdgeWeightHeuristic(Edge *edge) {
  return edge->is_phony() ? 0 : 1;
}

}  // namespace

void Plan::ComputeCriticalPath() {
  METRIC_RECORD("ComputeCriticalPath");

  // Convenience class to perform a topological sort of all edges
  // reachable from a set of unique targets. Usage is:
  //
  // 1) Create instance.
  //
  // 2) Call VisitTarget() as many times as necessary.
  //    Note that duplicate targets are properly ignored.
  //
  // 3) Call result() to get a sorted list of edges,
  //    where each edge appears _after_ its parents,
  //    i.e. the edges producing its inputs, in the list.
// struct TopoSort { void VisitTarget(const Node* target) { Edge* producer = target->in_edge(); if (producer) Visit(producer); } const std::vector& result() const { return sorted_edges_; } private: // Implementation note: // // This is the regular depth-first-search algorithm described // at https://en.wikipedia.org/wiki/Topological_sorting, except // that: // // - Edges are appended to the end of the list, for performance // reasons. Hence the order used in result(). // // - Since the graph cannot have any cycles, temporary marks // are not necessary, and a simple set is used to record // which edges have already been visited. // void Visit(Edge* edge) { auto insertion = visited_set_.emplace(edge); if (!insertion.second) return; for (const Node* input : edge->inputs_) { Edge* producer = input->in_edge(); if (producer) Visit(producer); } sorted_edges_.push_back(edge); } std::unordered_set visited_set_; std::vector sorted_edges_; }; TopoSort topo_sort; for (const Node* target : targets_) { topo_sort.VisitTarget(target); } const auto& sorted_edges = topo_sort.result(); // First, reset all weights to 1. for (Edge* edge : sorted_edges) edge->set_critical_path_weight(EdgeWeightHeuristic(edge)); // Second propagate / increment weights from // children to parents. Scan the list // in reverse order to do so. for (auto reverse_it = sorted_edges.rbegin(); reverse_it != sorted_edges.rend(); ++reverse_it) { Edge* edge = *reverse_it; int64_t edge_weight = edge->critical_path_weight(); for (const Node* input : edge->inputs_) { Edge* producer = input->in_edge(); if (!producer) continue; int64_t producer_weight = producer->critical_path_weight(); int64_t candidate_weight = edge_weight + EdgeWeightHeuristic(producer); if (candidate_weight > producer_weight) producer->set_critical_path_weight(candidate_weight); } } } void Plan::ScheduleInitialEdges() { // Add ready edges to queue. 
  assert(ready_.empty());
  std::set pools;

  for (std::map::iterator it = want_.begin(), end = want_.end();
       it != end; ++it) {
    Edge* edge = it->first;
    Plan::Want want = it->second;
    if (want == kWantToStart && edge->AllInputsReady()) {
      Pool* pool = edge->pool();
      if (pool->ShouldDelayEdge()) {
        pool->DelayEdge(edge);
        pools.insert(pool);
      } else {
        ScheduleWork(it);
      }
    }
  }

  // Call RetrieveReadyEdges only once at the end so higher priority
  // edges are retrieved first, not the ones that happen to be first
  // in the want_ map.
  for (std::set::iterator it=pools.begin(), end = pools.end();
       it != end; ++it) {
    (*it)->RetrieveReadyEdges(&ready_);
  }
}

// Must be called once after all targets have been added and before the
// first FindWork() call.
void Plan::PrepareQueue() {
  ComputeCriticalPath();
  ScheduleInitialEdges();
}

// Debugging helper: print the wanted and ready sets to stdout.
void Plan::Dump() const {
  printf("pending: %d\n", (int)want_.size());
  for (map::const_iterator e = want_.begin(); e != want_.end(); ++e) {
    if (e->second != kWantNothing)
      printf("want ");
    e->first->Dump();
  }
  printf("ready: %d\n", (int)ready_.size());
}

Builder::Builder(State* state, const BuildConfig& config, BuildLog* build_log,
                 DepsLog* deps_log, DiskInterface* disk_interface,
                 Status* status, int64_t start_time_millis)
    : state_(state), config_(config), plan_(this), status_(status),
      start_time_millis_(start_time_millis), disk_interface_(disk_interface),
      explanations_(g_explaining ?
                        new Explanations() : nullptr),
      scan_(state, build_log, deps_log, disk_interface,
            &config_.depfile_parser_options, explanations_.get()) {
  // The lock file lives in builddir (if set) so it sits next to the logs.
  lock_file_path_ = ".ninja_lock";
  string build_dir = state_->bindings_.LookupVariable("builddir");
  if (!build_dir.empty())
    lock_file_path_ = build_dir + "/" + lock_file_path_;
  status_->SetExplanations(explanations_.get());
}

Builder::~Builder() {
  Cleanup();
  status_->SetExplanations(nullptr);
}

void Builder::Cleanup() {
  if (command_runner_.get()) {
    vector active_edges = command_runner_->GetActiveEdges();
    command_runner_->Abort();

    for (vector::iterator e = active_edges.begin();
         e != active_edges.end(); ++e) {
      string depfile = (*e)->GetUnescapedDepfile();
      for (vector::iterator o = (*e)->outputs_.begin();
           o != (*e)->outputs_.end(); ++o) {
        // Only delete this output if it was actually modified.  This is
        // important for things like the generator where we don't want to
        // delete the manifest file if we can avoid it.  But if the rule
        // uses a depfile, always delete.  (Consider the case where we
        // need to rebuild an output because of a modified header file
        // mentioned in a depfile, and the command touches its depfile
        // but is interrupted before it touches its output file.)
        string err;
        TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), &err);
        if (new_mtime == -1)  // Log and ignore Stat() errors.
status_->Error("%s", err.c_str()); if (!depfile.empty() || (*o)->mtime() != new_mtime) disk_interface_->RemoveFile((*o)->path()); } if (!depfile.empty()) disk_interface_->RemoveFile(depfile); } } string err; if (disk_interface_->Stat(lock_file_path_, &err) > 0) disk_interface_->RemoveFile(lock_file_path_); } Node* Builder::AddTarget(const string& name, string* err) { Node* node = state_->LookupNode(name); if (!node) { *err = "unknown target: '" + name + "'"; return NULL; } if (!AddTarget(node, err)) return NULL; return node; } bool Builder::AddTarget(Node* target, string* err) { std::vector validation_nodes; if (!scan_.RecomputeDirty(target, &validation_nodes, err)) return false; Edge* in_edge = target->in_edge(); if (!in_edge || !in_edge->outputs_ready()) { if (!plan_.AddTarget(target, err)) { return false; } } // Also add any validation nodes found during RecomputeDirty as top level // targets. for (std::vector::iterator n = validation_nodes.begin(); n != validation_nodes.end(); ++n) { if (Edge* validation_in_edge = (*n)->in_edge()) { if (!validation_in_edge->outputs_ready() && !plan_.AddTarget(*n, err)) { return false; } } } return true; } bool Builder::AlreadyUpToDate() const { return !plan_.more_to_do(); } ExitStatus Builder::Build(string* err) { assert(!AlreadyUpToDate()); plan_.PrepareQueue(); int pending_commands = 0; int failures_allowed = config_.failures_allowed; // Set up the command runner if we haven't done so already. if (!command_runner_.get()) { if (config_.dry_run) command_runner_.reset(new DryRunCommandRunner); else command_runner_.reset(CommandRunner::factory(config_, jobserver_.get())); ; } // We are about to start the build process. status_->BuildStarted(); // This main loop runs the entire build process. // It is structured like this: // First, we attempt to start as many commands as allowed by the // command runner. // Second, we attempt to wait for / reap the next finished command. 
  while (plan_.more_to_do()) {
    // See if we can start any more commands.
    if (failures_allowed) {
      size_t capacity = command_runner_->CanRunMore();
      while (capacity > 0) {
        Edge* edge = plan_.FindWork();
        if (!edge)
          break;

        if (edge->GetBindingBool("generator")) {
          // A generator rule may rewrite the manifest/logs; flush the build
          // log before running it.
          scan_.build_log()->Close();
        }

        if (!StartEdge(edge, err)) {
          Cleanup();
          status_->BuildFinished();
          return ExitFailure;
        }

        if (edge->is_phony()) {
          // Phony edges finish immediately; they run no command.
          if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err)) {
            Cleanup();
            status_->BuildFinished();
            return ExitFailure;
          }
        } else {
          ++pending_commands;

          --capacity;

          // Re-evaluate capacity.
          size_t current_capacity = command_runner_->CanRunMore();
          if (current_capacity < capacity)
            capacity = current_capacity;
        }
      }

      // We are finished with all work items and have no pending
      // commands. Therefore, break out of the main loop.
      if (pending_commands == 0 && !plan_.more_to_do())
        break;
    }

    // See if we can reap any finished commands.
    if (pending_commands) {
      CommandRunner::Result result;
      if (!command_runner_->WaitForCommand(&result) ||
          result.status == ExitInterrupted) {
        Cleanup();
        status_->BuildFinished();
        *err = "interrupted by user";
        return result.status;
      }

      --pending_commands;
      bool command_finished = FinishCommand(&result, err);
      SetFailureCode(result.status);
      if (!command_finished) {
        Cleanup();
        status_->BuildFinished();
        if (result.success()) {
          // If the command pretend succeeded, the status wasn't set to a
          // proper exit code, so we set it to ExitFailure.
          result.status = ExitFailure;
          SetFailureCode(result.status);
        }
        return result.status;
      }

      if (!result.success()) {
        if (failures_allowed)
          failures_allowed--;
      }

      // We made some progress; start the main loop over.
      continue;
    }

    // If we get here, we cannot make any more progress.
    status_->BuildFinished();
    if (failures_allowed == 0) {
      if (config_.failures_allowed > 1)
        *err = "subcommands failed";
      else
        *err = "subcommand failed";
    } else if (failures_allowed < config_.failures_allowed)
      *err = "cannot make progress due to previous errors";
    else
      *err = "stuck [this is a bug]";

    return GetExitCode();
  }

  status_->BuildFinished();
  return ExitSuccess;
}

bool Builder::StartEdge(Edge* edge, string* err) {
  METRIC_RECORD("StartEdge");
  if (edge->is_phony())
    return true;

  int64_t start_time_millis = GetTimeMillis() - start_time_millis_;
  running_edges_.insert(make_pair(edge, start_time_millis));

  status_->BuildEdgeStarted(edge, start_time_millis);

  // -1 means "not yet sampled"; dry runs never sample the filesystem.
  TimeStamp build_start = config_.dry_run ? 0 : -1;

  // Create directories necessary for outputs and remember the current
  // filesystem mtime to record later
  // XXX: this will block; do we care?
  for (vector::iterator o = edge->outputs_.begin();
       o != edge->outputs_.end(); ++o) {
    if (!disk_interface_->MakeDirs((*o)->path()))
      return false;
    if (build_start == -1) {
      // Touch the lock file and use its mtime as the edge start timestamp.
      disk_interface_->WriteFile(lock_file_path_, "", false);
      build_start = disk_interface_->Stat(lock_file_path_, err);
      if (build_start == -1)
        build_start = 0;
    }
  }

  edge->command_start_time_ = build_start;

  // Create depfile directory if needed.
  // XXX: this may also block; do we care?
  std::string depfile = edge->GetUnescapedDepfile();
  if (!depfile.empty() && !disk_interface_->MakeDirs(depfile))
    return false;

  // Create response file, if needed
  // XXX: this may also block; do we care?
  string rspfile = edge->GetUnescapedRspfile();
  if (!rspfile.empty()) {
    string content = edge->GetBinding("rspfile_content");
    if (!disk_interface_->WriteFile(rspfile, content, true))
      return false;
  }

  // Compute the command and hand it to the command runner to execute.
  if (!command_runner_->StartCommand(edge)) {
    err->assign("command '" + edge->EvaluateCommand() + "' failed.");
    return false;
  }

  return true;
}

bool Builder::FinishCommand(CommandRunner::Result* result, string* err) {
  METRIC_RECORD("FinishCommand");

  Edge* edge = result->edge;

  // First try to extract dependencies from the result, if any.
  // This must happen first as it filters the command output (we want
  // to filter /showIncludes output, even on compile failure) and
  // extraction itself can fail, which makes the command fail from a
  // build perspective.
  vector deps_nodes;
  string deps_type = edge->GetBinding("deps");
  const string deps_prefix = edge->GetBinding("msvc_deps_prefix");
  if (!deps_type.empty()) {
    string extract_err;
    if (!ExtractDeps(result, deps_type, deps_prefix, &deps_nodes,
                     &extract_err) &&
        result->success()) {
      // Extraction failure turns an otherwise-successful command into a
      // failed one; append the extraction error to the command output.
      if (!result->output.empty())
        result->output.append("\n");
      result->output.append(extract_err);
      result->status = ExitFailure;
    }
  }

  int64_t start_time_millis, end_time_millis;
  RunningEdgeMap::iterator it = running_edges_.find(edge);
  start_time_millis = it->second;
  end_time_millis = GetTimeMillis() - start_time_millis_;
  running_edges_.erase(it);

  status_->BuildEdgeFinished(edge, start_time_millis, end_time_millis,
                             result->status, result->output);

  // The rest of this function only applies to successful commands.
  if (!result->success()) {
    return plan_.EdgeFinished(edge, Plan::kEdgeFailed, err);
  }

  // Restat the edge outputs.
  TimeStamp record_mtime = 0;
  if (!config_.dry_run) {
    const bool restat = edge->GetBindingBool("restat");
    const bool generator = edge->GetBindingBool("generator");
    bool node_cleaned = false;
    record_mtime = edge->command_start_time_;

    // restat and generator rules must restat the outputs after the build
    // has finished. if record_mtime == 0, then there was an error while
    // attempting to touch/stat the temp file when the edge started and
    // we should fall back to recording the outputs' current mtime in the
    // log.
    if (record_mtime == 0 || restat || generator) {
      for (vector::iterator o = edge->outputs_.begin();
           o != edge->outputs_.end(); ++o) {
        TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
        if (new_mtime == -1)
          return false;
        if (new_mtime > record_mtime)
          record_mtime = new_mtime;
        if ((*o)->mtime() == new_mtime && restat) {
          // The rule command did not change the output.  Propagate the clean
          // state through the build graph.
          // Note that this also applies to nonexistent outputs (mtime == 0).
          if (!plan_.CleanNode(&scan_, *o, err))
            return false;
          node_cleaned = true;
        }
      }
    }
    if (node_cleaned) {
      // At least one output was unchanged; record the edge start time so a
      // later build can still detect modifications made during this build.
      record_mtime = edge->command_start_time_;
    }
  }

  if (!plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, err))
    return false;

  // Delete any left over response file.
  string rspfile = edge->GetUnescapedRspfile();
  if (!rspfile.empty() && !g_keep_rsp)
    disk_interface_->RemoveFile(rspfile);

  if (scan_.build_log()) {
    if (!scan_.build_log()->RecordCommand(
            edge, static_cast(start_time_millis),
            static_cast(end_time_millis), record_mtime)) {
      *err = string("Error writing to build log: ") + strerror(errno);
      return false;
    }
  }

  if (!deps_type.empty() && !config_.dry_run) {
    assert(!edge->outputs_.empty() && "should have been rejected by parser");
    // Record the extracted deps against every output of the edge.
    for (std::vector::const_iterator o = edge->outputs_.begin();
         o != edge->outputs_.end(); ++o) {
      TimeStamp deps_mtime = disk_interface_->Stat((*o)->path(), err);
      if (deps_mtime == -1)
        return false;
      if (!scan_.deps_log()->RecordDeps(*o, deps_mtime, deps_nodes)) {
        *err = std::string("Error writing to deps log: ") + strerror(errno);
        return false;
      }
    }
  }
  return true;
}

bool Builder::ExtractDeps(CommandRunner::Result* result,
                          const string& deps_type, const string& deps_prefix,
                          vector* deps_nodes, string* err) {
  if (deps_type == "msvc") {
    CLParser parser;
    string output;
    if (!parser.Parse(result->output, deps_prefix, &output, err))
      return false;
    // The parser filters /showIncludes lines out of the visible output.
    result->output = output;
    for (set::iterator i = parser.includes_.begin();
         i != parser.includes_.end(); ++i) {
      // ~0 is assuming that with MSVC-parsed headers, it's ok to always make
      // all backslashes (as some of the slashes will certainly be backslashes
      // anyway). This could be fixed if necessary with some additional
      // complexity in IncludesNormalize::Relativize.
      deps_nodes->push_back(state_->GetNode(*i, ~0u));
    }
  } else if (deps_type == "gcc") {
    string depfile = result->edge->GetUnescapedDepfile();
    if (depfile.empty()) {
      *err = string("edge with deps=gcc but no depfile makes no sense");
      return false;
    }

    // Read depfile content.  Treat a missing depfile as empty.
    string content;
    switch (disk_interface_->ReadFile(depfile, &content, err)) {
    case DiskInterface::Okay:
      break;
    case DiskInterface::NotFound:
      err->clear();
      break;
    case DiskInterface::OtherError:
      return false;
    }

    if (content.empty())
      return true;

    DepfileParser deps(config_.depfile_parser_options);
    if (!deps.Parse(&content, err))
      return false;

    // XXX check depfile matches expected output.
    deps_nodes->reserve(deps.ins_.size());
    for (vector::iterator i = deps.ins_.begin();
         i != deps.ins_.end(); ++i) {
      uint64_t slash_bits;
      CanonicalizePath(const_cast(i->str_), &i->len_, &slash_bits);
      deps_nodes->push_back(state_->GetNode(*i, slash_bits));
    }

    if (!g_keep_depfile) {
      // The deps are now stored in the deps log; the depfile is redundant.
      if (disk_interface_->RemoveFile(depfile) < 0) {
        *err = string("deleting depfile: ") + strerror(errno) + string("\n");
        return false;
      }
    }
  } else {
    Fatal("unknown deps type '%s'", deps_type.c_str());
  }

  return true;
}

bool Builder::LoadDyndeps(Node* node, string* err) {
  // Load the dyndep information provided by this node.
  DyndepFile ddf;
  if (!scan_.LoadDyndeps(node, &ddf, err))
    return false;

  // Update the build plan to account for dyndep modifications to the graph.
  if (!plan_.DyndepsLoaded(&scan_, node, ddf, err))
    return false;

  return true;
}

void Builder::SetFailureCode(ExitStatus code) {
  // ExitSuccess should not overwrite any error
  if (code != ExitSuccess) {
    exit_code_ = code;
  }
}
ninja-1.13.2/src/build.h000066400000000000000000000224641510764045400147500ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_BUILD_H_ #define NINJA_BUILD_H_ #include #include #include #include #include #include "depfile_parser.h" #include "exit_status.h" #include "graph.h" #include "jobserver.h" #include "util.h" // int64_t struct BuildLog; struct Builder; struct DiskInterface; struct Edge; struct Explanations; struct Node; struct State; struct Status; /// Plan stores the state of a build plan: what we intend to build, /// which steps we're ready to execute. struct Plan { Plan(Builder* builder = NULL); /// Add a target to our plan (including all its dependencies). /// Returns false if we don't need to build this target; may /// fill in |err| with an error message if there's a problem. bool AddTarget(const Node* target, std::string* err); // Pop a ready edge off the queue of edges to build. // Returns NULL if there's no work to do. Edge* FindWork(); /// Returns true if there's more work to be done. bool more_to_do() const { return wanted_edges_ > 0 && command_edges_ > 0; } /// Dumps the current state of the plan. void Dump() const; enum EdgeResult { kEdgeFailed, kEdgeSucceeded }; /// Mark an edge as done building (whether it succeeded or failed). /// If any of the edge's outputs are dyndep bindings of their dependents, /// this loads dynamic dependencies from the nodes' paths. /// Returns 'false' if loading dyndep info fails and 'true' otherwise. bool EdgeFinished(Edge* edge, EdgeResult result, std::string* err); /// Clean the given node during the build. /// Return false on error. bool CleanNode(DependencyScan* scan, Node* node, std::string* err); /// Number of edges with commands to run. int command_edge_count() const { return command_edges_; } /// Reset state. Clears want and ready sets. void Reset(); // After all targets have been added, prepares the ready queue for find work. 
void PrepareQueue(); /// Update the build plan to account for modifications made to the graph /// by information loaded from a dyndep file. bool DyndepsLoaded(DependencyScan* scan, const Node* node, const DyndepFile& ddf, std::string* err); /// Enumerate possible steps we want for an edge. enum Want { /// We do not want to build the edge, but we might want to build one of /// its dependents. kWantNothing, /// We want to build the edge, but have not yet scheduled it. kWantToStart, /// We want to build the edge, have scheduled it, and are waiting /// for it to complete. kWantToFinish }; private: void ComputeCriticalPath(); bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err); void UnmarkDependents(const Node* node, std::set* dependents); bool AddSubTarget(const Node* node, const Node* dependent, std::string* err, std::set* dyndep_walk); // Add edges that kWantToStart into the ready queue // Must be called after ComputeCriticalPath and before FindWork void ScheduleInitialEdges(); /// Update plan with knowledge that the given node is up to date. /// If the node is a dyndep binding on any of its dependents, this /// loads dynamic dependencies from the node's path. /// Returns 'false' if loading dyndep info fails and 'true' otherwise. bool NodeFinished(Node* node, std::string* err); void EdgeWanted(const Edge* edge); bool EdgeMaybeReady(std::map::iterator want_e, std::string* err); /// Submits a ready edge as a candidate for execution. /// The edge may be delayed from running, for example if it's a member of a /// currently-full pool. void ScheduleWork(std::map::iterator want_e); /// Keep track of which edges we want to build in this plan. If this map does /// not contain an entry for an edge, we do not want to build the entry or its /// dependents. If it does contain an entry, the enumeration indicates what /// we want for the edge. 
  std::map want_;

  EdgePriorityQueue ready_;

  Builder* builder_;

  /// User-provided targets in build order; earlier ones have higher priority.
  std::vector targets_;

  /// Total number of edges that have commands (not phony).
  int command_edges_;

  /// Total remaining number of wanted edges.
  int wanted_edges_;
};

struct BuildConfig;

/// CommandRunner is an interface that wraps running the build
/// subcommands.  This allows tests to abstract out running commands.
/// RealCommandRunner is an implementation that actually runs commands.
struct CommandRunner {
  virtual ~CommandRunner() {}
  virtual size_t CanRunMore() const = 0;
  virtual bool StartCommand(Edge* edge) = 0;

  /// The result of waiting for a command.
  struct Result {
    Edge* edge = nullptr;
    ExitStatus status = ExitFailure;
    std::string output;
    bool success() const { return status == ExitSuccess; }
  };

  /// Wait for a command to complete, or return false if interrupted.
  virtual bool WaitForCommand(Result* result) = 0;

  virtual std::vector GetActiveEdges() { return std::vector(); }
  virtual void Abort() {}

  /// Creates the RealCommandRunner. \arg jobserver can be nullptr if there
  /// is no jobserver pool to use.
  static CommandRunner* factory(const BuildConfig& config,
                                Jobserver::Client* jobserver);
};

/// Options (e.g. verbosity, parallelism) passed to a build.
struct BuildConfig {
  BuildConfig() = default;

  enum Verbosity {
    QUIET,             // No output -- used when testing.
    NO_STATUS_UPDATE,  // just regular output but suppress status update
    NORMAL,            // regular output and status update
    VERBOSE
  };
  Verbosity verbosity = NORMAL;
  bool dry_run = false;
  int parallelism = 1;
  bool disable_jobserver_client = false;
  int failures_allowed = 1;
  /// The maximum load average we must not exceed. A negative value
  /// means that we do not have any limit.
  double max_load_average = -0.0f;
  DepfileParserOptions depfile_parser_options;
};

/// Builder wraps the build process: starting commands, updating status.
struct Builder {
  Builder(State* state, const BuildConfig& config, BuildLog* build_log,
          DepsLog* deps_log, DiskInterface* disk_interface, Status* status,
          int64_t start_time_millis);
  ~Builder();

  /// Set Jobserver client instance for this builder.
  void SetJobserverClient(std::unique_ptr jobserver_client) {
    jobserver_ = std::move(jobserver_client);
  }

  /// Clean up after interrupted commands by deleting output files.
  void Cleanup();

  Node* AddTarget(const std::string& name, std::string* err);

  /// Add a target to the build, scanning dependencies.
  /// @return false on error.
  bool AddTarget(Node* target, std::string* err);

  /// Returns true if the build targets are already up to date.
  bool AlreadyUpToDate() const;

  /// Run the build.  Returns ExitStatus or the exit code of the last failed
  /// job.  It is an error to call this function when AlreadyUpToDate() is
  /// true.
  ExitStatus Build(std::string* err);

  bool StartEdge(Edge* edge, std::string* err);

  /// Update status ninja logs following a command termination.
  /// @return false if the build can not proceed further due to a fatal error.
  bool FinishCommand(CommandRunner::Result* result, std::string* err);

  /// Used for tests.
  void SetBuildLog(BuildLog* log) { scan_.set_build_log(log); }

  /// Load the dyndep information provided by the given node.
  bool LoadDyndeps(Node* node, std::string* err);

  State* state_;
  const BuildConfig& config_;
  Plan plan_;
  std::unique_ptr jobserver_;
  std::unique_ptr command_runner_;
  Status* status_;

  /// Returns ExitStatus or the exit code of the last failed job
  /// (doesn't need to be an enum value of ExitStatus).
  ExitStatus GetExitCode() const { return exit_code_; }

 private:
  bool ExtractDeps(CommandRunner::Result* result, const std::string& deps_type,
                   const std::string& deps_prefix,
                   std::vector* deps_nodes, std::string* err);

  /// Map of running edge to time the edge started running.
  typedef std::map RunningEdgeMap;
  RunningEdgeMap running_edges_;

  /// Time the build started.
  int64_t start_time_millis_;

  std::string lock_file_path_;
  DiskInterface* disk_interface_;

  // Only create an Explanations class if '-d explain' is used.
  std::unique_ptr explanations_;

  DependencyScan scan_;

  /// Keep the global exit code for the build.
  ExitStatus exit_code_ = ExitSuccess;

  void SetFailureCode(ExitStatus code);

  // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
  Builder(const Builder &other);        // DO NOT IMPLEMENT
  void operator=(const Builder &other); // DO NOT IMPLEMENT
};

#endif  // NINJA_BUILD_H_
ninja-1.13.2/src/build_log.cc000066400000000000000000000300431510764045400157410ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// On AIX, inttypes.h gets indirectly included by build_log.h.
// It's easiest just to ask for the printf format macros right away.
#ifndef _WIN32
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#endif

#include "build_log.h"
#include "disk_interface.h"

#include
#include
#include
#include

#ifndef _WIN32
#include
#include
#endif

#include "build.h"
#include "graph.h"
#include "metrics.h"
#include "util.h"

#if defined(_MSC_VER) && (_MSC_VER < 1800)
#define strtoll _strtoi64
#endif

// Implementation details:
// Each run's log appends to the log file.
// To load, we run through all log entries in series, throwing away
// older runs.
// Once the number of redundant entries exceeds a threshold, we write // out a new file and replace the existing one with it. namespace { const char kFileSignature[] = "# ninja log v%d\n"; const int kOldestSupportedVersion = 7; const int kCurrentVersion = 7; } // namespace // static uint64_t BuildLog::LogEntry::HashCommand(StringPiece command) { return rapidhash(command.str_, command.len_); } BuildLog::LogEntry::LogEntry(std::string output) : output(std::move(output)) {} BuildLog::LogEntry::LogEntry(const std::string& output, uint64_t command_hash, int start_time, int end_time, TimeStamp mtime) : output(output), command_hash(command_hash), start_time(start_time), end_time(end_time), mtime(mtime) {} BuildLog::BuildLog() = default; BuildLog::~BuildLog() { Close(); } bool BuildLog::OpenForWrite(const std::string& path, const BuildLogUser& user, std::string* err) { if (needs_recompaction_) { if (!Recompact(path, user, err)) return false; } assert(!log_file_); log_file_path_ = path; // we don't actually open the file right now, but will // do so on the first write attempt return true; } bool BuildLog::RecordCommand(Edge* edge, int start_time, int end_time, TimeStamp mtime) { std::string command = edge->EvaluateCommand(true); uint64_t command_hash = LogEntry::HashCommand(command); for (std::vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { const std::string& path = (*out)->path(); Entries::iterator i = entries_.find(path); LogEntry* log_entry; if (i != entries_.end()) { log_entry = i->second.get(); } else { log_entry = new LogEntry(path); // Passes ownership of |log_entry| to the map, but keeps the pointer valid. 
entries_.emplace(log_entry->output, std::unique_ptr(log_entry)); } log_entry->command_hash = command_hash; log_entry->start_time = start_time; log_entry->end_time = end_time; log_entry->mtime = mtime; if (!OpenForWriteIfNeeded()) { return false; } if (log_file_) { if (!WriteEntry(log_file_, *log_entry)) return false; if (fflush(log_file_) != 0) { return false; } } } return true; } void BuildLog::Close() { OpenForWriteIfNeeded(); // create the file even if nothing has been recorded if (log_file_) fclose(log_file_); log_file_ = NULL; } bool BuildLog::OpenForWriteIfNeeded() { if (log_file_ || log_file_path_.empty()) { return true; } log_file_ = fopen(log_file_path_.c_str(), "ab"); if (!log_file_) { return false; } if (setvbuf(log_file_, NULL, _IOLBF, BUFSIZ) != 0) { return false; } SetCloseOnExec(fileno(log_file_)); // Opening a file in append mode doesn't set the file pointer to the file's // end on Windows. Do that explicitly. fseek(log_file_, 0, SEEK_END); if (ftell(log_file_) == 0) { if (fprintf(log_file_, kFileSignature, kCurrentVersion) < 0) { return false; } } return true; } struct LineReader { explicit LineReader(FILE* file) : file_(file), buf_end_(buf_), line_start_(buf_), line_end_(NULL) { memset(buf_, 0, sizeof(buf_)); } // Reads a \n-terminated line from the file passed to the constructor. // On return, *line_start points to the beginning of the next line, and // *line_end points to the \n at the end of the line. If no newline is seen // in a fixed buffer size, *line_end is set to NULL. Returns false on EOF. bool ReadLine(char** line_start, char** line_end) { if (line_start_ >= buf_end_ || !line_end_) { // Buffer empty, refill. size_t size_read = fread(buf_, 1, sizeof(buf_), file_); if (!size_read) return false; line_start_ = buf_; buf_end_ = buf_ + size_read; } else { // Advance to next line in buffer. line_start_ = line_end_ + 1; } line_end_ = static_cast(memchr(line_start_, '\n', buf_end_ - line_start_)); if (!line_end_) { // No newline. 
Move rest of data to start of buffer, fill rest. size_t already_consumed = line_start_ - buf_; size_t size_rest = (buf_end_ - buf_) - already_consumed; memmove(buf_, line_start_, size_rest); size_t read = fread(buf_ + size_rest, 1, sizeof(buf_) - size_rest, file_); buf_end_ = buf_ + size_rest + read; line_start_ = buf_; line_end_ = static_cast(memchr(line_start_, '\n', buf_end_ - line_start_)); } *line_start = line_start_; *line_end = line_end_; return true; } private: FILE* file_; char buf_[256 << 10]; char* buf_end_; // Points one past the last valid byte in |buf_|. char* line_start_; // Points at the next \n in buf_ after line_start, or NULL. char* line_end_; }; LoadStatus BuildLog::Load(const std::string& path, std::string* err) { METRIC_RECORD(".ninja_log load"); FILE* file = fopen(path.c_str(), "r"); if (!file) { if (errno == ENOENT) return LOAD_NOT_FOUND; *err = strerror(errno); return LOAD_ERROR; } int log_version = 0; int unique_entry_count = 0; int total_entry_count = 0; LineReader reader(file); char* line_start = 0; char* line_end = 0; while (reader.ReadLine(&line_start, &line_end)) { if (!log_version) { sscanf(line_start, kFileSignature, &log_version); bool invalid_log_version = false; if (log_version < kOldestSupportedVersion) { invalid_log_version = true; *err = "build log version is too old; starting over"; } else if (log_version > kCurrentVersion) { invalid_log_version = true; *err = "build log version is too new; starting over"; } if (invalid_log_version) { fclose(file); platformAwareUnlink(path.c_str()); // Don't report this as a failure. A missing build log will cause // us to rebuild the outputs anyway. return LOAD_NOT_FOUND; } } // If no newline was found in this chunk, read the next. 
if (!line_end) continue; const char kFieldSeparator = '\t'; char* start = line_start; char* end = static_cast(memchr(start, kFieldSeparator, line_end - start)); if (!end) continue; *end = 0; int start_time = 0, end_time = 0; TimeStamp mtime = 0; start_time = atoi(start); start = end + 1; end = static_cast(memchr(start, kFieldSeparator, line_end - start)); if (!end) continue; *end = 0; end_time = atoi(start); start = end + 1; end = static_cast(memchr(start, kFieldSeparator, line_end - start)); if (!end) continue; *end = 0; mtime = strtoll(start, NULL, 10); start = end + 1; end = static_cast(memchr(start, kFieldSeparator, line_end - start)); if (!end) continue; std::string output(start, end - start); start = end + 1; end = line_end; LogEntry* entry; Entries::iterator i = entries_.find(output); if (i != entries_.end()) { entry = i->second.get(); } else { entry = new LogEntry(std::move(output)); // Passes ownership of |entry| to the map, but keeps the pointer valid. entries_.emplace(entry->output, std::unique_ptr(entry)); ++unique_entry_count; } ++total_entry_count; entry->start_time = start_time; entry->end_time = end_time; entry->mtime = mtime; char c = *end; *end = '\0'; entry->command_hash = (uint64_t)strtoull(start, NULL, 16); *end = c; } fclose(file); if (!line_start) { return LOAD_SUCCESS; // file was empty } // Decide whether it's time to rebuild the log: // - if we're upgrading versions // - if it's getting large int kMinCompactionEntryCount = 100; int kCompactionRatio = 3; if (log_version < kCurrentVersion) { needs_recompaction_ = true; } else if (total_entry_count > kMinCompactionEntryCount && total_entry_count > unique_entry_count * kCompactionRatio) { needs_recompaction_ = true; } return LOAD_SUCCESS; } BuildLog::LogEntry* BuildLog::LookupByOutput(const std::string& path) { Entries::iterator i = entries_.find(path); if (i != entries_.end()) return i->second.get(); return NULL; } bool BuildLog::WriteEntry(FILE* f, const LogEntry& entry) { return fprintf(f, 
"%d\t%d\t%" PRId64 "\t%s\t%" PRIx64 "\n", entry.start_time, entry.end_time, entry.mtime, entry.output.c_str(), entry.command_hash) > 0; } bool BuildLog::Recompact(const std::string& path, const BuildLogUser& user, std::string* err) { METRIC_RECORD(".ninja_log recompact"); Close(); std::string temp_path = path + ".recompact"; FILE* f = fopen(temp_path.c_str(), "wb"); if (!f) { *err = strerror(errno); return false; } if (fprintf(f, kFileSignature, kCurrentVersion) < 0) { *err = strerror(errno); fclose(f); return false; } std::vector dead_outputs; for (const auto& pair : entries_) { if (user.IsPathDead(pair.first)) { dead_outputs.push_back(pair.first); continue; } if (!WriteEntry(f, *pair.second)) { *err = strerror(errno); fclose(f); return false; } } for (StringPiece output : dead_outputs) entries_.erase(output); fclose(f); if (platformAwareUnlink(path.c_str()) < 0) { *err = strerror(errno); return false; } if (rename(temp_path.c_str(), path.c_str()) < 0) { *err = strerror(errno); return false; } return true; } bool BuildLog::Restat(const StringPiece path, const DiskInterface& disk_interface, const int output_count, char** outputs, std::string* const err) { METRIC_RECORD(".ninja_log restat"); Close(); std::string temp_path = path.AsString() + ".restat"; FILE* f = fopen(temp_path.c_str(), "wb"); if (!f) { *err = strerror(errno); return false; } if (fprintf(f, kFileSignature, kCurrentVersion) < 0) { *err = strerror(errno); fclose(f); return false; } for (auto& pair : entries_) { bool skip = output_count > 0; for (int j = 0; j < output_count; ++j) { if (pair.second->output == outputs[j]) { skip = false; break; } } if (!skip) { const TimeStamp mtime = disk_interface.Stat(pair.second->output, err); if (mtime == -1) { fclose(f); return false; } pair.second->mtime = mtime; } if (!WriteEntry(f, *pair.second)) { *err = strerror(errno); fclose(f); return false; } } fclose(f); if (platformAwareUnlink(path.str_) < 0) { *err = strerror(errno); return false; } if 
(rename(temp_path.c_str(), path.str_) < 0) { *err = strerror(errno); return false; } return true; } ninja-1.13.2/src/build_log.h000066400000000000000000000066541510764045400156160ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_BUILD_LOG_H_ #define NINJA_BUILD_LOG_H_ #include #include #include #include "hash_map.h" #include "load_status.h" #include "timestamp.h" #include "util.h" // uint64_t struct DiskInterface; struct Edge; /// Can answer questions about the manifest for the BuildLog. struct BuildLogUser { /// Return if a given output is no longer part of the build manifest. /// This is only called during recompaction and doesn't have to be fast. virtual bool IsPathDead(StringPiece s) const = 0; }; /// Store a log of every command ran for every build. /// It has a few uses: /// /// 1) (hashes of) command lines for existing output files, so we know /// when we need to rebuild due to the command changing /// 2) timing information, perhaps for generating reports /// 3) restat information struct BuildLog { BuildLog(); ~BuildLog(); /// Prepares writing to the log file without actually opening it - that will /// happen when/if it's needed bool OpenForWrite(const std::string& path, const BuildLogUser& user, std::string* err); bool RecordCommand(Edge* edge, int start_time, int end_time, TimeStamp mtime = 0); void Close(); /// Load the on-disk log. 
LoadStatus Load(const std::string& path, std::string* err); struct LogEntry { std::string output; uint64_t command_hash = 0; int start_time = 0; int end_time = 0; TimeStamp mtime = 0; static uint64_t HashCommand(StringPiece command); // Used by tests. bool operator==(const LogEntry& o) const { return output == o.output && command_hash == o.command_hash && start_time == o.start_time && end_time == o.end_time && mtime == o.mtime; } explicit LogEntry(std::string output); LogEntry(const std::string& output, uint64_t command_hash, int start_time, int end_time, TimeStamp mtime); }; /// Lookup a previously-run command by its output path. LogEntry* LookupByOutput(const std::string& path); /// Serialize an entry into a log file. bool WriteEntry(FILE* f, const LogEntry& entry); /// Rewrite the known log entries, throwing away old data. bool Recompact(const std::string& path, const BuildLogUser& user, std::string* err); /// Restat all outputs in the log bool Restat(StringPiece path, const DiskInterface& disk_interface, int output_count, char** outputs, std::string* err); typedef ExternalStringHashMap>::Type Entries; const Entries& entries() const { return entries_; } private: /// Should be called before using log_file_. When false is returned, errno /// will be set. bool OpenForWriteIfNeeded(); Entries entries_; FILE* log_file_ = nullptr; std::string log_file_path_; bool needs_recompaction_ = false; }; #endif // NINJA_BUILD_LOG_H_ ninja-1.13.2/src/build_log_perftest.cc000066400000000000000000000100421510764045400176520ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "build_log.h" #include "graph.h" #include "manifest_parser.h" #include "state.h" #include "util.h" #include "metrics.h" #ifndef _WIN32 #include #endif using namespace std; const char kTestFilename[] = "BuildLogPerfTest-tempfile"; struct NoDeadPaths : public BuildLogUser { virtual bool IsPathDead(StringPiece) const { return false; } }; bool WriteTestData(string* err) { BuildLog log; NoDeadPaths no_dead_paths; if (!log.OpenForWrite(kTestFilename, no_dead_paths, err)) return false; /* A histogram of command lengths in chromium. For example, 407 builds, 1.4% of all builds, had commands longer than 32 bytes but shorter than 64. 32 407 1.4% 64 183 0.6% 128 1461 5.1% 256 791 2.8% 512 1314 4.6% 1024 6114 21.3% 2048 11759 41.0% 4096 2056 7.2% 8192 4567 15.9% 16384 13 0.0% 32768 4 0.0% 65536 5 0.0% The average command length is 4.1 kB and there were 28674 commands in total, which makes for a total log size of ~120 MB (also counting output filenames). Based on this, write 30000 many 4 kB long command lines. */ // ManifestParser is the only object allowed to create Rules. const size_t kRuleSize = 4000; string long_rule_command = "gcc "; for (int i = 0; long_rule_command.size() < kRuleSize; ++i) { char buf[80]; sprintf(buf, "-I../../and/arbitrary/but/fairly/long/path/suffixed/%d ", i); long_rule_command += buf; } long_rule_command += "$in -o $out\n"; State state; ManifestParser parser(&state, NULL); if (!parser.ParseTest("rule cxx\n command = " + long_rule_command, err)) return false; // Create build edges. 
Using ManifestParser is as fast as using the State api // for edge creation, so just use that. const int kNumCommands = 30000; string build_rules; for (int i = 0; i < kNumCommands; ++i) { char buf[80]; sprintf(buf, "build input%d.o: cxx input%d.cc\n", i, i); build_rules += buf; } if (!parser.ParseTest(build_rules, err)) return false; for (int i = 0; i < kNumCommands; ++i) { log.RecordCommand(state.edges_[i], /*start_time=*/100 * i, /*end_time=*/100 * i + 1, /*mtime=*/0); } return true; } int main() { vector times; string err; if (!WriteTestData(&err)) { fprintf(stderr, "Failed to write test data: %s\n", err.c_str()); return 1; } { // Read once to warm up disk cache. BuildLog log; if (log.Load(kTestFilename, &err) == LOAD_ERROR) { fprintf(stderr, "Failed to read test data: %s\n", err.c_str()); return 1; } } const int kNumRepetitions = 5; for (int i = 0; i < kNumRepetitions; ++i) { int64_t start = GetTimeMillis(); BuildLog log; if (log.Load(kTestFilename, &err) == LOAD_ERROR) { fprintf(stderr, "Failed to read test data: %s\n", err.c_str()); return 1; } int delta = (int)(GetTimeMillis() - start); printf("%dms\n", delta); times.push_back(delta); } int min = times[0]; int max = times[0]; float total = 0; for (size_t i = 0; i < times.size(); ++i) { total += times[i]; if (times[i] < min) min = times[i]; else if (times[i] > max) max = times[i]; } printf("min %dms max %dms avg %.1fms\n", min, max, total / times.size()); platformAwareUnlink(kTestFilename); return 0; } ninja-1.13.2/src/build_log_test.cc000066400000000000000000000250601510764045400170030ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "build_log.h" #include "util.h" #include "test.h" #include #ifdef _WIN32 #include #include #else #include #include #endif #include namespace { const char kTestFilename[] = "BuildLogTest-tempfile"; struct BuildLogTest : public StateTestWithBuiltinRules, public BuildLogUser { virtual void SetUp() { // In case a crashing test left a stale file behind. platformAwareUnlink(kTestFilename); } virtual void TearDown() { platformAwareUnlink(kTestFilename); } virtual bool IsPathDead(StringPiece s) const { return false; } }; TEST_F(BuildLogTest, WriteRead) { AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n"); BuildLog log1; std::string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log1.RecordCommand(state_.edges_[0], 15, 18); log1.RecordCommand(state_.edges_[1], 20, 25); log1.Close(); BuildLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &err)); ASSERT_EQ("", err); ASSERT_EQ(2u, log1.entries().size()); ASSERT_EQ(2u, log2.entries().size()); BuildLog::LogEntry* e1 = log1.LookupByOutput("out"); ASSERT_TRUE(e1); BuildLog::LogEntry* e2 = log2.LookupByOutput("out"); ASSERT_TRUE(e2); ASSERT_TRUE(*e1 == *e2); ASSERT_EQ(15, e1->start_time); ASSERT_EQ("out", e1->output); } TEST_F(BuildLogTest, FirstWriteAddsSignature) { const char kExpectedVersion[] = "# ninja log vX\n"; const size_t kVersionPos = strlen(kExpectedVersion) - 2; // Points at 'X'. 
BuildLog log; std::string contents, err; EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log.Close(); ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err)); ASSERT_EQ("", err); if (contents.size() >= kVersionPos) contents[kVersionPos] = 'X'; EXPECT_EQ(kExpectedVersion, contents); // Opening the file anew shouldn't add a second version string. EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log.Close(); contents.clear(); ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err)); ASSERT_EQ("", err); if (contents.size() >= kVersionPos) contents[kVersionPos] = 'X'; EXPECT_EQ(kExpectedVersion, contents); } TEST_F(BuildLogTest, DoubleEntry) { FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v7\n"); fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command abc")); fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command def")); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_EQ("", err); BuildLog::LogEntry* e = log.LookupByOutput("out"); ASSERT_TRUE(e); ASSERT_NO_FATAL_FAILURE(AssertHash("command def", e->command_hash)); } TEST_F(BuildLogTest, Truncate) { AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n"); { BuildLog log1; std::string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log1.RecordCommand(state_.edges_[0], 15, 18); log1.RecordCommand(state_.edges_[1], 20, 25); log1.Close(); } #ifdef __USE_LARGEFILE64 struct stat64 statbuf; ASSERT_EQ(0, stat64(kTestFilename, &statbuf)); #else struct stat statbuf; ASSERT_EQ(0, stat(kTestFilename, &statbuf)); #endif ASSERT_GT(statbuf.st_size, 0); // For all possible truncations of the input file, assert that we don't // crash when parsing. 
for (off_t size = statbuf.st_size; size > 0; --size) { BuildLog log2; std::string err; EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log2.RecordCommand(state_.edges_[0], 15, 18); log2.RecordCommand(state_.edges_[1], 20, 25); log2.Close(); ASSERT_TRUE(Truncate(kTestFilename, size, &err)); BuildLog log3; err.clear(); ASSERT_TRUE(log3.Load(kTestFilename, &err) == LOAD_SUCCESS || !err.empty()); } } TEST_F(BuildLogTest, ObsoleteOldVersion) { FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v3\n"); fprintf(f, "123 456 0 out command\n"); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_NE(err.find("version"), std::string::npos); } TEST_F(BuildLogTest, SpacesInOutput) { FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v7\n"); fprintf(f, "123\t456\t456\tout with space\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command")); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_EQ("", err); BuildLog::LogEntry* e = log.LookupByOutput("out with space"); ASSERT_TRUE(e); ASSERT_EQ(123, e->start_time); ASSERT_EQ(456, e->end_time); ASSERT_EQ(456, e->mtime); ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash)); } TEST_F(BuildLogTest, DuplicateVersionHeader) { // Old versions of ninja accidentally wrote multiple version headers to the // build log on Windows. This shouldn't crash, and the second version header // should be ignored. 
FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v7\n"); fprintf(f, "123\t456\t456\tout\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command")); fprintf(f, "# ninja log v7\n"); fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command2")); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_EQ("", err); BuildLog::LogEntry* e = log.LookupByOutput("out"); ASSERT_TRUE(e); ASSERT_EQ(123, e->start_time); ASSERT_EQ(456, e->end_time); ASSERT_EQ(456, e->mtime); ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash)); e = log.LookupByOutput("out2"); ASSERT_TRUE(e); ASSERT_EQ(456, e->start_time); ASSERT_EQ(789, e->end_time); ASSERT_EQ(789, e->mtime); ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash)); } struct TestDiskInterface : public DiskInterface { TimeStamp Stat(const std::string& path, std::string* err) const override { return 4; } bool WriteFile(const std::string& path, const std::string& contents, bool crlf_on_windows) override { assert(false); return true; } bool MakeDir(const std::string& path) override { assert(false); return false; } Status ReadFile(const std::string& path, std::string* contents, std::string* err) override { assert(false); return NotFound; } int RemoveFile(const std::string& path) override { assert(false); return 0; } }; TEST_F(BuildLogTest, Restat) { FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v7\n" "1\t2\t3\tout\tcommand\n"); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_EQ("", err); BuildLog::LogEntry* e = log.LookupByOutput("out"); ASSERT_EQ(3, e->mtime); TestDiskInterface testDiskInterface; char out2[] = { 'o', 'u', 't', '2', 0 }; char* filter2[] = { out2 }; EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 1, filter2, &err)); ASSERT_EQ("", err); e = log.LookupByOutput("out"); ASSERT_EQ(3, e->mtime); // unchanged, since the filter doesn't match 
EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 0, NULL, &err)); ASSERT_EQ("", err); e = log.LookupByOutput("out"); ASSERT_EQ(4, e->mtime); } TEST_F(BuildLogTest, VeryLongInputLine) { // Ninja's build log buffer is currently 256kB. Lines longer than that are // silently ignored, but don't affect parsing of other lines. FILE* f = fopen(kTestFilename, "wb"); fprintf(f, "# ninja log v7\n"); fprintf(f, "123\t456\t456\tout\tcommand start"); for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i) fputs(" more_command", f); fprintf(f, "\n"); fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n", BuildLog::LogEntry::HashCommand("command2")); fclose(f); std::string err; BuildLog log; EXPECT_TRUE(log.Load(kTestFilename, &err)); ASSERT_EQ("", err); BuildLog::LogEntry* e = log.LookupByOutput("out"); ASSERT_EQ(NULL, e); e = log.LookupByOutput("out2"); ASSERT_TRUE(e); ASSERT_EQ(456, e->start_time); ASSERT_EQ(789, e->end_time); ASSERT_EQ(789, e->mtime); ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash)); } TEST_F(BuildLogTest, MultiTargetEdge) { AssertParse(&state_, "build out out.d: cat\n"); BuildLog log; log.RecordCommand(state_.edges_[0], 21, 22); ASSERT_EQ(2u, log.entries().size()); BuildLog::LogEntry* e1 = log.LookupByOutput("out"); ASSERT_TRUE(e1); BuildLog::LogEntry* e2 = log.LookupByOutput("out.d"); ASSERT_TRUE(e2); ASSERT_EQ("out", e1->output); ASSERT_EQ("out.d", e2->output); ASSERT_EQ(21, e1->start_time); ASSERT_EQ(21, e2->start_time); ASSERT_EQ(22, e2->end_time); ASSERT_EQ(22, e2->end_time); } struct BuildLogRecompactTest : public BuildLogTest { virtual bool IsPathDead(StringPiece s) const { return s == "out2"; } }; TEST_F(BuildLogRecompactTest, Recompact) { AssertParse(&state_, "build out: cat in\n" "build out2: cat in\n"); BuildLog log1; std::string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); // Record the same edge several times, to trigger recompaction // the next time the log is opened. 
for (int i = 0; i < 200; ++i) log1.RecordCommand(state_.edges_[0], 15, 18 + i); log1.RecordCommand(state_.edges_[1], 21, 22); log1.Close(); // Load... BuildLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &err)); ASSERT_EQ("", err); ASSERT_EQ(2u, log2.entries().size()); ASSERT_TRUE(log2.LookupByOutput("out")); ASSERT_TRUE(log2.LookupByOutput("out2")); // ...and force a recompaction. EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err)); log2.Close(); // "out2" is dead, it should've been removed. BuildLog log3; EXPECT_TRUE(log2.Load(kTestFilename, &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, log2.entries().size()); ASSERT_TRUE(log2.LookupByOutput("out")); ASSERT_FALSE(log2.LookupByOutput("out2")); } } // anonymous namespace ninja-1.13.2/src/build_test.cc000066400000000000000000004126731510764045400161540ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "build.h" #include #include #include #include "build_log.h" #include "deps_log.h" #include "exit_status.h" #include "graph.h" #include "status_printer.h" #include "test.h" using namespace std; struct CompareEdgesByOutput { static bool cmp(const Edge* a, const Edge* b) { return a->outputs_[0]->path() < b->outputs_[0]->path(); } }; /// Fixture for tests involving Plan. // Though Plan doesn't use State, it's useful to have one around // to create Nodes and Edges. 
struct PlanTest : public StateTestWithBuiltinRules { Plan plan_; /// Because FindWork does not return Edges in any sort of predictable order, // provide a means to get available Edges in order and in a format which is // easy to write tests around. void FindWorkSorted(deque* ret, int count) { for (int i = 0; i < count; ++i) { ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge); ret->push_back(edge); } ASSERT_FALSE(plan_.FindWork()); sort(ret->begin(), ret->end(), CompareEdgesByOutput::cmp); } void PrepareForTarget(const char* node, BuildLog *log=NULL) { string err; EXPECT_TRUE(plan_.AddTarget(GetNode(node), &err)); ASSERT_EQ("", err); plan_.PrepareQueue(); ASSERT_TRUE(plan_.more_to_do()); } void TestPoolWithDepthOne(const char *test_case); }; TEST_F(PlanTest, Basic) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n")); GetNode("mid")->MarkDirty(); GetNode("out")->MarkDirty(); PrepareForTarget("out"); Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("mid", edge->outputs_[0]->path()); ASSERT_FALSE(plan_.FindWork()); string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("mid", edge->inputs_[0]->path()); ASSERT_EQ("out", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); ASSERT_FALSE(plan_.more_to_do()); edge = plan_.FindWork(); ASSERT_EQ(0, edge); } // Test that two outputs from one rule can be handled as inputs to the next. 
TEST_F(PlanTest, DoubleOutputDirect) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid1 mid2\n" "build mid1 mid2: cat in\n")); GetNode("mid1")->MarkDirty(); GetNode("mid2")->MarkDirty(); GetNode("out")->MarkDirty(); PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat mid1 mid2 plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_FALSE(edge); // done } // Test that two outputs from one rule can eventually be routed to another. TEST_F(PlanTest, DoubleOutputIndirect) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat b1 b2\n" "build b1: cat a1\n" "build b2: cat a2\n" "build a1 a2: cat in\n")); GetNode("a1")->MarkDirty(); GetNode("a2")->MarkDirty(); GetNode("b1")->MarkDirty(); GetNode("b2")->MarkDirty(); GetNode("out")->MarkDirty(); PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat a1 plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat a2 plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat b1 b2 plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_FALSE(edge); // done } // Test that two edges from one output can both execute. 
TEST_F(PlanTest, DoubleDependent) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat a1 a2\n" "build a1: cat mid\n" "build a2: cat mid\n" "build mid: cat in\n")); GetNode("mid")->MarkDirty(); GetNode("a1")->MarkDirty(); GetNode("a2")->MarkDirty(); GetNode("out")->MarkDirty(); PrepareForTarget("out"); Edge* edge; edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat in string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat mid plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat mid plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); // cat a1 a2 plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_FALSE(edge); // done } void PlanTest::TestPoolWithDepthOne(const char* test_case) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, test_case)); GetNode("out1")->MarkDirty(); GetNode("out2")->MarkDirty(); string err; EXPECT_TRUE(plan_.AddTarget(GetNode("out1"), &err)); ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); plan_.PrepareQueue(); ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("out1", edge->outputs_[0]->path()); // This will be false since poolcat is serialized ASSERT_FALSE(plan_.FindWork()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("out2", edge->outputs_[0]->path()); ASSERT_FALSE(plan_.FindWork()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); ASSERT_FALSE(plan_.more_to_do()); edge = plan_.FindWork(); ASSERT_EQ(0, edge); } TEST_F(PlanTest, PoolWithDepthOne) { TestPoolWithDepthOne( "pool foobar\n" " depth 
= 1\n" "rule poolcat\n" " command = cat $in > $out\n" " pool = foobar\n" "build out1: poolcat in\n" "build out2: poolcat in\n"); } TEST_F(PlanTest, ConsolePool) { TestPoolWithDepthOne( "rule poolcat\n" " command = cat $in > $out\n" " pool = console\n" "build out1: poolcat in\n" "build out2: poolcat in\n"); } TEST_F(PlanTest, PoolsWithDepthTwo) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "pool foobar\n" " depth = 2\n" "pool bazbin\n" " depth = 2\n" "rule foocat\n" " command = cat $in > $out\n" " pool = foobar\n" "rule bazcat\n" " command = cat $in > $out\n" " pool = bazbin\n" "build out1: foocat in\n" "build out2: foocat in\n" "build out3: foocat in\n" "build outb1: bazcat in\n" "build outb2: bazcat in\n" "build outb3: bazcat in\n" " pool =\n" "build allTheThings: cat out1 out2 out3 outb1 outb2 outb3\n" )); // Mark all the out* nodes dirty for (int i = 0; i < 3; ++i) { GetNode("out" + string(1, '1' + static_cast(i)))->MarkDirty(); GetNode("outb" + string(1, '1' + static_cast(i)))->MarkDirty(); } GetNode("allTheThings")->MarkDirty(); PrepareForTarget("allTheThings"); deque edges; FindWorkSorted(&edges, 5); for (int i = 0; i < 4; ++i) { Edge *edge = edges[i]; ASSERT_EQ("in", edge->inputs_[0]->path()); string base_name(i < 2 ? 
"out" : "outb"); ASSERT_EQ(base_name + string(1, '1' + (i % 2)), edge->outputs_[0]->path()); } // outb3 is exempt because it has an empty pool Edge* edge = edges[4]; ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("outb3", edge->outputs_[0]->path()); // finish out1 string err; plan_.EdgeFinished(edges.front(), Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edges.pop_front(); // out3 should be available Edge* out3 = plan_.FindWork(); ASSERT_TRUE(out3); ASSERT_EQ("in", out3->inputs_[0]->path()); ASSERT_EQ("out3", out3->outputs_[0]->path()); ASSERT_FALSE(plan_.FindWork()); plan_.EdgeFinished(out3, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); ASSERT_FALSE(plan_.FindWork()); for (deque::iterator it = edges.begin(); it != edges.end(); ++it) { plan_.EdgeFinished(*it, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); } Edge* last = plan_.FindWork(); ASSERT_TRUE(last); ASSERT_EQ("allTheThings", last->outputs_[0]->path()); plan_.EdgeFinished(last, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); ASSERT_FALSE(plan_.more_to_do()); ASSERT_FALSE(plan_.FindWork()); } TEST_F(PlanTest, PoolWithRedundantEdges) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "pool compile\n" " depth = 1\n" "rule gen_foo\n" " command = touch foo.cpp\n" "rule gen_bar\n" " command = touch bar.cpp\n" "rule echo\n" " command = echo $out > $out\n" "build foo.cpp.obj: echo foo.cpp || foo.cpp\n" " pool = compile\n" "build bar.cpp.obj: echo bar.cpp || bar.cpp\n" " pool = compile\n" "build libfoo.a: echo foo.cpp.obj bar.cpp.obj\n" "build foo.cpp: gen_foo\n" "build bar.cpp: gen_bar\n" "build all: phony libfoo.a\n")); GetNode("foo.cpp")->MarkDirty(); GetNode("foo.cpp.obj")->MarkDirty(); GetNode("bar.cpp")->MarkDirty(); GetNode("bar.cpp.obj")->MarkDirty(); GetNode("libfoo.a")->MarkDirty(); GetNode("all")->MarkDirty(); PrepareForTarget("all"); Edge* edge = NULL; deque initial_edges; FindWorkSorted(&initial_edges, 2); edge = initial_edges[1]; // Foo first ASSERT_EQ("foo.cpp", 
edge->outputs_[0]->path()); string err; plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_FALSE(plan_.FindWork()); ASSERT_EQ("foo.cpp", edge->inputs_[0]->path()); ASSERT_EQ("foo.cpp", edge->inputs_[1]->path()); ASSERT_EQ("foo.cpp.obj", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = initial_edges[0]; // Now for bar ASSERT_EQ("bar.cpp", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_FALSE(plan_.FindWork()); ASSERT_EQ("bar.cpp", edge->inputs_[0]->path()); ASSERT_EQ("bar.cpp", edge->inputs_[1]->path()); ASSERT_EQ("bar.cpp.obj", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_FALSE(plan_.FindWork()); ASSERT_EQ("foo.cpp.obj", edge->inputs_[0]->path()); ASSERT_EQ("bar.cpp.obj", edge->inputs_[1]->path()); ASSERT_EQ("libfoo.a", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_FALSE(plan_.FindWork()); ASSERT_EQ("libfoo.a", edge->inputs_[0]->path()); ASSERT_EQ("all", edge->outputs_[0]->path()); plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_FALSE(edge); ASSERT_FALSE(plan_.more_to_do()); } TEST_F(PlanTest, PoolWithFailingEdge) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "pool foobar\n" " depth = 1\n" "rule poolcat\n" " command = cat $in > $out\n" " pool = foobar\n" "build out1: poolcat in\n" "build out2: poolcat in\n")); GetNode("out1")->MarkDirty(); GetNode("out2")->MarkDirty(); string err; EXPECT_TRUE(plan_.AddTarget(GetNode("out1"), &err)); ASSERT_EQ("", err); EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err)); ASSERT_EQ("", err); plan_.PrepareQueue(); 
ASSERT_TRUE(plan_.more_to_do()); Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("out1", edge->outputs_[0]->path()); // This will be false since poolcat is serialized ASSERT_FALSE(plan_.FindWork()); plan_.EdgeFinished(edge, Plan::kEdgeFailed, &err); ASSERT_EQ("", err); edge = plan_.FindWork(); ASSERT_TRUE(edge); ASSERT_EQ("in", edge->inputs_[0]->path()); ASSERT_EQ("out2", edge->outputs_[0]->path()); ASSERT_FALSE(plan_.FindWork()); plan_.EdgeFinished(edge, Plan::kEdgeFailed, &err); ASSERT_EQ("", err); ASSERT_TRUE(plan_.more_to_do()); // Jobs have failed edge = plan_.FindWork(); ASSERT_EQ(0, edge); } TEST_F(PlanTest, PriorityWithoutBuildLog) { // Without a build log, the critical time is equivalent to graph // depth. Test with the following graph: // a2 // | // a1 b1 // | | | // a0 b0 c0 // \ | / // out ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r a0 b0 c0\n" "build a0: r a1\n" "build a1: r a2\n" "build b0: r b1\n" "build c0: r b1\n" )); GetNode("a1")->MarkDirty(); GetNode("a0")->MarkDirty(); GetNode("b0")->MarkDirty(); GetNode("c0")->MarkDirty(); GetNode("out")->MarkDirty(); BuildLog log; PrepareForTarget("out", &log); EXPECT_EQ(GetNode("out")->in_edge()->critical_path_weight(), 1); EXPECT_EQ(GetNode("a0")->in_edge()->critical_path_weight(), 2); EXPECT_EQ(GetNode("b0")->in_edge()->critical_path_weight(), 2); EXPECT_EQ(GetNode("c0")->in_edge()->critical_path_weight(), 2); EXPECT_EQ(GetNode("a1")->in_edge()->critical_path_weight(), 3); const int n_edges = 5; const char *expected_order[n_edges] = { "a1", "a0", "b0", "c0", "out"}; for (int i = 0; i < n_edges; ++i) { Edge* edge = plan_.FindWork(); ASSERT_TRUE(edge != nullptr); EXPECT_EQ(expected_order[i], edge->outputs_[0]->path()); std::string err; ASSERT_TRUE(plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err)); EXPECT_EQ(err, ""); } EXPECT_FALSE(plan_.FindWork()); } /// Fake implementation of CommandRunner, useful 
for tests. struct FakeCommandRunner : public CommandRunner { explicit FakeCommandRunner(VirtualFileSystem* fs) : max_active_edges_(1), fs_(fs) {} // CommandRunner impl virtual size_t CanRunMore() const; virtual bool StartCommand(Edge* edge); virtual bool WaitForCommand(Result* result); virtual vector GetActiveEdges(); virtual void Abort(); vector commands_ran_; vector active_edges_; size_t max_active_edges_; VirtualFileSystem* fs_; }; struct BuildTest : public StateTestWithBuiltinRules, public BuildLogUser { BuildTest() : config_(MakeConfig()), command_runner_(&fs_), status_(config_), builder_(&state_, config_, NULL, NULL, &fs_, &status_, 0) { } explicit BuildTest(DepsLog* log) : config_(MakeConfig()), command_runner_(&fs_), status_(config_), builder_(&state_, config_, NULL, log, &fs_, &status_, 0) {} virtual void SetUp() { StateTestWithBuiltinRules::SetUp(); builder_.command_runner_.reset(&command_runner_); AssertParse(&state_, "build cat1: cat in1\n" "build cat2: cat in1 in2\n" "build cat12: cat cat1 cat2\n"); fs_.Create("in1", ""); fs_.Create("in2", ""); } ~BuildTest() { builder_.command_runner_.release(); } virtual bool IsPathDead(StringPiece s) const { return false; } /// Rebuild target in the 'working tree' (fs_). /// State of command_runner_ and logs contents (if specified) ARE MODIFIED. /// Handy to check for NOOP builds, and higher-level rebuild tests. void RebuildTarget(const string& target, const char* manifest, const char* log_path = NULL, const char* deps_path = NULL, State* state = NULL); // Mark a path dirty. 
void Dirty(const string& path); BuildConfig MakeConfig() { BuildConfig config; config.verbosity = BuildConfig::QUIET; return config; } BuildConfig config_; FakeCommandRunner command_runner_; VirtualFileSystem fs_; StatusPrinter status_; Builder builder_; }; void BuildTest::RebuildTarget(const string& target, const char* manifest, const char* log_path, const char* deps_path, State* state) { State local_state, *pstate = &local_state; if (state) pstate = state; ASSERT_NO_FATAL_FAILURE(AddCatRule(pstate)); AssertParse(pstate, manifest); string err; BuildLog build_log, *pbuild_log = NULL; if (log_path) { ASSERT_TRUE(build_log.Load(log_path, &err)); ASSERT_TRUE(build_log.OpenForWrite(log_path, *this, &err)); ASSERT_EQ("", err); pbuild_log = &build_log; } DepsLog deps_log, *pdeps_log = NULL; if (deps_path) { ASSERT_TRUE(deps_log.Load(deps_path, pstate, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_path, &err)); ASSERT_EQ("", err); pdeps_log = &deps_log; } Builder builder(pstate, config_, pbuild_log, pdeps_log, &fs_, &status_, 0); EXPECT_TRUE(builder.AddTarget(target, &err)); command_runner_.commands_ran_.clear(); builder.command_runner_.reset(&command_runner_); if (!builder.AlreadyUpToDate()) { ExitStatus build_res = builder.Build(&err); EXPECT_EQ(build_res, ExitSuccess); } builder.command_runner_.release(); } size_t FakeCommandRunner::CanRunMore() const { if (active_edges_.size() < max_active_edges_) return SIZE_MAX; return 0; } bool FakeCommandRunner::StartCommand(Edge* edge) { assert(active_edges_.size() < max_active_edges_); assert(find(active_edges_.begin(), active_edges_.end(), edge) == active_edges_.end()); commands_ran_.push_back(edge->EvaluateCommand()); if (edge->rule().name() == "cat" || edge->rule().name() == "cat_rsp" || edge->rule().name() == "cat_rsp_out" || edge->rule().name() == "cc" || edge->rule().name() == "cp_multi_msvc" || edge->rule().name() == "cp_multi_gcc" || edge->rule().name() == "touch" || edge->rule().name() == "touch-interrupt" || 
edge->rule().name() == "touch-fail-tick2") { for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { fs_->Create((*out)->path(), ""); } } else if (edge->rule().name() == "true" || edge->rule().name() == "fail" || edge->rule().name() == "interrupt" || edge->rule().name() == "console") { // Don't do anything. } else if (edge->rule().name() == "cp") { assert(!edge->inputs_.empty()); assert(edge->outputs_.size() == 1); string content; string err; if (fs_->ReadFile(edge->inputs_[0]->path(), &content, &err) == DiskInterface::Okay) fs_->WriteFile(edge->outputs_[0]->path(), content, false); } else if (edge->rule().name() == "touch-implicit-dep-out") { string dep = edge->GetBinding("test_dependency"); fs_->Tick(); fs_->Create(dep, ""); fs_->Tick(); for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { fs_->Create((*out)->path(), ""); } } else if (edge->rule().name() == "touch-out-implicit-dep") { string dep = edge->GetBinding("test_dependency"); for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { fs_->Create((*out)->path(), ""); } fs_->Tick(); fs_->Create(dep, ""); } else if (edge->rule().name() == "generate-depfile") { string dep = edge->GetBinding("test_dependency"); bool touch_dep = edge->GetBindingBool("touch_dependency"); string depfile = edge->GetUnescapedDepfile(); if (touch_dep) { fs_->Tick(); fs_->Create(dep, ""); } string contents; for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { contents += (*out)->path() + ": " + dep + "\n"; fs_->Create((*out)->path(), ""); } fs_->Create(depfile, contents); } else if (edge->rule().name() == "long-cc") { string dep = edge->GetBinding("test_dependency"); string depfile = edge->GetUnescapedDepfile(); string contents; for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { fs_->Tick(); fs_->Tick(); fs_->Tick(); fs_->Create((*out)->path(), ""); contents 
+= (*out)->path() + ": " + dep + "\n"; } if (!dep.empty() && !depfile.empty()) fs_->Create(depfile, contents); } else { printf("unknown command\n"); return false; } active_edges_.push_back(edge); // Allow tests to control the order by the name of the first output. sort(active_edges_.begin(), active_edges_.end(), CompareEdgesByOutput::cmp); return true; } bool FakeCommandRunner::WaitForCommand(Result* result) { if (active_edges_.empty()) return false; // All active edges were already completed immediately when started, // so we can pick any edge here. Pick the last edge. Tests can // control the order of edges by the name of the first output. vector::iterator edge_iter = active_edges_.end() - 1; Edge* edge = *edge_iter; result->edge = edge; if (edge->rule().name() == "interrupt" || edge->rule().name() == "touch-interrupt") { result->status = ExitInterrupted; return true; } if (edge->rule().name() == "console") { if (edge->use_console()) result->status = ExitSuccess; else result->status = ExitFailure; active_edges_.erase(edge_iter); return true; } if (edge->rule().name() == "cp_multi_msvc") { const std::string prefix = edge->GetBinding("msvc_deps_prefix"); for (std::vector::iterator in = edge->inputs_.begin(); in != edge->inputs_.end(); ++in) { result->output += prefix + (*in)->path() + '\n'; } } if (edge->rule().name() == "fail" || (edge->rule().name() == "touch-fail-tick2" && fs_->now_ == 2)) result->status = ExitFailure; else result->status = ExitSuccess; // This rule simulates an external process modifying files while the build command runs. // See TestInputMtimeRaceCondition and TestInputMtimeRaceConditionWithDepFile. // Note: only the first and third time the rule is run per test is the file modified, so // the test can verify that subsequent runs without the race have no work to do. 
if (edge->rule().name() == "long-cc") { string dep = edge->GetBinding("test_dependency"); if (fs_->now_ == 4) fs_->files_[dep].mtime = 3; if (fs_->now_ == 10) fs_->files_[dep].mtime = 9; } // Provide a way for test cases to verify when an edge finishes that // some other edge is still active. This is useful for test cases // covering behavior involving multiple active edges. const string& verify_active_edge = edge->GetBinding("verify_active_edge"); if (!verify_active_edge.empty()) { bool verify_active_edge_found = false; for (vector::iterator i = active_edges_.begin(); i != active_edges_.end(); ++i) { if (!(*i)->outputs_.empty() && (*i)->outputs_[0]->path() == verify_active_edge) { verify_active_edge_found = true; } } EXPECT_TRUE(verify_active_edge_found); } active_edges_.erase(edge_iter); return true; } vector FakeCommandRunner::GetActiveEdges() { return active_edges_; } void FakeCommandRunner::Abort() { active_edges_.clear(); } void BuildTest::Dirty(const string& path) { Node* node = GetNode(path); node->MarkDirty(); // If it's an input file, mark that we've already stat()ed it and // it's missing. if (!node->in_edge()) node->MarkMissing(); } TEST_F(BuildTest, NoWork) { string err; EXPECT_TRUE(builder_.AlreadyUpToDate()); } TEST_F(BuildTest, OneStep) { // Given a dirty target with one ready input, // we should rebuild the target. Dirty("cat1"); string err; EXPECT_TRUE(builder_.AddTarget("cat1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, OneStep2) { // Given a target with one dirty input, // we should rebuild the target. 
Dirty("cat1"); string err; EXPECT_TRUE(builder_.AddTarget("cat1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in1 > cat1", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, TwoStep) { string err; EXPECT_TRUE(builder_.AddTarget("cat12", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); // Depending on how the pointers work out, we could've ran // the first two commands in either order. EXPECT_TRUE((command_runner_.commands_ran_[0] == "cat in1 > cat1" && command_runner_.commands_ran_[1] == "cat in1 in2 > cat2") || (command_runner_.commands_ran_[1] == "cat in1 > cat1" && command_runner_.commands_ran_[0] == "cat in1 in2 > cat2")); EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[2]); fs_.Tick(); // Modifying in2 requires rebuilding one intermediate file // and the final file. 
fs_.Create("in2", ""); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("cat12", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(5u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in1 in2 > cat2", command_runner_.commands_ran_[3]); EXPECT_EQ("cat cat1 cat2 > cat12", command_runner_.commands_ran_[4]); } TEST_F(BuildTest, TwoOutputs) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build out1 out2: touch in.txt\n")); fs_.Create("in.txt", ""); string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("touch out1 out2", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, ImplicitOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "build out | out.imp: touch in.txt\n")); fs_.Create("in.txt", ""); string err; EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[0]); } // Test case from // https://github.com/ninja-build/ninja/issues/148 TEST_F(BuildTest, MultiOutIn) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build in1 otherfile: touch in\n" "build out: touch in | in1\n")); fs_.Create("in", ""); fs_.Tick(); fs_.Create("in1", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); } TEST_F(BuildTest, Chain) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build c2: cat c1\n" "build c3: cat c2\n" "build c4: cat c3\n" "build c5: cat c4\n")); fs_.Create("c1", ""); string err; EXPECT_TRUE(builder_.AddTarget("c5", &err)); ASSERT_EQ("", err); 
EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(4u, command_runner_.commands_ran_.size()); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("c5", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); fs_.Tick(); fs_.Create("c3", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("c5", &err)); ASSERT_EQ("", err); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // 3->4, 4->5 } TEST_F(BuildTest, MissingInput) { // Input is referenced by build file, but no rule for it. string err; Dirty("in1"); EXPECT_FALSE(builder_.AddTarget("cat1", &err)); EXPECT_EQ("'in1', needed by 'cat1', missing and no known rule to make it", err); } TEST_F(BuildTest, MissingTarget) { // Target is not referenced by build file. string err; EXPECT_FALSE(builder_.AddTarget("meow", &err)); EXPECT_EQ("unknown target: 'meow'", err); } TEST_F(BuildTest, MissingInputTarget) { // Target is a missing input file string err; Dirty("in1"); EXPECT_FALSE(builder_.AddTarget("in1", &err)); EXPECT_EQ("'in1' missing and no known rule to make it", err); } TEST_F(BuildTest, MakeDirs) { string err; #ifdef _WIN32 ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build subdir\\dir2\\file: cat in1\n")); #else ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build subdir/dir2/file: cat in1\n")); #endif EXPECT_TRUE(builder_.AddTarget("subdir/dir2/file", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(2u, fs_.directories_made_.size()); EXPECT_EQ("subdir", fs_.directories_made_[0]); EXPECT_EQ("subdir/dir2", fs_.directories_made_[1]); } TEST_F(BuildTest, DepFileMissing) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build fo$ o.o: cc foo.c\n")); fs_.Create("foo.c", ""); 
EXPECT_TRUE(builder_.AddTarget("fo o.o", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, fs_.files_read_.size()); EXPECT_EQ("fo o.o.d", fs_.files_read_[0]); } TEST_F(BuildTest, DepFileOK) { string err; int orig_edges = static_cast(state_.edges_.size()); ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build foo.o: cc foo.c\n")); Edge* edge = state_.edges_.back(); fs_.Create("foo.c", ""); GetNode("bar.h")->MarkDirty(); // Mark bar.h as missing. fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n"); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, fs_.files_read_.size()); EXPECT_EQ("foo.o.d", fs_.files_read_[0]); // Expect one new edge generating foo.o. Loading the depfile should have // added nodes, but not phony edges to the graph. ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size()); // Verify that nodes for blah.h and bar.h were added and that they // are marked as generated by a dep loader. ASSERT_FALSE(state_.LookupNode("foo.o")->generated_by_dep_loader()); ASSERT_FALSE(state_.LookupNode("foo.c")->generated_by_dep_loader()); ASSERT_TRUE(state_.LookupNode("blah.h")); ASSERT_TRUE(state_.LookupNode("blah.h")->generated_by_dep_loader()); ASSERT_TRUE(state_.LookupNode("bar.h")); ASSERT_TRUE(state_.LookupNode("bar.h")->generated_by_dep_loader()); // Expect our edge to now have three inputs: foo.c and two headers. ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input. 
ASSERT_EQ("cc foo.c", edge->EvaluateCommand()); } TEST_F(BuildTest, DepFileParseError) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build foo.o: cc foo.c\n")); fs_.Create("foo.c", ""); fs_.Create("foo.o.d", "randomtext\n"); EXPECT_FALSE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ("foo.o.d: expected ':' in depfile", err); } TEST_F(BuildTest, EncounterReadyTwice) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build c: touch\n" "build b: touch || c\n" "build a: touch | b || c\n")); vector c_out = GetNode("c")->out_edges(); ASSERT_EQ(2u, c_out.size()); EXPECT_EQ("b", c_out[0]->outputs_[0]->path()); EXPECT_EQ("a", c_out[1]->outputs_[0]->path()); fs_.Create("b", ""); EXPECT_TRUE(builder_.AddTarget("a", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, OrderOnlyDeps) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build foo.o: cc foo.c || otherfile\n")); Edge* edge = state_.edges_.back(); fs_.Create("foo.c", ""); fs_.Create("otherfile", ""); fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n"); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); ASSERT_EQ("", err); // One explicit, two implicit, one order only. ASSERT_EQ(4u, edge->inputs_.size()); EXPECT_EQ(2, edge->implicit_deps_); EXPECT_EQ(1, edge->order_only_deps_); // Verify the inputs are in the order we expect // (explicit then implicit then orderonly). EXPECT_EQ("foo.c", edge->inputs_[0]->path()); EXPECT_EQ("blah.h", edge->inputs_[1]->path()); EXPECT_EQ("bar.h", edge->inputs_[2]->path()); EXPECT_EQ("otherfile", edge->inputs_[3]->path()); // Expect the command line we generate to only use the original input. ASSERT_EQ("cc foo.c", edge->EvaluateCommand()); // explicit dep dirty, expect a rebuild. 
EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); fs_.Tick(); // Recreate the depfile, as it should have been deleted by the build. fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n"); // implicit dep dirty, expect a rebuild. fs_.Create("blah.h", ""); fs_.Create("bar.h", ""); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); fs_.Tick(); // Recreate the depfile, as it should have been deleted by the build. fs_.Create("foo.o.d", "foo.o: blah.h bar.h\n"); // order only dep dirty, no rebuild. fs_.Create("otherfile", ""); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); // implicit dep missing, expect rebuild. fs_.RemoveFile("bar.h"); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, RebuildOrderOnlyDeps) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n" "rule true\n command = true\n" "build oo.h: cc oo.h.in\n" "build foo.o: cc foo.c || oo.h\n")); fs_.Create("foo.c", ""); fs_.Create("oo.h.in", ""); // foo.o and order-only dep dirty, build both. EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // all clean, no rebuild. command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); // order-only dep missing, build it only. 
fs_.RemoveFile("oo.h"); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]); fs_.Tick(); // order-only dep dirty, build it only. fs_.Create("oo.h.in", ""); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("foo.o", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); ASSERT_EQ("cc oo.h.in", command_runner_.commands_ran_[0]); } #ifdef _WIN32 TEST_F(BuildTest, DepFileCanonicalize) { string err; int orig_edges = state_.edges_.size(); ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n command = cc $in\n depfile = $out.d\n" "build gen/stuff\\things/foo.o: cc x\\y/z\\foo.c\n")); fs_.Create("x/y/z/foo.c", ""); GetNode("bar.h")->MarkDirty(); // Mark bar.h as missing. // Note, different slashes from manifest. fs_.Create("gen/stuff\\things/foo.o.d", "gen\\stuff\\things\\foo.o: blah.h bar.h\n"); EXPECT_TRUE(builder_.AddTarget("gen/stuff/things/foo.o", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, fs_.files_read_.size()); // The depfile path does not get Canonicalize as it seems unnecessary. EXPECT_EQ("gen/stuff\\things/foo.o.d", fs_.files_read_[0]); // Expect one new edge enerating foo.o. ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. Edge* edge = state_.edges_.back(); ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input, and // using the slashes from the manifest. 
ASSERT_EQ("cc x\\y/z\\foo.c", edge->EvaluateCommand()); } #endif TEST_F(BuildTest, Phony) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat bar.cc\n" "build all: phony out\n")); fs_.Create("bar.cc", ""); EXPECT_TRUE(builder_.AddTarget("all", &err)); ASSERT_EQ("", err); // Only one command to run, because phony runs no command. EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, PhonyNoWork) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat bar.cc\n" "build all: phony out\n")); fs_.Create("bar.cc", ""); fs_.Create("out", ""); EXPECT_TRUE(builder_.AddTarget("all", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); } // Test a self-referencing phony. Ideally this should not work, but // ninja 1.7 and below tolerated and CMake 2.8.12.x and 3.0.x both // incorrectly produce it. We tolerate it for compatibility. TEST_F(BuildTest, PhonySelfReference) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build a: phony a\n")); EXPECT_TRUE(builder_.AddTarget("a", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); } // There are 6 different cases for phony rules: // // 1. output edge does not exist, inputs are not real // 2. output edge does not exist, no inputs // 3. output edge does not exist, inputs are real, newest mtime is M // 4. output edge is real, inputs are not real // 5. output edge is real, no inputs // 6. output edge is real, inputs are real, newest mtime is M // // Expected results : // 1. Edge is marked as clean, mtime is newest mtime of dependents. // Touching inputs will cause dependents to rebuild. // 2. Edge is marked as dirty, causing dependent edges to always rebuild // 3. Edge is marked as clean, mtime is newest mtime of dependents. // Touching inputs will cause dependents to rebuild. // 4. 
Edge is marked as clean, mtime is newest mtime of dependents. // Touching inputs will cause dependents to rebuild. // 5. Edge is marked as dirty, causing dependent edges to always rebuild // 6. Edge is marked as clean, mtime is newest mtime of dependents. // Touching inputs will cause dependents to rebuild. void TestPhonyUseCase(BuildTest* t, int i) { State& state_ = t->state_; Builder& builder_ = t->builder_; FakeCommandRunner& command_runner_ = t->command_runner_; VirtualFileSystem& fs_ = t->fs_; string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build notreal: phony blank\n" "build phony1: phony notreal\n" "build phony2: phony\n" "build phony3: phony blank\n" "build phony4: phony notreal\n" "build phony5: phony\n" "build phony6: phony blank\n" "\n" "build test1: touch phony1\n" "build test2: touch phony2\n" "build test3: touch phony3\n" "build test4: touch phony4\n" "build test5: touch phony5\n" "build test6: touch phony6\n" )); // Set up test. builder_.command_runner_.release(); // BuildTest owns the CommandRunner builder_.command_runner_.reset(&command_runner_); fs_.Create("blank", ""); // a "real" file EXPECT_TRUE(builder_.AddTarget("test1", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("test2", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("test3", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("test4", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("test5", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("test6", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); string ci; ci += static_cast('0' + i); // Tests 1, 3, 4, and 6 should rebuild when the input is updated. 
if (i != 2 && i != 5) { Node* testNode = t->GetNode("test" + ci); Node* phonyNode = t->GetNode("phony" + ci); Node* inputNode = t->GetNode("blank"); state_.Reset(); TimeStamp startTime = fs_.now_; // Build number 1 EXPECT_TRUE(builder_.AddTarget("test" + ci, &err)); ASSERT_EQ("", err); if (!builder_.AlreadyUpToDate()) { EXPECT_EQ(builder_.Build(&err), ExitSuccess); } ASSERT_EQ("", err); // Touch the input file state_.Reset(); command_runner_.commands_ran_.clear(); fs_.Tick(); fs_.Create("blank", ""); // a "real" file EXPECT_TRUE(builder_.AddTarget("test" + ci, &err)); ASSERT_EQ("", err); // Second build, expect testN edge to be rebuilt // and phonyN node's mtime to be updated. EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ(string("touch test") + ci, command_runner_.commands_ran_[0]); EXPECT_TRUE(builder_.AlreadyUpToDate()); TimeStamp inputTime = inputNode->mtime(); EXPECT_FALSE(phonyNode->exists()); EXPECT_FALSE(phonyNode->dirty()); EXPECT_GT(phonyNode->mtime(), startTime); EXPECT_EQ(phonyNode->mtime(), inputTime); ASSERT_TRUE(testNode->Stat(&fs_, &err)); EXPECT_TRUE(testNode->exists()); EXPECT_GT(testNode->mtime(), startTime); } else { // Tests 2 and 5: Expect dependents to always rebuild. 
state_.Reset(); command_runner_.commands_ran_.clear(); fs_.Tick(); command_runner_.commands_ran_.clear(); EXPECT_TRUE(builder_.AddTarget("test" + ci, &err)); ASSERT_EQ("", err); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("touch test" + ci, command_runner_.commands_ran_[0]); state_.Reset(); command_runner_.commands_ran_.clear(); EXPECT_TRUE(builder_.AddTarget("test" + ci, &err)); ASSERT_EQ("", err); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("touch test" + ci, command_runner_.commands_ran_[0]); } } TEST_F(BuildTest, PhonyUseCase1) { TestPhonyUseCase(this, 1); } TEST_F(BuildTest, PhonyUseCase2) { TestPhonyUseCase(this, 2); } TEST_F(BuildTest, PhonyUseCase3) { TestPhonyUseCase(this, 3); } TEST_F(BuildTest, PhonyUseCase4) { TestPhonyUseCase(this, 4); } TEST_F(BuildTest, PhonyUseCase5) { TestPhonyUseCase(this, 5); } TEST_F(BuildTest, PhonyUseCase6) { TestPhonyUseCase(this, 6); } TEST_F(BuildTest, Fail) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule fail\n" " command = fail\n" "build out1: fail\n")); string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); ASSERT_EQ("subcommand failed", err); } TEST_F(BuildTest, SwallowFailures) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule fail\n" " command = fail\n" "build out1: fail\n" "build out2: fail\n" "build out3: fail\n" "build all: phony out1 out2 out3\n")); // Swallow two failures, die on the third. 
config_.failures_allowed = 3; string err; EXPECT_TRUE(builder_.AddTarget("all", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); ASSERT_EQ("subcommands failed", err); } TEST_F(BuildTest, SwallowFailuresLimit) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule fail\n" " command = fail\n" "build out1: fail\n" "build out2: fail\n" "build out3: fail\n" "build final: cat out1 out2 out3\n")); // Swallow ten failures; we should stop before building final. config_.failures_allowed = 11; string err; EXPECT_TRUE(builder_.AddTarget("final", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); ASSERT_EQ("cannot make progress due to previous errors", err); } TEST_F(BuildTest, SwallowFailuresPool) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "pool failpool\n" " depth = 1\n" "rule fail\n" " command = fail\n" " pool = failpool\n" "build out1: fail\n" "build out2: fail\n" "build out3: fail\n" "build final: cat out1 out2 out3\n")); // Swallow ten failures; we should stop before building final. 
config_.failures_allowed = 11; string err; EXPECT_TRUE(builder_.AddTarget("final", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); ASSERT_EQ("cannot make progress due to previous errors", err); } TEST_F(BuildTest, PoolEdgesReadyButNotWanted) { fs_.Create("x", ""); const char* manifest = "pool some_pool\n" " depth = 4\n" "rule touch\n" " command = touch $out\n" " pool = some_pool\n" "rule cc\n" " command = touch grit\n" "\n" "build B.d.stamp: cc | x\n" "build C.stamp: touch B.d.stamp\n" "build final.stamp: touch || C.stamp\n"; RebuildTarget("final.stamp", manifest); fs_.RemoveFile("B.d.stamp"); State save_state; RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state); EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0); } struct BuildWithLogTest : public BuildTest { BuildWithLogTest() { builder_.SetBuildLog(&build_log_); } BuildLog build_log_; }; TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" " generator = 1\n" "build out.imp: touch | in\n")); fs_.Create("out.imp", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_TRUE(GetNode("out.imp")->dirty()); } TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch-implicit-dep-out\n" " command = sleep 1 ; touch $test_dependency ; sleep 1 ; touch $out\n" " generator = 1\n" "build out.imp: touch-implicit-dep-out | inimp inimp2\n" " test_dependency = inimp\n")); fs_.Create("inimp", ""); fs_.Create("out.imp", ""); fs_.Tick(); fs_.Create("inimp2", ""); fs_.Tick(); string err; EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_TRUE(builder_.AlreadyUpToDate()); 
command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_TRUE(builder_.AlreadyUpToDate()); EXPECT_FALSE(GetNode("out.imp")->dirty()); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); fs_.Tick(); fs_.Create("inimp", ""); EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_TRUE(builder_.AlreadyUpToDate()); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); EXPECT_TRUE(builder_.AddTarget("out.imp", &err)); EXPECT_TRUE(builder_.AlreadyUpToDate()); EXPECT_FALSE(GetNode("out.imp")->dirty()); } TEST_F(BuildWithLogTest, NotInLogButOnDisk) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc\n" "build out1: cc in\n")); // Create input/output that would be considered up to date when // not considering the command line hash. fs_.Create("in", ""); fs_.Create("out1", ""); string err; // Because it's not in the log, it should not be up-to-date until // we build again. 
EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_TRUE(builder_.AlreadyUpToDate()); } TEST_F(BuildWithLogTest, RebuildAfterFailure) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch-fail-tick2\n" " command = touch-fail-tick2\n" "build out1: touch-fail-tick2 in\n")); string err; fs_.Create("in", ""); // Run once successfully to get out1 in the log EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(1u, command_runner_.commands_ran_.size()); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); fs_.Tick(); fs_.Create("in", ""); // Run again with a failure that updates the output file timestamp EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_EQ(builder_.Build(&err), ExitFailure); EXPECT_EQ("subcommand failed", err); EXPECT_EQ(1u, command_runner_.commands_ran_.size()); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); fs_.Tick(); // Run again, should rerun even though the output file is up to date on disk EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("", err); } TEST_F(BuildWithLogTest, RebuildWithNoInputs) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch\n" "build out1: touch\n" "build out2: touch in\n")); string err; fs_.Create("in", ""); EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_TRUE(builder_.AddTarget("out2", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(2u, command_runner_.commands_ran_.size()); command_runner_.commands_ran_.clear(); state_.Reset(); fs_.Tick(); 
fs_.Create("in", ""); EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_TRUE(builder_.AddTarget("out2", &err)); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildWithLogTest, RestatTest) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" " restat = 1\n" "rule cc\n" " command = cc\n" " restat = 1\n" "build out1: cc in\n" "build out2: true out1\n" "build out3: cat out2\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Create("out3", ""); fs_.Tick(); fs_.Create("in", ""); // Do a pre-build so that there's commands in the log for the outputs, // otherwise, the lack of an entry in the build log will cause out3 to rebuild // regardless of restat. string err; EXPECT_TRUE(builder_.AddTarget("out3", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); EXPECT_EQ(size_t(3), command_runner_.commands_ran_.size()); EXPECT_EQ(3, builder_.plan_.command_edge_count()); command_runner_.commands_ran_.clear(); state_.Reset(); fs_.Tick(); fs_.Create("in", ""); // "cc" touches out1, so we should build out2. But because "true" does not // touch out2, we should cancel the build of out3. EXPECT_TRUE(builder_.AddTarget("out3", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // If we run again, it should be a no-op, because the build log has recorded // that we've already built out2 with an input timestamp of 2 (from out1). command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out3", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); fs_.Tick(); fs_.Create("in", ""); // The build log entry should not, however, prevent us from rebuilding out2 // if out1 changes. 
command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out3", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); } TEST_F(BuildWithLogTest, RestatMissingFile) { // If a restat rule doesn't create its output, and the output didn't // exist before the rule was run, consider that behavior equivalent // to a rule that doesn't modify its existent output file. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" " restat = 1\n" "rule cc\n" " command = cc\n" "build out1: true in\n" "build out2: cc out1\n")); fs_.Create("in", ""); fs_.Create("out2", ""); // Do a pre-build so that there's commands in the log for the outputs, // otherwise, the lack of an entry in the build log will cause out2 to rebuild // regardless of restat. string err; EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); command_runner_.commands_ran_.clear(); state_.Reset(); fs_.Tick(); fs_.Create("in", ""); fs_.Create("out2", ""); // Run a build, expect only the first command to run. // It doesn't touch its output (due to being the "true" command), so // we shouldn't run the dependent build. 
EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildWithLogTest, RestatSingleDependentOutputDirty) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" " restat = 1\n" "rule touch\n" " command = touch\n" "build out1: true in\n" "build out2 out3: touch out1\n" "build out4: touch out2\n" )); // Create the necessary files fs_.Create("in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out4", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); fs_.Tick(); fs_.Create("in", ""); fs_.RemoveFile("out3"); // Since "in" is missing, out1 will be built. Since "out3" is missing, // out2 and out3 will be built even though "in" is not touched when built. // Then, since out2 is rebuilt, out4 should be rebuilt -- the restat on the // "true" rule should not lead to the "touch" edge writing out2 and out3 being // cleared. 
command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out4", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); } // Test scenario, in which an input file is removed, but output isn't changed // https://github.com/ninja-build/ninja/issues/295 TEST_F(BuildWithLogTest, RestatMissingInput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" " depfile = $out.d\n" " restat = 1\n" "rule cc\n" " command = cc\n" "build out1: true in\n" "build out2: cc out1\n")); // Create all necessary files fs_.Create("in", ""); // The implicit dependencies and the depfile itself // are newer than the output TimeStamp restat_mtime = fs_.Tick(); fs_.Create("out1.d", "out1: will.be.deleted restat.file\n"); fs_.Create("will.be.deleted", ""); fs_.Create("restat.file", ""); // Run the build, out1 and out2 get built string err; EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // See that an entry in the logfile is created, capturing // the right mtime BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1"); ASSERT_TRUE(NULL != log_entry); ASSERT_EQ(restat_mtime, log_entry->mtime); // Now remove a file, referenced from depfile, so that target becomes // dirty, but the output does not change fs_.RemoveFile("will.be.deleted"); // Trigger the build again - only out1 gets built command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // Check that the logfile entry remains correctly set log_entry = build_log_.LookupByOutput("out1"); ASSERT_TRUE(NULL != log_entry); ASSERT_EQ(restat_mtime, log_entry->mtime); } TEST_F(BuildWithLogTest, 
RestatInputChangesDueToRule) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule generate-depfile\n" " command = sleep 1 ; touch $touch_dependency; touch $out ; echo \"$out: $test_dependency\" > $depfile\n" "build out1: generate-depfile || cat1\n" " test_dependency = in2\n" " touch_dependency = 1\n" " restat = 1\n" " depfile = out.d\n")); // Perform the first build. out1 is a restat rule, so its recorded mtime in the build // log should be the time the command completes, not the time the command started. One // of out1's discovered dependencies will have a newer mtime than when out1 started // running, due to its command touching the dependency itself. string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); EXPECT_EQ(size_t(2), command_runner_.commands_ran_.size()); EXPECT_EQ(2, builder_.plan_.command_edge_count()); BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1"); ASSERT_TRUE(NULL != log_entry); ASSERT_EQ(2u, log_entry->mtime); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); fs_.Tick(); fs_.Create("in1", ""); // Touching a dependency of an order-only dependency of out1 should not cause out1 to // rebuild. 
If out1 were not a restat rule, then it would rebuild here because its // recorded mtime would have been an earlier mtime than its most recent input's (in2) // mtime EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_TRUE(!state_.GetNode("out1", 0)->dirty()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); EXPECT_EQ(size_t(1), command_runner_.commands_ran_.size()); EXPECT_EQ(1, builder_.plan_.command_edge_count()); } TEST_F(BuildWithLogTest, GeneratedPlainDepfileMtime) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule generate-depfile\n" " command = touch $out ; echo \"$out: $test_dependency\" > $depfile\n" "build out: generate-depfile\n" " test_dependency = inimp\n" " depfile = out.d\n")); fs_.Create("inimp", ""); fs_.Tick(); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_TRUE(builder_.AlreadyUpToDate()); command_runner_.commands_ran_.clear(); state_.Reset(); builder_.Cleanup(); builder_.plan_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_TRUE(builder_.AlreadyUpToDate()); } struct BuildDryRun : public BuildWithLogTest { BuildDryRun() { config_.dry_run = true; } }; TEST_F(BuildDryRun, AllCommandsShown) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" " restat = 1\n" "rule cc\n" " command = cc\n" " restat = 1\n" "build out1: cc in\n" "build out2: true out1\n" "build out3: cat out2\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Create("out3", ""); fs_.Tick(); fs_.Create("in", ""); // "cc" touches out1, so we should build out2. But because "true" does not // touch out2, we should cancel the build of out3. 
string err; EXPECT_TRUE(builder_.AddTarget("out3", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); } TEST_F(BuildDryRun, WithDyndep) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: touch || dd\n" " dyndep = dd\n" "build out-copy: cp out\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out-copy", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); } // Test that RSP files are created when & where appropriate and deleted after // successful execution. TEST_F(BuildTest, RspFileSuccess) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cat_rsp\n" " command = cat $rspfile > $out\n" " rspfile = $rspfile\n" " rspfile_content = $long_command\n" "rule cat_rsp_out\n" " command = cat $rspfile > $out\n" " rspfile = $out.rsp\n" " rspfile_content = $long_command\n" "build out1: cat in\n" "build out2: cat_rsp in\n" " rspfile = out 2.rsp\n" " long_command = Some very long command\n" "build out$ 3: cat_rsp_out in\n" " long_command = Some very long command\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Create("out 3", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AddTarget("out 3", &err)); ASSERT_EQ("", err); size_t files_created = fs_.files_created_.size(); size_t files_removed = fs_.files_removed_.size(); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); // The RSP files and temp file to acquire output mtimes were created ASSERT_EQ(files_created + 3, fs_.files_created_.size()); ASSERT_EQ(1u, fs_.files_created_.count("out 
2.rsp")); ASSERT_EQ(1u, fs_.files_created_.count("out 3.rsp")); ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock")); // The RSP files were removed ASSERT_EQ(files_removed + 2, fs_.files_removed_.size()); ASSERT_EQ(1u, fs_.files_removed_.count("out 2.rsp")); ASSERT_EQ(1u, fs_.files_removed_.count("out 3.rsp")); } // Test that RSP file is created but not removed for commands, which fail TEST_F(BuildTest, RspFileFailure) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule fail\n" " command = fail\n" " rspfile = $rspfile\n" " rspfile_content = $long_command\n" "build out: fail in\n" " rspfile = out.rsp\n" " long_command = Another very long command\n")); fs_.Create("out", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); size_t files_created = fs_.files_created_.size(); size_t files_removed = fs_.files_removed_.size(); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ("subcommand failed", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // The RSP file and temp file to acquire output mtimes were created ASSERT_EQ(files_created + 2, fs_.files_created_.size()); ASSERT_EQ(1u, fs_.files_created_.count("out.rsp")); ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock")); // The RSP file was NOT removed ASSERT_EQ(files_removed, fs_.files_removed_.size()); ASSERT_EQ(0u, fs_.files_removed_.count("out.rsp")); // The RSP file contains what it should ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents); } // Test that contents of the RSP file behaves like a regular part of // command line, i.e. 
triggers a rebuild if changed TEST_F(BuildWithLogTest, RspFileCmdLineChange) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cat_rsp\n" " command = cat $rspfile > $out\n" " rspfile = $rspfile\n" " rspfile_content = $long_command\n" "build out: cat_rsp in\n" " rspfile = out.rsp\n" " long_command = Original very long command\n")); fs_.Create("out", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); // 1. Build for the 1st time (-> populate log) EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // 2. Build again (no change) command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); ASSERT_TRUE(builder_.AlreadyUpToDate()); // 3. Alter the entry in the logfile // (to simulate a change in the command line between 2 builds) BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out"); ASSERT_TRUE(NULL != log_entry); ASSERT_NO_FATAL_FAILURE(AssertHash( "cat out.rsp > out;rspfile=Original very long command", log_entry->command_hash)); log_entry->command_hash++; // Change the command hash to something else. // Now expect the target to be rebuilt command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, InterruptCleanup) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule interrupt\n" " command = interrupt\n" "rule touch-interrupt\n" " command = touch-interrupt\n" "build out1: interrupt in1\n" "build out2: touch-interrupt in2\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Tick(); fs_.Create("in1", ""); fs_.Create("in2", ""); // An untouched output of an interrupted command should be retained. 
string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitInterrupted); EXPECT_EQ("interrupted by user", err); builder_.Cleanup(); EXPECT_GT(fs_.Stat("out1", &err), 0); err = ""; // A touched output of an interrupted command should be deleted. EXPECT_TRUE(builder_.AddTarget("out2", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitInterrupted); EXPECT_EQ("interrupted by user", err); builder_.Cleanup(); EXPECT_EQ(0, fs_.Stat("out2", &err)); } TEST_F(BuildTest, StatFailureAbortsBuild) { const string kTooLongToStat(400, 'i'); ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, ("build " + kTooLongToStat + ": cat in\n").c_str())); fs_.Create("in", ""); // This simulates a stat failure: fs_.files_[kTooLongToStat].mtime = -1; fs_.files_[kTooLongToStat].stat_error = "stat failed"; string err; EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err)); EXPECT_EQ("stat failed", err); } TEST_F(BuildTest, PhonyWithNoInputs) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build nonexistent: phony\n" "build out1: cat || nonexistent\n" "build out2: cat nonexistent\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); // out1 should be up to date even though its input is dirty, because its // order-only dependency has nothing to do. string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder_.AlreadyUpToDate()); // out2 should still be out of date though, because its input is dirty. 
err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, DepsGccWithEmptyDepfileErrorsOut) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc\n" " deps = gcc\n" "build out: cc\n")); Dirty("out"); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_FALSE(builder_.AlreadyUpToDate()); EXPECT_EQ(builder_.Build(&err), ExitFailure); ASSERT_EQ("subcommand failed", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, StatusFormatElapsed_e) { status_.BuildStarted(); // Before any task is done, the elapsed time must be zero. EXPECT_EQ("[%/e0.000]", status_.FormatProgressStatus("[%%/e%e]", 0)); } TEST_F(BuildTest, StatusFormatElapsed_w) { status_.BuildStarted(); // Before any task is done, the elapsed time must be zero. EXPECT_EQ("[%/e00:00]", status_.FormatProgressStatus("[%%/e%w]", 0)); } TEST_F(BuildTest, StatusFormatETA) { status_.BuildStarted(); // Before any task is done, the ETA time must be unknown. EXPECT_EQ("[%/E?]", status_.FormatProgressStatus("[%%/E%E]", 0)); } TEST_F(BuildTest, StatusFormatTimeProgress) { status_.BuildStarted(); // Before any task is done, the percentage of elapsed time must be zero. EXPECT_EQ("[%/p 0%]", status_.FormatProgressStatus("[%%/p%p]", 0)); } TEST_F(BuildTest, StatusFormatReplacePlaceholder) { EXPECT_EQ("[%/s0/t0/r0/u0/f0]", status_.FormatProgressStatus("[%%/s%s/t%t/r%r/u%u/f%f]", 0)); } TEST_F(BuildTest, FailedDepsParse) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build bad_deps.o: cat in1\n" " deps = gcc\n" " depfile = in1.d\n")); string err; EXPECT_TRUE(builder_.AddTarget("bad_deps.o", &err)); ASSERT_EQ("", err); // These deps will fail to parse, as they should only have one // path to the left of the colon. 
fs_.Create("in1.d", "AAA BBB"); EXPECT_EQ(builder_.Build(&err), ExitFailure); EXPECT_EQ("subcommand failed", err); } struct BuildWithQueryDepsLogTest : public BuildTest { BuildWithQueryDepsLogTest() : BuildTest(&log_), deps_log_file_("ninja_deps") {} ~BuildWithQueryDepsLogTest() { log_.Close(); } virtual void SetUp() { BuildTest::SetUp(); temp_dir_.CreateAndEnter("BuildWithQueryDepsLogTest"); std::string err; ASSERT_TRUE(log_.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); } ScopedTempDir temp_dir_; ScopedFilePath deps_log_file_; DepsLog log_; }; /// Test a MSVC-style deps log with multiple outputs. TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileMSVC) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_msvc\n" " command = echo 'using $in' && for file in $out; do cp $in $$file; done\n" " deps = msvc\n" " msvc_deps_prefix = using \n" "build out1 out2: cp_multi_msvc in1\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'using in1' && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(1, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(1, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); } /// Test a GCC-style deps log with multiple outputs. 
TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOneLine) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_gcc\n" " command = echo '$out: $in' > in.d && for file in $out; do cp in1 $$file; done\n" " deps = gcc\n" " depfile = in.d\n" "build out1 out2: cp_multi_gcc in1 in2\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); fs_.Create("in.d", "out1 out2: in1 in2"); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'out1 out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(2, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); EXPECT_EQ("in2", out1_deps->nodes[1]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(2, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); EXPECT_EQ("in2", out2_deps->nodes[1]->path()); } /// Test a GCC-style deps log with multiple outputs using a line per input. 
TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCMultiLineInput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_gcc\n" " command = echo '$out: in1\\n$out: in2' > in.d && for file in $out; do cp in1 $$file; done\n" " deps = gcc\n" " depfile = in.d\n" "build out1 out2: cp_multi_gcc in1 in2\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); fs_.Create("in.d", "out1 out2: in1\nout1 out2: in2"); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'out1 out2: in1\\nout1 out2: in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(2, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); EXPECT_EQ("in2", out1_deps->nodes[1]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(2, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); EXPECT_EQ("in2", out2_deps->nodes[1]->path()); } /// Test a GCC-style deps log with multiple outputs using a line per output. 
TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCMultiLineOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_gcc\n" " command = echo 'out1: $in\\nout2: $in' > in.d && for file in $out; do cp in1 $$file; done\n" " deps = gcc\n" " depfile = in.d\n" "build out1 out2: cp_multi_gcc in1 in2\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); fs_.Create("in.d", "out1: in1 in2\nout2: in1 in2"); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'out1: in1 in2\\nout2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(2, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); EXPECT_EQ("in2", out1_deps->nodes[1]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(2, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); EXPECT_EQ("in2", out2_deps->nodes[1]->path()); } /// Test a GCC-style deps log with multiple outputs mentioning only the main output. 
TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOnlyMainOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_gcc\n" " command = echo 'out1: $in' > in.d && for file in $out; do cp in1 $$file; done\n" " deps = gcc\n" " depfile = in.d\n" "build out1 out2: cp_multi_gcc in1 in2\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); fs_.Create("in.d", "out1: in1 in2"); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'out1: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(2, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); EXPECT_EQ("in2", out1_deps->nodes[1]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(2, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); EXPECT_EQ("in2", out2_deps->nodes[1]->path()); } /// Test a GCC-style deps log with multiple outputs mentioning only the secondary output. TEST_F(BuildWithQueryDepsLogTest, TwoOutputsDepFileGCCOnlySecondaryOutput) { // Note: This ends up short-circuiting the node creation due to the primary // output not being present, but it should still work. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cp_multi_gcc\n" " command = echo 'out2: $in' > in.d && for file in $out; do cp in1 $$file; done\n" " deps = gcc\n" " depfile = in.d\n" "build out1 out2: cp_multi_gcc in1 in2\n")); std::string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); ASSERT_EQ("", err); fs_.Create("in.d", "out2: in1 in2"); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("echo 'out2: in1 in2' > in.d && for file in out1 out2; do cp in1 $file; done", command_runner_.commands_ran_[0]); Node* out1_node = state_.LookupNode("out1"); DepsLog::Deps* out1_deps = log_.GetDeps(out1_node); EXPECT_EQ(2, out1_deps->node_count); EXPECT_EQ("in1", out1_deps->nodes[0]->path()); EXPECT_EQ("in2", out1_deps->nodes[1]->path()); Node* out2_node = state_.LookupNode("out2"); DepsLog::Deps* out2_deps = log_.GetDeps(out2_node); EXPECT_EQ(2, out2_deps->node_count); EXPECT_EQ("in1", out2_deps->nodes[0]->path()); EXPECT_EQ("in2", out2_deps->nodes[1]->path()); } /// Tests of builds involving deps logs necessarily must span /// multiple builds. We reuse methods on BuildTest but not the /// builder_ it sets up, because we want pristine objects for /// each build. struct BuildWithDepsLogTest : public BuildTest { BuildWithDepsLogTest() : build_log_file_("build_log"), deps_log_file_("ninja_deps") {} virtual void SetUp() { BuildTest::SetUp(); temp_dir_.CreateAndEnter("BuildWithDepsLogTest"); } virtual void TearDown() { temp_dir_.Cleanup(); } ScopedTempDir temp_dir_; ScopedFilePath build_log_file_; ScopedFilePath deps_log_file_; /// Shadow parent class builder_ so we don't accidentally use it. void* builder_; }; /// Run a straightforward build where the deps log is used. TEST_F(BuildWithDepsLogTest, Straightforward) { string err; // Note: in1 was created by the superclass SetUp(). 
const char* manifest = "build out: cat in1\n" " deps = gcc\n" " depfile = in1.d\n"; { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Run the build once, everything should be ok. DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); fs_.Create("in1.d", "out: in2"); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // The deps file should have been removed. EXPECT_EQ(0, fs_.Stat("in1.d", &err)); // Recreate it for the next step. fs_.Create("in1.d", "out: in2"); deps_log.Close(); builder.command_runner_.release(); } { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Touch the file only mentioned in the deps. fs_.Tick(); fs_.Create("in2", ""); // Run the build again. DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // We should have rebuilt the output due to in2 being // out of date. EXPECT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } } /// Verify that obsolete dependency info causes a rebuild. /// 1) Run a successful build where everything has time t, record deps. /// 2) Move input/output to time t+1 -- despite files in alignment, /// should still need to rebuild due to deps at older time. 
TEST_F(BuildWithDepsLogTest, ObsoleteDeps) { string err; // Note: in1 was created by the superclass SetUp(). const char* manifest = "build out: cat in1\n" " deps = gcc\n" " depfile = in1.d\n"; { // Run an ordinary build that gathers dependencies. fs_.Create("in1", ""); fs_.Create("in1.d", "out: "); State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Run the build once, everything should be ok. DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); deps_log.Close(); builder.command_runner_.release(); } // Push all files one tick forward so that only the deps are out // of date. fs_.Tick(); fs_.Create("in1", ""); fs_.Create("out", ""); // The deps file should have been removed, so no need to timestamp it. EXPECT_EQ(0, fs_.Stat("in1.d", &err)); { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); // Recreate the deps file here because the build expects them to exist. fs_.Create("in1.d", "out: "); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // We should have rebuilt the output due to the deps being // out of date. 
EXPECT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } } TEST_F(BuildWithDepsLogTest, DepsIgnoredInDryRun) { const char* manifest = "build out: cat in1\n" " deps = gcc\n" " depfile = in1.d\n"; fs_.Create("out", ""); fs_.Tick(); fs_.Create("in1", ""); State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // The deps log is NULL in dry runs. config_.dry_run = true; Builder builder(&state, config_, NULL, NULL, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); string err; EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceCondition) { string err; const char* manifest = "rule long-cc\n" " command = long-cc\n" "build out: long-cc in1\n" " test_dependency = in1\n"; State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); BuildLog build_log; ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err)); ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); BuildLog::LogEntry* log_entry = NULL; { Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); // Run the build, out gets built, dep file is created EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // See that an entry in the logfile is created. 
the input_mtime is 1 since that was // the mtime of in1 when the command was started log_entry = build_log.LookupByOutput("out"); ASSERT_TRUE(NULL != log_entry); ASSERT_EQ(1u, log_entry->mtime); builder.command_runner_.release(); } { Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); // Trigger the build again - "out" should rebuild despite having a newer mtime than // "in1", since "in1" was touched during the build of out (simulated by changing its // mtime in the the test builder's WaitForCommand() which runs before FinishCommand() command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // Check that the logfile entry is still correct log_entry = build_log.LookupByOutput("out"); ASSERT_TRUE(NULL != log_entry); ASSERT_TRUE(fs_.files_["in1"].mtime < log_entry->mtime); builder.command_runner_.release(); } { Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); // And a subsequent run should not have any work to do command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder.AlreadyUpToDate()); builder.command_runner_.release(); } } TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceConditionWithDepFile) { string err; const char* manifest = "rule long-cc\n" " command = long-cc\n" "build out: long-cc\n" " deps = gcc\n" " depfile = out.d\n" " test_dependency = header.h\n"; fs_.Create("header.h", ""); State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); BuildLog build_log; ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err)); ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err)); 
DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); { Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); // Run the build, out gets built, dep file is created EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); // See that an entry in the logfile is created. the mtime is 1 due to the command // starting when the file system's mtime was 1. BuildLog::LogEntry* log_entry = build_log.LookupByOutput("out"); ASSERT_TRUE(NULL != log_entry); ASSERT_EQ(1u, log_entry->mtime); builder.command_runner_.release(); } { // Trigger the build again - "out" will rebuild since its newest input mtime (header.h) // is newer than the recorded mtime of out in the build log Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } { // Trigger the build again - "out" won't rebuild since the file wasn't updated during // the previous build Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); ASSERT_TRUE(builder.AlreadyUpToDate()); builder.command_runner_.release(); } // touch the header to trigger a rebuild fs_.Create("header.h", ""); ASSERT_EQ(fs_.now_, 7); { // Rebuild. 
This time, long-cc will cause header.h to be updated while the build is // in progress Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } { // Rebuild. Because header.h is now in the deplog for out, it should be detectable as // a change-while-in-progress and should cause a rebuild of out. Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } { // This time, the header.h file was not updated during the build, so the target should // not be considered dirty. Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); state.Reset(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_TRUE(builder.AlreadyUpToDate()); builder.command_runner_.release(); } } /// Check that a restat rule generating a header cancels compilations correctly. TEST_F(BuildTest, RestatDepfileDependency) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" // Would be "write if out-of-date" in reality. 
" restat = 1\n" "build header.h: true header.in\n" "build out: cat in1\n" " depfile = in1.d\n")); fs_.Create("header.h", ""); fs_.Create("in1.d", "out: header.h"); fs_.Tick(); fs_.Create("header.in", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); } /// Check that a restat rule generating a header cancels compilations correctly, /// depslog case. TEST_F(BuildWithDepsLogTest, RestatDepfileDependencyDepsLog) { string err; // Note: in1 was created by the superclass SetUp(). const char* manifest = "rule true\n" " command = true\n" // Would be "write if out-of-date" in reality. " restat = 1\n" "build header.h: true header.in\n" "build out: cat in1\n" " deps = gcc\n" " depfile = in1.d\n"; { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Run the build once, everything should be ok. DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); fs_.Create("in1.d", "out: header.h"); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); deps_log.Close(); builder.command_runner_.release(); } { State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Touch the input of the restat rule. fs_.Tick(); fs_.Create("header.in", ""); // Run the build again. 
DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); command_runner_.commands_ran_.clear(); EXPECT_TRUE(builder.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // Rule "true" should have run again, but the build of "out" should have // been cancelled due to restat propagating through the depfile header. EXPECT_EQ(1u, command_runner_.commands_ran_.size()); builder.command_runner_.release(); } } TEST_F(BuildWithDepsLogTest, DepFileOKDepsLog) { string err; const char* manifest = "rule cc\n command = cc $in\n depfile = $out.d\n deps = gcc\n" "build fo$ o.o: cc foo.c\n"; fs_.Create("foo.c", ""); { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Run the build once, everything should be ok. DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("fo o.o", &err)); ASSERT_EQ("", err); fs_.Create("fo o.o.d", "fo\\ o.o: blah.h bar.h\n"); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); deps_log.Close(); builder.command_runner_.release(); } { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); Edge* edge = state.edges_.back(); state.GetNode("bar.h", 0)->MarkDirty(); // Mark bar.h as missing. 
EXPECT_TRUE(builder.AddTarget("fo o.o", &err)); ASSERT_EQ("", err); // Expect one new edge generating fo o.o, loading the depfile should // not generate new edges. ASSERT_EQ(1u, state.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input. ASSERT_EQ("cc foo.c", edge->EvaluateCommand()); deps_log.Close(); builder.command_runner_.release(); } } TEST_F(BuildWithDepsLogTest, DiscoveredDepDuringBuildChanged) { string err; const char* manifest = "rule touch-out-implicit-dep\n" " command = touch $out ; sleep 1 ; touch $test_dependency\n" "rule generate-depfile\n" " command = touch $out ; echo \"$out: $test_dependency\" > $depfile\n" "build out1: touch-out-implicit-dep in1\n" " test_dependency = inimp\n" "build out2: generate-depfile in1 || out1\n" " test_dependency = inimp\n" " depfile = out2.d\n" " deps = gcc\n"; fs_.Create("in1", ""); fs_.Tick(); BuildLog build_log; { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out2", &err)); EXPECT_FALSE(builder.AlreadyUpToDate()); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_TRUE(builder.AlreadyUpToDate()); deps_log.Close(); builder.command_runner_.release(); } fs_.Tick(); fs_.Create("in1", ""); { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out2", &err)); 
EXPECT_FALSE(builder.AlreadyUpToDate()); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_TRUE(builder.AlreadyUpToDate()); deps_log.Close(); builder.command_runner_.release(); } fs_.Tick(); { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out2", &err)); EXPECT_TRUE(builder.AlreadyUpToDate()); deps_log.Close(); builder.command_runner_.release(); } } #ifdef _WIN32 TEST_F(BuildWithDepsLogTest, DepFileDepsLogCanonicalize) { string err; const char* manifest = "rule cc\n command = cc $in\n depfile = $out.d\n deps = gcc\n" "build a/b\\c\\d/e/fo$ o.o: cc x\\y/z\\foo.c\n"; fs_.Create("x/y/z/foo.c", ""); { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); // Run the build once, everything should be ok. DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err)); ASSERT_EQ("", err); // Note, different slashes from manifest. 
fs_.Create("a/b\\c\\d/e/fo o.o.d", "a\\b\\c\\d\\e\\fo\\ o.o: blah.h bar.h\n"); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); deps_log.Close(); builder.command_runner_.release(); } { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); state.GetNode("bar.h", 0)->MarkDirty(); // Mark bar.h as missing. EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err)); ASSERT_EQ("", err); // Expect one new edge generating fo o.o. ASSERT_EQ(1u, state.edges_.size()); // Expect our edge to now have three inputs: foo.c and two headers. Edge* edge = state.edges_.back(); ASSERT_EQ(3u, edge->inputs_.size()); // Expect the command line we generate to only use the original input. // Note, slashes from manifest, not .d. ASSERT_EQ("cc x\\y/z\\foo.c", edge->EvaluateCommand()); deps_log.Close(); builder.command_runner_.release(); } } #endif /// Check that a restat rule doesn't clear an edge if the depfile is missing. /// Follows from: https://github.com/ninja-build/ninja/issues/603 TEST_F(BuildTest, RestatMissingDepfile) { const char* manifest = "rule true\n" " command = true\n" // Would be "write if out-of-date" in reality. " restat = 1\n" "build header.h: true header.in\n" "build out: cat header.h\n" " depfile = out.d\n"; fs_.Create("header.h", ""); fs_.Tick(); fs_.Create("out", ""); fs_.Create("header.in", ""); // Normally, only 'header.h' would be rebuilt, as // its rule doesn't touch the output and has 'restat=1' set. // But we are also missing the depfile for 'out', // which should force its command to run anyway! 
RebuildTarget("out", manifest); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); } /// Check that a restat rule doesn't clear an edge if the deps are missing. /// https://github.com/ninja-build/ninja/issues/603 TEST_F(BuildWithDepsLogTest, RestatMissingDepfileDepslog) { string err; const char* manifest = "rule true\n" " command = true\n" // Would be "write if out-of-date" in reality. " restat = 1\n" "build header.h: true header.in\n" "build out: cat header.h\n" " deps = gcc\n" " depfile = out.d\n"; // Build once to populate ninja deps logs from out.d fs_.Create("header.in", ""); fs_.Create("out.d", "out: header.h"); fs_.Create("header.h", ""); RebuildTarget("out", manifest, build_log_file_.c_str(), deps_log_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // Sanity: this rebuild should be NOOP RebuildTarget("out", manifest, build_log_file_.c_str(), deps_log_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); // Touch 'header.in', blank dependencies log (create a different one). // Building header.h triggers 'restat' outputs cleanup. // Validate that out is rebuilt nevertheless, as deps are missing. fs_.Tick(); fs_.Create("header.in", ""); ScopedFilePath deps2_file_("ninja_deps2"); // (switch to a new blank deps_log "ninja_deps2") RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // Sanity: this build should be NOOP RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); // Check that invalidating deps by target timestamp also works here // Repeat the test but touch target instead of blanking the log. 
fs_.Tick(); fs_.Create("header.in", ""); fs_.Create("out", ""); RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); // And this build should be NOOP again RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str()); ASSERT_EQ(0u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) { string err; const char* manifest = "rule cc\n" " command = cc $in\n" " depfile = $out.d\n" "build foo.o: cc foo.c\n"; fs_.Create("foo.c", ""); fs_.Create("foo.o", ""); fs_.Create("header.h", ""); fs_.Create("foo.o.d", "bar.o.d: header.h\n"); ScopedFilePath build_log("build_log"); ScopedFilePath deps_file("ninja_deps"); RebuildTarget("foo.o", manifest, build_log.c_str(), deps_file.c_str()); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, Console) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule console\n" " command = console\n" " pool = console\n" "build cons: console in.txt\n")); fs_.Create("in.txt", ""); string err; EXPECT_TRUE(builder_.AddTarget("cons", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); } TEST_F(BuildTest, DyndepMissingAndNoRule) { // Verify that we can diagnose when a dyndep file is missing and // has no rule to build it. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build out: touch || dd\n" " dyndep = dd\n" )); string err; EXPECT_FALSE(builder_.AddTarget("out", &err)); EXPECT_EQ("loading 'dd': No such file or directory", err); } TEST_F(BuildTest, DyndepReadyImplicitConnection) { // Verify that a dyndep file can be loaded immediately to discover // that one edge has an implicit output that is also an implicit // input of another edge. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "build tmp: touch || dd\n" " dyndep = dd\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out | out.imp: dyndep | tmp.imp\n" "build tmp | tmp.imp: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[0]); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[1]); } TEST_F(BuildTest, DyndepReadySyntaxError) { // Verify that a dyndep file can be loaded immediately to discover // and reject a syntax error in it. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd", "build out: dyndep\n" ); string err; EXPECT_FALSE(builder_.AddTarget("out", &err)); EXPECT_EQ("dd:1: expected 'ninja_dyndep_version = ...'\n", err); } TEST_F(BuildTest, DyndepReadyCircular) { // Verify that a dyndep file can be loaded immediately to discover // and reject a circular dependency. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " dyndep = dd\n" "build in: r circ\n" )); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out | circ: dyndep\n" ); fs_.Create("out", ""); string err; EXPECT_FALSE(builder_.AddTarget("out", &err)); EXPECT_EQ("dependency cycle: circ -> in -> circ", err); } TEST_F(BuildTest, DyndepBuild) { // Verify that a dyndep file can be built and loaded to discover nothing. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); size_t files_created = fs_.files_created_.size(); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch out", command_runner_.commands_ran_[1]); ASSERT_EQ(2u, fs_.files_read_.size()); EXPECT_EQ("dd-in", fs_.files_read_[0]); EXPECT_EQ("dd", fs_.files_read_[1]); ASSERT_EQ(3u + files_created, fs_.files_created_.size()); EXPECT_EQ(1u, fs_.files_created_.count("dd")); EXPECT_EQ(1u, fs_.files_created_.count("out")); EXPECT_EQ(1u, fs_.files_created_.count(".ninja_lock")); } TEST_F(BuildTest, DyndepBuildSyntaxError) { // Verify that a dyndep file can be built and loaded to discover // and reject a syntax error in it. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "build out: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); EXPECT_EQ("dd:1: expected 'ninja_dyndep_version = ...'\n", err); } TEST_F(BuildTest, DyndepBuildUnrelatedOutput) { // Verify that a dyndep file can have dependents that do not specify // it as their dyndep binding. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build unrelated: touch || dd\n" "build out: touch unrelated || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch unrelated", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepBuildDiscoverNewOutput) { // Verify that a dyndep file can be built and loaded to discover // a new output of an edge. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: touch in || dd\n" " dyndep = dd\n" )); fs_.Create("in", ""); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out | out.imp: dyndep\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(2u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[1]); } TEST_F(BuildTest, DyndepBuildDiscoverNewOutputWithMultipleRules1) { // Verify that a dyndep file can be built and loaded to discover // a new output of an edge that is already the output of another edge. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out1 | out-twice.imp: touch in\n" "build out2: touch in || dd\n" " dyndep = dd\n" )); fs_.Create("in", ""); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out2 | out-twice.imp: dyndep\n" ); fs_.Tick(); fs_.Create("out1", ""); fs_.Create("out2", ""); string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_TRUE(builder_.AddTarget("out2", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); EXPECT_EQ("multiple rules generate out-twice.imp", err); } TEST_F(BuildTest, DyndepBuildDiscoverNewOutputWithMultipleRules2) { // Verify that a dyndep file can be built and loaded to discover // a new output of an edge that is already the output of another // edge also discovered by dyndep. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd1: cp dd1-in\n" "build out1: touch || dd1\n" " dyndep = dd1\n" "build dd2: cp dd2-in || dd1\n" // make order predictable for test "build out2: touch || dd2\n" " dyndep = dd2\n" )); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Create("dd1-in", "ninja_dyndep_version = 1\n" "build out1 | out-twice.imp: dyndep\n" ); fs_.Create("dd2-in", ""); fs_.Create("dd2", "ninja_dyndep_version = 1\n" "build out2 | out-twice.imp: dyndep\n" ); fs_.Tick(); fs_.Create("out1", ""); fs_.Create("out2", ""); string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_TRUE(builder_.AddTarget("out2", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); EXPECT_EQ("multiple rules generate out-twice.imp", err); } TEST_F(BuildTest, DyndepBuildDiscoverNewInput) { // Verify that a dyndep file can be built and loaded to discover // a new input to an edge. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build in: touch\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep | in\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch in", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepBuildDiscoverNewInputWithValidation) { // Verify that a dyndep file cannot contain the |@ validation // syntax. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep |@ validation\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); string err_first_line = err.substr(0, err.find("\n")); EXPECT_EQ("dd:2: expected newline, got '|@'", err_first_line); } TEST_F(BuildTest, DyndepBuildDiscoverNewInputWithTransitiveValidation) { // Verify that a dyndep file can be built and loaded to discover // a new input to an edge that has a validation edge. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build in: touch |@ validation\n" "build validation: touch in out\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep | in\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(4u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch in", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out", command_runner_.commands_ran_[2]); EXPECT_EQ("touch validation", command_runner_.commands_ran_[3]); } TEST_F(BuildTest, DyndepBuildDiscoverImplicitConnection) { // Verify that a dyndep file can be built and loaded to discover // that one edge has an implicit output that is also an implicit // input of another edge. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build tmp: touch || dd\n" " dyndep = dd\n" "build out: touch || dd\n" " dyndep = dd\n" )); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out | out.imp: dyndep | tmp.imp\n" "build tmp | tmp.imp: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepBuildDiscoverOutputAndDepfileInput) { // Verify that a dyndep file can be built and loaded to discover // that one edge has an implicit output that is also reported by // a depfile as an input of another edge. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build tmp: touch || dd\n" " dyndep = dd\n" "build out: cp tmp\n" " depfile = out.d\n" )); fs_.Create("out.d", "out: tmp.imp\n"); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build tmp | tmp.imp: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); // Loading the depfile did not give tmp.imp a phony input edge. ASSERT_FALSE(GetNode("tmp.imp")->in_edge()); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); // Loading the dyndep file gave tmp.imp a real input edge. 
ASSERT_FALSE(GetNode("tmp.imp")->in_edge()->is_phony()); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("cp tmp out", command_runner_.commands_ran_[2]); EXPECT_EQ(1u, fs_.files_created_.count("tmp.imp")); EXPECT_TRUE(builder_.AlreadyUpToDate()); } TEST_F(BuildTest, DyndepBuildDiscoverNowWantEdge) { // Verify that a dyndep file can be built and loaded to discover // that an edge is actually wanted due to a missing implicit output. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build tmp: touch || dd\n" " dyndep = dd\n" "build out: touch tmp || dd\n" " dyndep = dd\n" )); fs_.Create("tmp", ""); fs_.Create("out", ""); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out: dyndep\n" "build tmp | tmp.imp: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepBuildDiscoverNowWantEdgeAndDependent) { // Verify that a dyndep file can be built and loaded to discover // that an edge and a dependent are actually wanted. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build tmp: touch || dd\n" " dyndep = dd\n" "build out: touch tmp\n" )); fs_.Create("tmp", ""); fs_.Create("out", ""); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build tmp | tmp.imp: dyndep\n" ); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("touch tmp tmp.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out out.imp", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepBuildDiscoverCircular) { // Verify that a dyndep file can be built and loaded to discover // and reject a circular dependency. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " command = unused\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out: r in || dd\n" " depfile = out.d\n" " dyndep = dd\n" "build in: r || dd\n" " dyndep = dd\n" )); fs_.Create("out.d", "out: inimp\n"); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out | circ: dyndep\n" "build in: dyndep | circ\n" ); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitFailure); // Depending on how the pointers in Plan::ready_ work out, we could have // discovered the cycle from either starting point. EXPECT_TRUE(err == "dependency cycle: circ -> in -> circ" || err == "dependency cycle: in -> circ -> in"); } TEST_F(BuildWithLogTest, DyndepBuildDiscoverRestat) { // Verify that a dyndep file can be built and loaded to discover // that an edge has a restat binding. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule true\n" " command = true\n" "rule cp\n" " command = cp $in $out\n" "build dd: cp dd-in\n" "build out1: true in || dd\n" " dyndep = dd\n" "build out2: cat out1\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); fs_.Create("dd-in", "ninja_dyndep_version = 1\n" "build out1: dyndep\n" " restat = 1\n" ); fs_.Tick(); fs_.Create("in", ""); // Do a pre-build so that there's commands in the log for the outputs, // otherwise, the lack of an entry in the build log will cause "out2" to // rebuild regardless of restat. string err; EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd-in dd", command_runner_.commands_ran_[0]); EXPECT_EQ("true", command_runner_.commands_ran_[1]); EXPECT_EQ("cat out1 > out2", command_runner_.commands_ran_[2]); command_runner_.commands_ran_.clear(); state_.Reset(); fs_.Tick(); fs_.Create("in", ""); // We touched "in", so we should build "out1". But because "true" does not // touch "out1", we should cancel the build of "out2". EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("true", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, DyndepBuildDiscoverScheduledEdge) { // Verify that a dyndep file can be built and loaded to discover a // new input that itself is an output from an edge that has already // been scheduled but not finished. We should not re-schedule it. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build out1 | out1.imp: touch\n" "build zdd: cp zdd-in\n" " verify_active_edge = out1\n" // verify out1 is active when zdd is finished "build out2: cp out1 || zdd\n" " dyndep = zdd\n" )); fs_.Create("zdd-in", "ninja_dyndep_version = 1\n" "build out2: dyndep | out1.imp\n" ); // Enable concurrent builds so that we can load the dyndep file // while another edge is still active. command_runner_.max_active_edges_ = 2; // During the build "out1" and "zdd" should be built concurrently. // The fake command runner will finish these in reverse order // of the names of the first outputs, so "zdd" will finish first // and we will load the dyndep file while the edge for "out1" is // still active. This will add a new dependency on "out1.imp", // also produced by the active edge. The builder should not // re-schedule the already-active edge. string err; EXPECT_TRUE(builder_.AddTarget("out1", &err)); EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); // Depending on how the pointers in Plan::ready_ work out, the first // two commands may have run in either order. EXPECT_TRUE((command_runner_.commands_ran_[0] == "touch out1 out1.imp" && command_runner_.commands_ran_[1] == "cp zdd-in zdd") || (command_runner_.commands_ran_[1] == "touch out1 out1.imp" && command_runner_.commands_ran_[0] == "cp zdd-in zdd")); EXPECT_EQ("cp out1 out2", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepTwoLevelDirect) { // Verify that a clean dyndep file can depend on a dirty dyndep file // and be loaded properly after the dirty one is built and loaded. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd1: cp dd1-in\n" "build out1 | out1.imp: touch || dd1\n" " dyndep = dd1\n" "build dd2: cp dd2-in || dd1\n" // direct order-only dep on dd1 "build out2: touch || dd2\n" " dyndep = dd2\n" )); fs_.Create("out1.imp", ""); fs_.Create("out2", ""); fs_.Create("out2.imp", ""); fs_.Create("dd1-in", "ninja_dyndep_version = 1\n" "build out1: dyndep\n" ); fs_.Create("dd2-in", ""); fs_.Create("dd2", "ninja_dyndep_version = 1\n" "build out2 | out2.imp: dyndep | out1.imp\n" ); // During the build dd1 should be built and loaded. The RecomputeDirty // called as a result of loading dd1 should not cause dd2 to be loaded // because the builder will never get a chance to update the build plan // to account for dd2. Instead dd2 should only be later loaded once the // builder recognizes that it is now ready (as its order-only dependency // on dd1 has been satisfied). This test case verifies that each dyndep // file is loaded to update the build graph independently. string err; EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]); EXPECT_EQ("touch out1 out1.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out2 out2.imp", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepTwoLevelIndirect) { // Verify that dyndep files can add to an edge new implicit inputs that // correspond to implicit outputs added to other edges by other dyndep // files on which they (order-only) depend. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out $out.imp\n" "rule cp\n" " command = cp $in $out\n" "build dd1: cp dd1-in\n" "build out1: touch || dd1\n" " dyndep = dd1\n" "build dd2: cp dd2-in || out1\n" // indirect order-only dep on dd1 "build out2: touch || dd2\n" " dyndep = dd2\n" )); fs_.Create("out1.imp", ""); fs_.Create("out2", ""); fs_.Create("out2.imp", ""); fs_.Create("dd1-in", "ninja_dyndep_version = 1\n" "build out1 | out1.imp: dyndep\n" ); fs_.Create("dd2-in", ""); fs_.Create("dd2", "ninja_dyndep_version = 1\n" "build out2 | out2.imp: dyndep | out1.imp\n" ); // During the build dd1 should be built and loaded. Then dd2 should // be built and loaded. Loading dd2 should cause the builder to // recognize that out2 needs to be built even though it was originally // clean without dyndep info. string err; EXPECT_TRUE(builder_.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(3u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]); EXPECT_EQ("touch out1 out1.imp", command_runner_.commands_ran_[1]); EXPECT_EQ("touch out2 out2.imp", command_runner_.commands_ran_[2]); } TEST_F(BuildTest, DyndepTwoLevelDiscoveredReady) { // Verify that a dyndep file can discover a new input whose // edge also has a dyndep file that is ready to load immediately. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd0: cp dd0-in\n" "build dd1: cp dd1-in\n" "build in: touch\n" "build tmp: touch || dd0\n" " dyndep = dd0\n" "build out: touch || dd1\n" " dyndep = dd1\n" )); fs_.Create("dd1-in", "ninja_dyndep_version = 1\n" "build out: dyndep | tmp\n" ); fs_.Create("dd0-in", ""); fs_.Create("dd0", "ninja_dyndep_version = 1\n" "build tmp: dyndep | in\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(4u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]); EXPECT_EQ("touch in", command_runner_.commands_ran_[1]); EXPECT_EQ("touch tmp", command_runner_.commands_ran_[2]); EXPECT_EQ("touch out", command_runner_.commands_ran_[3]); } TEST_F(BuildTest, DyndepTwoLevelDiscoveredDirty) { // Verify that a dyndep file can discover a new input whose // edge also has a dyndep file that needs to be built. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "rule cp\n" " command = cp $in $out\n" "build dd0: cp dd0-in\n" "build dd1: cp dd1-in\n" "build in: touch\n" "build tmp: touch || dd0\n" " dyndep = dd0\n" "build out: touch || dd1\n" " dyndep = dd1\n" )); fs_.Create("dd1-in", "ninja_dyndep_version = 1\n" "build out: dyndep | tmp\n" ); fs_.Create("dd0-in", "ninja_dyndep_version = 1\n" "build tmp: dyndep | in\n" ); fs_.Tick(); fs_.Create("out", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(5u, command_runner_.commands_ran_.size()); EXPECT_EQ("cp dd1-in dd1", command_runner_.commands_ran_[0]); EXPECT_EQ("cp dd0-in dd0", command_runner_.commands_ran_[1]); EXPECT_EQ("touch in", command_runner_.commands_ran_[2]); EXPECT_EQ("touch tmp", command_runner_.commands_ran_[3]); EXPECT_EQ("touch out", command_runner_.commands_ran_[4]); } TEST_F(BuildTest, Validation) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in |@ validate\n" "build validate: cat in2\n")); fs_.Create("in", ""); fs_.Create("in2", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(2u, command_runner_.commands_ran_.size()); // Test touching "in" only rebuilds "out" ("validate" doesn't depend on // "out"). fs_.Tick(); fs_.Create("in", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in > out", command_runner_.commands_ran_[0]); // Test touching "in2" only rebuilds "validate" ("out" doesn't depend on // "validate"). 
fs_.Tick(); fs_.Create("in2", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in2 > validate", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, ValidationDependsOnOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in |@ validate\n" "build validate: cat in2 | out\n")); fs_.Create("in", ""); fs_.Create("in2", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(2u, command_runner_.commands_ran_.size()); // Test touching "in" rebuilds "out" and "validate". fs_.Tick(); fs_.Create("in", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(2u, command_runner_.commands_ran_.size()); // Test touching "in2" only rebuilds "validate" ("out" doesn't depend on // "validate"). 
fs_.Tick(); fs_.Create("in2", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in2 > validate", command_runner_.commands_ran_[0]); } TEST_F(BuildWithDepsLogTest, ValidationThroughDepfile) { const char* manifest = "build out: cat in |@ validate\n" "build validate: cat in2 | out\n" "build out2: cat in3\n" " deps = gcc\n" " depfile = out2.d\n"; string err; { fs_.Create("in", ""); fs_.Create("in2", ""); fs_.Create("in3", ""); fs_.Create("out2.d", "out: out"); State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // On the first build, only the out2 command is run. ASSERT_EQ(command_runner_.commands_ran_.size(), size_t(1)); EXPECT_EQ("cat in3 > out2", command_runner_.commands_ran_[0]); // The deps file should have been removed. 
EXPECT_EQ(0, fs_.Stat("out2.d", &err)); deps_log.Close(); builder.command_runner_.release(); } fs_.Tick(); command_runner_.commands_ran_.clear(); { fs_.Create("in2", ""); fs_.Create("in3", ""); State state; ASSERT_NO_FATAL_FAILURE(AddCatRule(&state)); ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest)); DepsLog deps_log; ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err)); ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err)); ASSERT_EQ("", err); Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0); builder.command_runner_.reset(&command_runner_); EXPECT_TRUE(builder.AddTarget("out2", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder.Build(&err), ExitSuccess); EXPECT_EQ("", err); // The out and validate actions should have been run as well as out2. ASSERT_EQ(command_runner_.commands_ran_.size(), size_t(3)); // out has to run first, as both out2 and validate depend on it. EXPECT_EQ("cat in > out", command_runner_.commands_ran_[0]); deps_log.Close(); builder.command_runner_.release(); } } TEST_F(BuildTest, ValidationCircular) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in |@ out2\n" "build out2: cat in2 |@ out\n")); fs_.Create("in", ""); fs_.Create("in2", ""); string err; EXPECT_TRUE(builder_.AddTarget("out", &err)); EXPECT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); EXPECT_EQ(2u, command_runner_.commands_ran_.size()); // Test touching "in" rebuilds "out". fs_.Tick(); fs_.Create("in", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in > out", command_runner_.commands_ran_[0]); // Test touching "in2" rebuilds "out2". 
fs_.Tick(); fs_.Create("in2", ""); err.clear(); command_runner_.commands_ran_.clear(); state_.Reset(); EXPECT_TRUE(builder_.AddTarget("out", &err)); ASSERT_EQ("", err); EXPECT_EQ(builder_.Build(&err), ExitSuccess); EXPECT_EQ("", err); ASSERT_EQ(1u, command_runner_.commands_ran_.size()); EXPECT_EQ("cat in2 > out2", command_runner_.commands_ran_[0]); } TEST_F(BuildTest, ValidationWithCircularDependency) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in |@ validate\n" "build validate: cat validate_in | out\n" "build validate_in: cat validate\n")); fs_.Create("in", ""); string err; EXPECT_FALSE(builder_.AddTarget("out", &err)); EXPECT_EQ("dependency cycle: validate -> validate_in -> validate", err); } ninja-1.13.2/src/canon_perftest.cc000066400000000000000000000030451510764045400170150ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include #include "util.h" #include "metrics.h" using namespace std; const char kPath[] = "../../third_party/WebKit/Source/WebCore/" "platform/leveldb/LevelDBWriteBatch.cpp"; int main() { vector times; char buf[200]; size_t len = strlen(kPath); strcpy(buf, kPath); for (int j = 0; j < 5; ++j) { const int kNumRepetitions = 2000000; int64_t start = GetTimeMillis(); uint64_t slash_bits; for (int i = 0; i < kNumRepetitions; ++i) { CanonicalizePath(buf, &len, &slash_bits); } int delta = (int)(GetTimeMillis() - start); times.push_back(delta); } int min = times[0]; int max = times[0]; float total = 0; for (size_t i = 0; i < times.size(); ++i) { total += times[i]; if (times[i] < min) min = times[i]; else if (times[i] > max) max = times[i]; } printf("min %dms max %dms avg %.1fms\n", min, max, total / times.size()); } ninja-1.13.2/src/clean.cc000066400000000000000000000165361510764045400150760ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "clean.h" #include #include #include "disk_interface.h" #include "graph.h" #include "state.h" #include "util.h" using namespace std; Cleaner::Cleaner(State* state, const BuildConfig& config, DiskInterface* disk_interface) : state_(state), config_(config), dyndep_loader_(state, disk_interface), cleaned_files_count_(0), disk_interface_(disk_interface), status_(0) { } int Cleaner::RemoveFile(const string& path) { return disk_interface_->RemoveFile(path); } bool Cleaner::FileExists(const string& path) { string err; TimeStamp mtime = disk_interface_->Stat(path, &err); if (mtime == -1) Error("%s", err.c_str()); return mtime > 0; // Treat Stat() errors as "file does not exist". } void Cleaner::Report(const string& path) { ++cleaned_files_count_; if (IsVerbose()) printf("Remove %s\n", path.c_str()); } void Cleaner::Remove(const string& path) { if (!IsAlreadyRemoved(path)) { removed_.insert(path); if (config_.dry_run) { if (FileExists(path)) Report(path); } else { int ret = RemoveFile(path); if (ret == 0) Report(path); else if (ret == -1) status_ = 1; } } } bool Cleaner::IsAlreadyRemoved(const string& path) { set::iterator i = removed_.find(path); return (i != removed_.end()); } void Cleaner::RemoveEdgeFiles(Edge* edge) { string depfile = edge->GetUnescapedDepfile(); if (!depfile.empty()) Remove(depfile); string rspfile = edge->GetUnescapedRspfile(); if (!rspfile.empty()) Remove(rspfile); } void Cleaner::PrintHeader() { if (config_.verbosity == BuildConfig::QUIET) return; printf("Cleaning..."); if (IsVerbose()) printf("\n"); else printf(" "); fflush(stdout); } void Cleaner::PrintFooter() { if (config_.verbosity == BuildConfig::QUIET) return; printf("%d files.\n", cleaned_files_count_); } int Cleaner::CleanAll(bool generator) { Reset(); PrintHeader(); LoadDyndeps(); for (vector::iterator e = state_->edges_.begin(); e != state_->edges_.end(); ++e) { // Do not try to remove phony targets if ((*e)->is_phony()) continue; // Do not remove generator's files unless 
generator specified. if (!generator && (*e)->GetBindingBool("generator")) continue; for (vector::iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { Remove((*out_node)->path()); } RemoveEdgeFiles(*e); } PrintFooter(); return status_; } int Cleaner::CleanDead(const BuildLog::Entries& entries) { Reset(); PrintHeader(); LoadDyndeps(); for (BuildLog::Entries::const_iterator i = entries.begin(); i != entries.end(); ++i) { Node* n = state_->LookupNode(i->first); // Detecting stale outputs works as follows: // // - If it has no Node, it is not in the build graph, or the deps log // anymore, hence is stale. // // - If it isn't an output or input for any edge, it comes from a stale // entry in the deps log, but no longer referenced from the build // graph. // if (!n || (!n->in_edge() && n->out_edges().empty())) { Remove(i->first.AsString()); } } PrintFooter(); return status_; } void Cleaner::DoCleanTarget(Node* target) { if (Edge* e = target->in_edge()) { // Do not try to remove phony targets if (!e->is_phony()) { Remove(target->path()); RemoveEdgeFiles(e); } for (vector::iterator n = e->inputs_.begin(); n != e->inputs_.end(); ++n) { Node* next = *n; // call DoCleanTarget recursively if this node has not been visited if (cleaned_.count(next) == 0) { DoCleanTarget(next); } } } // mark this target to be cleaned already cleaned_.insert(target); } int Cleaner::CleanTarget(Node* target) { assert(target); Reset(); PrintHeader(); LoadDyndeps(); DoCleanTarget(target); PrintFooter(); return status_; } int Cleaner::CleanTarget(const char* target) { assert(target); Reset(); Node* node = state_->LookupNode(target); if (node) { CleanTarget(node); } else { Error("unknown target '%s'", target); status_ = 1; } return status_; } int Cleaner::CleanTargets(int target_count, char* targets[]) { Reset(); PrintHeader(); LoadDyndeps(); for (int i = 0; i < target_count; ++i) { string target_name = targets[i]; if (target_name.empty()) { Error("failed to 
canonicalize '': empty path"); status_ = 1; continue; } uint64_t slash_bits; CanonicalizePath(&target_name, &slash_bits); Node* target = state_->LookupNode(target_name); if (target) { if (IsVerbose()) printf("Target %s\n", target_name.c_str()); DoCleanTarget(target); } else { Error("unknown target '%s'", target_name.c_str()); status_ = 1; } } PrintFooter(); return status_; } void Cleaner::DoCleanRule(const Rule* rule) { assert(rule); for (vector::iterator e = state_->edges_.begin(); e != state_->edges_.end(); ++e) { if ((*e)->rule().name() == rule->name()) { for (vector::iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { Remove((*out_node)->path()); RemoveEdgeFiles(*e); } } } } int Cleaner::CleanRule(const Rule* rule) { assert(rule); Reset(); PrintHeader(); LoadDyndeps(); DoCleanRule(rule); PrintFooter(); return status_; } int Cleaner::CleanRule(const char* rule) { assert(rule); Reset(); const Rule* r = state_->bindings_.LookupRule(rule); if (r) { CleanRule(r); } else { Error("unknown rule '%s'", rule); status_ = 1; } return status_; } int Cleaner::CleanRules(int rule_count, char* rules[]) { assert(rules); Reset(); PrintHeader(); LoadDyndeps(); for (int i = 0; i < rule_count; ++i) { const char* rule_name = rules[i]; const Rule* rule = state_->bindings_.LookupRule(rule_name); if (rule) { if (IsVerbose()) printf("Rule %s\n", rule_name); DoCleanRule(rule); } else { Error("unknown rule '%s'", rule_name); status_ = 1; } } PrintFooter(); return status_; } void Cleaner::Reset() { status_ = 0; cleaned_files_count_ = 0; removed_.clear(); cleaned_.clear(); } void Cleaner::LoadDyndeps() { // Load dyndep files that exist, before they are cleaned. for (vector::iterator e = state_->edges_.begin(); e != state_->edges_.end(); ++e) { Node* dyndep; if ((dyndep = (*e)->dyndep_) && dyndep->dyndep_pending()) { // Capture and ignore errors loading the dyndep file. // We clean as much of the graph as we know. 
std::string err; dyndep_loader_.LoadDyndeps(dyndep, &err); } } } ninja-1.13.2/src/clean.h000066400000000000000000000071071510764045400147320ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_CLEAN_H_ #define NINJA_CLEAN_H_ #include #include #include "build.h" #include "dyndep.h" #include "build_log.h" struct State; struct Node; struct Rule; struct DiskInterface; struct Cleaner { /// Build a cleaner object with the given @a disk_interface Cleaner(State* state, const BuildConfig& config, DiskInterface* disk_interface); /// Clean the given @a target and all the file built for it. /// @return non-zero if an error occurs. int CleanTarget(Node* target); /// Clean the given target @a target. /// @return non-zero if an error occurs. int CleanTarget(const char* target); /// Clean the given target @a targets. /// @return non-zero if an error occurs. int CleanTargets(int target_count, char* targets[]); /// Clean all built files, except for files created by generator rules. /// @param generator If set, also clean files created by generator rules. /// @return non-zero if an error occurs. int CleanAll(bool generator = false); /// Clean all the file built with the given rule @a rule. /// @return non-zero if an error occurs. int CleanRule(const Rule* rule); /// Clean the file produced by the given @a rule. /// @return non-zero if an error occurs. 
int CleanRule(const char* rule); /// Clean the file produced by the given @a rules. /// @return non-zero if an error occurs. int CleanRules(int rule_count, char* rules[]); /// Clean the files produced by previous builds that are no longer in the /// manifest. /// @return non-zero if an error occurs. int CleanDead(const BuildLog::Entries& entries); /// @return the number of file cleaned. int cleaned_files_count() const { return cleaned_files_count_; } /// @return whether the cleaner is in verbose mode. bool IsVerbose() const { return (config_.verbosity != BuildConfig::QUIET && (config_.verbosity == BuildConfig::VERBOSE || config_.dry_run)); } private: /// Remove the file @a path. /// @return whether the file has been removed. int RemoveFile(const std::string& path); /// @returns whether the file @a path exists. bool FileExists(const std::string& path); void Report(const std::string& path); /// Remove the given @a path file only if it has not been already removed. void Remove(const std::string& path); /// @return whether the given @a path has already been removed. bool IsAlreadyRemoved(const std::string& path); /// Remove the depfile and rspfile for an Edge. void RemoveEdgeFiles(Edge* edge); /// Helper recursive method for CleanTarget(). void DoCleanTarget(Node* target); void PrintHeader(); void PrintFooter(); void DoCleanRule(const Rule* rule); void Reset(); /// Load dependencies from dyndep bindings. void LoadDyndeps(); State* state_; const BuildConfig& config_; DyndepLoader dyndep_loader_; std::set removed_; std::set cleaned_; int cleaned_files_count_; DiskInterface* disk_interface_; int status_; }; #endif // NINJA_CLEAN_H_ ninja-1.13.2/src/clean_test.cc000066400000000000000000000415651510764045400161350ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "clean.h" #include "build.h" #include "util.h" #include "test.h" #ifndef _WIN32 #include #endif using namespace std; namespace { const char kTestFilename[] = "CleanTest-tempfile"; struct CleanTest : public StateTestWithBuiltinRules { VirtualFileSystem fs_; BuildConfig config_; virtual void SetUp() { config_.verbosity = BuildConfig::QUIET; } }; TEST_F(CleanTest, CleanAll) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build in1: cat src1\n" "build out1: cat in1\n" "build in2: cat src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(4, cleaner.cleaned_files_count()); EXPECT_EQ(4u, fs_.files_removed_.size()); // Check they are removed. 
string err; EXPECT_EQ(0, fs_.Stat("in1", &err)); EXPECT_EQ(0, fs_.Stat("out1", &err)); EXPECT_EQ(0, fs_.Stat("in2", &err)); EXPECT_EQ(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanAllDryRun) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build in1: cat src1\n" "build out1: cat in1\n" "build in2: cat src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); config_.dry_run = true; Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(4, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); // Check they are not removed. string err; EXPECT_LT(0, fs_.Stat("in1", &err)); EXPECT_LT(0, fs_.Stat("out1", &err)); EXPECT_LT(0, fs_.Stat("in2", &err)); EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(4, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanTarget) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build in1: cat src1\n" "build out1: cat in1\n" "build in2: cat src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); // Check they are removed. 
string err; EXPECT_EQ(0, fs_.Stat("in1", &err)); EXPECT_EQ(0, fs_.Stat("out1", &err)); EXPECT_LT(0, fs_.Stat("in2", &err)); EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanTargetDryRun) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build in1: cat src1\n" "build out1: cat in1\n" "build in2: cat src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); config_.dry_run = true; Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); // Check they are not removed. string err; EXPECT_LT(0, fs_.Stat("in1", &err)); EXPECT_LT(0, fs_.Stat("out1", &err)); EXPECT_LT(0, fs_.Stat("in2", &err)); EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanRule) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cat_e\n" " command = cat -e $in > $out\n" "build in1: cat_e src1\n" "build out1: cat in1\n" "build in2: cat_e src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); // Check they are removed. 
string err; EXPECT_EQ(0, fs_.Stat("in1", &err)); EXPECT_LT(0, fs_.Stat("out1", &err)); EXPECT_EQ(0, fs_.Stat("in2", &err)); EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); EXPECT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanRuleDryRun) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cat_e\n" " command = cat -e $in > $out\n" "build in1: cat_e src1\n" "build out1: cat in1\n" "build in2: cat_e src2\n" "build out2: cat in2\n")); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); config_.dry_run = true; Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); // Check they are not removed. string err; EXPECT_LT(0, fs_.Stat("in1", &err)); EXPECT_LT(0, fs_.Stat("out1", &err)); EXPECT_LT(0, fs_.Stat("in2", &err)); EXPECT_LT(0, fs_.Stat("out2", &err)); fs_.files_removed_.clear(); ASSERT_EQ(0, cleaner.CleanRule("cat_e")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanRuleGenerator) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule regen\n" " command = cat $in > $out\n" " generator = 1\n" "build out1: cat in1\n" "build out2: regen in2\n")); fs_.Create("out1", ""); fs_.Create("out2", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(1, cleaner.cleaned_files_count()); EXPECT_EQ(1u, fs_.files_removed_.size()); fs_.Create("out1", ""); EXPECT_EQ(0, cleaner.CleanAll(/*generator=*/true)); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanDepFile) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc $in > $out\n" " depfile = $out.d\n" "build out1: cc in1\n")); 
fs_.Create("out1", ""); fs_.Create("out1.d", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanDepFileOnCleanTarget) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc $in > $out\n" " depfile = $out.d\n" "build out1: cc in1\n")); fs_.Create("out1", ""); fs_.Create("out1.d", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanDepFileOnCleanRule) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc $in > $out\n" " depfile = $out.d\n" "build out1: cc in1\n")); fs_.Create("out1", ""); fs_.Create("out1.d", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanRule("cc")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanDyndep) { // Verify that a dyndep file can be loaded to discover a new output // to be cleaned. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in || dd\n" " dyndep = dd\n" )); fs_.Create("in", ""); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out | out.imp: dyndep\n" ); fs_.Create("out", ""); fs_.Create("out.imp", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); string err; EXPECT_EQ(0, fs_.Stat("out", &err)); EXPECT_EQ(0, fs_.Stat("out.imp", &err)); } TEST_F(CleanTest, CleanDyndepMissing) { // Verify that a missing dyndep file is tolerated. 
ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in || dd\n" " dyndep = dd\n" )); fs_.Create("in", ""); fs_.Create("out", ""); fs_.Create("out.imp", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(1, cleaner.cleaned_files_count()); EXPECT_EQ(1u, fs_.files_removed_.size()); string err; EXPECT_EQ(0, fs_.Stat("out", &err)); EXPECT_EQ(1, fs_.Stat("out.imp", &err)); } TEST_F(CleanTest, CleanRspFile) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc\n" " command = cc $in > $out\n" " rspfile = $rspfile\n" " rspfile_content=$in\n" "build out1: cc in1\n" " rspfile = cc1.rsp\n")); fs_.Create("out1", ""); fs_.Create("cc1.rsp", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(2u, fs_.files_removed_.size()); } TEST_F(CleanTest, CleanRsp) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cat_rsp \n" " command = cat $rspfile > $out\n" " rspfile = $rspfile\n" " rspfile_content = $in\n" "build in1: cat src1\n" "build out1: cat in1\n" "build in2: cat_rsp src2\n" " rspfile=in2.rsp\n" "build out2: cat_rsp in2\n" " rspfile=out2.rsp\n" )); fs_.Create("in1", ""); fs_.Create("out1", ""); fs_.Create("in2.rsp", ""); fs_.Create("out2.rsp", ""); fs_.Create("in2", ""); fs_.Create("out2", ""); Cleaner cleaner(&state_, config_, &fs_); ASSERT_EQ(0, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanTarget("out1")); EXPECT_EQ(2, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanTarget("in2")); EXPECT_EQ(2, cleaner.cleaned_files_count()); ASSERT_EQ(0, cleaner.CleanRule("cat_rsp")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_EQ(6u, fs_.files_removed_.size()); // Check they are removed. 
string err; EXPECT_EQ(0, fs_.Stat("in1", &err)); EXPECT_EQ(0, fs_.Stat("out1", &err)); EXPECT_EQ(0, fs_.Stat("in2", &err)); EXPECT_EQ(0, fs_.Stat("out2", &err)); EXPECT_EQ(0, fs_.Stat("in2.rsp", &err)); EXPECT_EQ(0, fs_.Stat("out2.rsp", &err)); } TEST_F(CleanTest, CleanFailure) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build dir: cat src1\n")); fs_.MakeDir("dir"); Cleaner cleaner(&state_, config_, &fs_); EXPECT_NE(0, cleaner.CleanAll()); } TEST_F(CleanTest, CleanPhony) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build phony: phony t1 t2\n" "build t1: cat\n" "build t2: cat\n")); fs_.Create("phony", ""); fs_.Create("t1", ""); fs_.Create("t2", ""); // Check that CleanAll does not remove "phony". Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_LT(0, fs_.Stat("phony", &err)); fs_.Create("t1", ""); fs_.Create("t2", ""); // Check that CleanTarget does not remove "phony". EXPECT_EQ(0, cleaner.CleanTarget("phony")); EXPECT_EQ(2, cleaner.cleaned_files_count()); EXPECT_LT(0, fs_.Stat("phony", &err)); } TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule cc_dep\n" " command = cc $in > $out\n" " depfile = $out.d\n" "rule cc_rsp\n" " command = cc $in > $out\n" " rspfile = $out.rsp\n" " rspfile_content = $in\n" "build out$ 1: cc_dep in$ 1\n" "build out$ 2: cc_rsp in$ 1\n" )); fs_.Create("out 1", ""); fs_.Create("out 2", ""); fs_.Create("out 1.d", ""); fs_.Create("out 2.rsp", ""); Cleaner cleaner(&state_, config_, &fs_); EXPECT_EQ(0, cleaner.CleanAll()); EXPECT_EQ(4, cleaner.cleaned_files_count()); EXPECT_EQ(4u, fs_.files_removed_.size()); string err; EXPECT_EQ(0, fs_.Stat("out 1", &err)); EXPECT_EQ(0, fs_.Stat("out 2", &err)); EXPECT_EQ(0, fs_.Stat("out 1.d", &err)); EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err)); } struct CleanDeadTest : public CleanTest, public BuildLogUser{ virtual void SetUp() { // In case a crashing test left a 
stale file behind. platformAwareUnlink(kTestFilename); CleanTest::SetUp(); } virtual void TearDown() { platformAwareUnlink(kTestFilename); } virtual bool IsPathDead(StringPiece) const { return false; } }; TEST_F(CleanDeadTest, CleanDead) { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, "rule cat\n" " command = cat $in > $out\n" "build out1: cat in\n" "build out2: cat in\n" )); ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out2: cat in\n" )); fs_.Create("in", ""); fs_.Create("out1", ""); fs_.Create("out2", ""); BuildLog log1; string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log1.RecordCommand(state.edges_[0], 15, 18); log1.RecordCommand(state.edges_[1], 20, 25); log1.Close(); BuildLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &err)); ASSERT_EQ("", err); ASSERT_EQ(2u, log2.entries().size()); ASSERT_TRUE(log2.LookupByOutput("out1")); ASSERT_TRUE(log2.LookupByOutput("out2")); // First use the manifest that describe how to build out1. Cleaner cleaner1(&state, config_, &fs_); EXPECT_EQ(0, cleaner1.CleanDead(log2.entries())); EXPECT_EQ(0, cleaner1.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_NE(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); // Then use the manifest that does not build out1 anymore. Cleaner cleaner2(&state_, config_, &fs_); EXPECT_EQ(0, cleaner2.CleanDead(log2.entries())); EXPECT_EQ(1, cleaner2.cleaned_files_count()); EXPECT_EQ(1u, fs_.files_removed_.size()); EXPECT_EQ("out1", *(fs_.files_removed_.begin())); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_EQ(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); // Nothing to do now. 
EXPECT_EQ(0, cleaner2.CleanDead(log2.entries())); EXPECT_EQ(0, cleaner2.cleaned_files_count()); EXPECT_EQ(1u, fs_.files_removed_.size()); EXPECT_EQ("out1", *(fs_.files_removed_.begin())); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_EQ(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); log2.Close(); } TEST_F(CleanDeadTest, CleanDeadPreservesInputs) { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, "rule cat\n" " command = cat $in > $out\n" "build out1: cat in\n" "build out2: cat in\n" )); // This manifest does not build out1 anymore, but makes // it an implicit input. CleanDead should detect this // and preserve it. ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out2: cat in | out1\n" )); fs_.Create("in", ""); fs_.Create("out1", ""); fs_.Create("out2", ""); BuildLog log1; string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err)); ASSERT_EQ("", err); log1.RecordCommand(state.edges_[0], 15, 18); log1.RecordCommand(state.edges_[1], 20, 25); log1.Close(); BuildLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &err)); ASSERT_EQ("", err); ASSERT_EQ(2u, log2.entries().size()); ASSERT_TRUE(log2.LookupByOutput("out1")); ASSERT_TRUE(log2.LookupByOutput("out2")); // First use the manifest that describe how to build out1. Cleaner cleaner1(&state, config_, &fs_); EXPECT_EQ(0, cleaner1.CleanDead(log2.entries())); EXPECT_EQ(0, cleaner1.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_NE(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); // Then use the manifest that does not build out1 anymore. Cleaner cleaner2(&state_, config_, &fs_); EXPECT_EQ(0, cleaner2.CleanDead(log2.entries())); EXPECT_EQ(0, cleaner2.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_NE(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); // Nothing to do now. 
EXPECT_EQ(0, cleaner2.CleanDead(log2.entries())); EXPECT_EQ(0, cleaner2.cleaned_files_count()); EXPECT_EQ(0u, fs_.files_removed_.size()); EXPECT_NE(0, fs_.Stat("in", &err)); EXPECT_NE(0, fs_.Stat("out1", &err)); EXPECT_NE(0, fs_.Stat("out2", &err)); log2.Close(); } } // anonymous namespace ninja-1.13.2/src/clparser.cc000066400000000000000000000074711510764045400156250ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "clparser.h" #include #include #include #include "metrics.h" #include "string_piece_util.h" #ifdef _WIN32 #include "includes_normalize.h" #include "string_piece.h" #else #include "util.h" #endif using namespace std; namespace { /// Return true if \a input ends with \a needle. bool EndsWith(const string& input, const string& needle) { return (input.size() >= needle.size() && input.substr(input.size() - needle.size()) == needle); } } // anonymous namespace // static string CLParser::FilterShowIncludes(const string& line, const string& deps_prefix) { const string kDepsPrefixEnglish = "Note: including file: "; const char* in = line.c_str(); const char* end = in + line.size(); const string& prefix = deps_prefix.empty() ? 
kDepsPrefixEnglish : deps_prefix; if (end - in > (int)prefix.size() && memcmp(in, prefix.c_str(), (int)prefix.size()) == 0) { in += prefix.size(); while (*in == ' ') ++in; return line.substr(in - line.c_str()); } return ""; } // static bool CLParser::IsSystemInclude(string path) { transform(path.begin(), path.end(), path.begin(), ToLowerASCII); // TODO: this is a heuristic, perhaps there's a better way? return (path.find("program files") != string::npos || path.find("microsoft visual studio") != string::npos); } // static bool CLParser::FilterInputFilename(string line) { transform(line.begin(), line.end(), line.begin(), ToLowerASCII); // TODO: other extensions, like .asm? return EndsWith(line, ".c") || EndsWith(line, ".cc") || EndsWith(line, ".cxx") || EndsWith(line, ".cpp") || EndsWith(line, ".c++"); } // static bool CLParser::Parse(const string& output, const string& deps_prefix, string* filtered_output, string* err) { METRIC_RECORD("CLParser::Parse"); // Loop over all lines in the output to process them. assert(&output != filtered_output); size_t start = 0; bool seen_show_includes = false; #ifdef _WIN32 IncludesNormalize normalizer("."); #endif while (start < output.size()) { size_t end = output.find_first_of("\r\n", start); if (end == string::npos) end = output.size(); string line = output.substr(start, end - start); string include = FilterShowIncludes(line, deps_prefix); if (!include.empty()) { seen_show_includes = true; string normalized; #ifdef _WIN32 if (!normalizer.Normalize(include, &normalized, err)) return false; #else // TODO: should this make the path relative to cwd? normalized = include; uint64_t slash_bits; CanonicalizePath(&normalized, &slash_bits); #endif if (!IsSystemInclude(normalized)) includes_.insert(normalized); } else if (!seen_show_includes && FilterInputFilename(line)) { // Drop it. // TODO: if we support compiling multiple output files in a single // cl.exe invocation, we should stash the filename. 
} else { filtered_output->append(line); filtered_output->append("\n"); } if (end < output.size() && output[end] == '\r') ++end; if (end < output.size() && output[end] == '\n') ++end; start = end; } return true; } ninja-1.13.2/src/clparser.h000066400000000000000000000040641510764045400154620ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_CLPARSER_H_ #define NINJA_CLPARSER_H_ #include #include /// Visual Studio's cl.exe requires some massaging to work with Ninja; /// for example, it emits include information on stderr in a funny /// format when building with /showIncludes. This class parses this /// output. struct CLParser { /// Parse a line of cl.exe output and extract /showIncludes info. /// If a dependency is extracted, returns a nonempty string. /// Exposed for testing. static std::string FilterShowIncludes(const std::string& line, const std::string& deps_prefix); /// Return true if a mentioned include file is a system path. /// Filtering these out reduces dependency information considerably. static bool IsSystemInclude(std::string path); /// Parse a line of cl.exe output and return true if it looks like /// it's printing an input filename. This is a heuristic but it appears /// to be the best we can do. /// Exposed for testing. 
static bool FilterInputFilename(std::string line); /// Parse the full output of cl, filling filtered_output with the text that /// should be printed (if any). Returns true on success, or false with err /// filled. output must not be the same object as filtered_object. bool Parse(const std::string& output, const std::string& deps_prefix, std::string* filtered_output, std::string* err); std::set includes_; }; #endif // NINJA_CLPARSER_H_ ninja-1.13.2/src/clparser_perftest.cc000066400000000000000000000352421510764045400175360ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include #include "clparser.h" #include "metrics.h" using namespace std; int main(int argc, char* argv[]) { // Output of /showIncludes from #include string perf_testdata = "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iostream\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\istream\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ostream\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ios\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocnum\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\climits\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\yvals.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xkeycheck.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\crtdefs.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\sal.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ConcurrencySal.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vadefs.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\use_ansi.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits.h\r\n" "Note: including 
file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cmath\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\math.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtgmath.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtr1common\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdlib\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdlib.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_malloc.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_search.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stddef.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdlib.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdio\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdio.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdio.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_stdio_config.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\streambuf\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xiosbase\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocale\r\n" "Note: including file: C:\\Program Files 
(x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstring\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\string.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memory.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memcpy_s.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\errno.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_string.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstring.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdexcept\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\exception\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\type_traits\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstddef\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstddef\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\initializer_list\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\malloc.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_exception.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\eh.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_terminate.h\r\n" "Note: including file: C:\\Program Files 
(x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstring\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmemory0\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdint\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdint.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ymath.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cfloat\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\float.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cwchar\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\wchar.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wconio.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wctype.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wdirect.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wio.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_share.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wprocess.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wtime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows 
Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/stat.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/types.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\new\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xutility\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\utility\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iosfwd\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\crtdbg.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new_debug.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xatomic0.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\intrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\setjmp.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\immintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\wmmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\nmmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\smmintrin.h\r\n" "Note: 
including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\tmmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\pmmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\emmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mmintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ammintrin.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mm3dnow.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\typeinfo\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_typeinfo.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\ctype.h\r\n" "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\locale.h\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xfacet\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\system_error\r\n" "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cerrno\r\n" "Note: including file: C:\\Program Files (x86)\\Windows 
Kits\\10\\include\\10.0.10240.0\\ucrt\\share.h\r\n"; for (int limit = 1 << 10; limit < (1<<20); limit *= 2) { int64_t start = GetTimeMillis(); for (int rep = 0; rep < limit; ++rep) { string output; string err; CLParser parser; if (!parser.Parse(perf_testdata, "", &output, &err)) { printf("%s\n", err.c_str()); return 1; } } int64_t end = GetTimeMillis(); if (end - start > 2000) { int delta_ms = (int)(end - start); printf("Parse %d times in %dms avg %.1fus\n", limit, delta_ms, float(delta_ms * 1000) / limit); break; } } return 0; } ninja-1.13.2/src/clparser_test.cc000066400000000000000000000102731510764045400166560ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "clparser.h" #include "test.h" #include "util.h" using namespace std; TEST(CLParserTest, ShowIncludes) { ASSERT_EQ("", CLParser::FilterShowIncludes("", "")); ASSERT_EQ("", CLParser::FilterShowIncludes("Sample compiler output", "")); ASSERT_EQ("c:\\Some Files\\foobar.h", CLParser::FilterShowIncludes("Note: including file: " "c:\\Some Files\\foobar.h", "")); ASSERT_EQ("c:\\initspaces.h", CLParser::FilterShowIncludes("Note: including file: " "c:\\initspaces.h", "")); ASSERT_EQ("c:\\initspaces.h", CLParser::FilterShowIncludes("Non-default prefix: inc file: " "c:\\initspaces.h", "Non-default prefix: inc file:")); } TEST(CLParserTest, FilterInputFilename) { ASSERT_TRUE(CLParser::FilterInputFilename("foobar.cc")); ASSERT_TRUE(CLParser::FilterInputFilename("foo bar.cc")); ASSERT_TRUE(CLParser::FilterInputFilename("baz.c")); ASSERT_TRUE(CLParser::FilterInputFilename("FOOBAR.CC")); ASSERT_FALSE(CLParser::FilterInputFilename( "src\\cl_helper.cc(166) : fatal error C1075: end " "of file found ...")); } TEST(CLParserTest, ParseSimple) { CLParser parser; string output, err; ASSERT_TRUE(parser.Parse( "foo\r\n" "Note: inc file prefix: foo.h\r\n" "bar\r\n", "Note: inc file prefix:", &output, &err)); ASSERT_EQ("foo\nbar\n", output); ASSERT_EQ(1u, parser.includes_.size()); ASSERT_EQ("foo.h", *parser.includes_.begin()); } TEST(CLParserTest, ParseFilenameFilter) { CLParser parser; string output, err; ASSERT_TRUE(parser.Parse( "foo.cc\r\n" "cl: warning\r\n", "", &output, &err)); ASSERT_EQ("cl: warning\n", output); } TEST(CLParserTest, NoFilenameFilterAfterShowIncludes) { CLParser parser; string output, err; ASSERT_TRUE(parser.Parse( "foo.cc\r\n" "Note: including file: foo.h\r\n" "something something foo.cc\r\n", "", &output, &err)); ASSERT_EQ("something something foo.cc\n", output); } TEST(CLParserTest, ParseSystemInclude) { CLParser parser; string output, err; ASSERT_TRUE(parser.Parse( "Note: including file: c:\\Program Files\\foo.h\r\n" "Note: including file: d:\\Microsoft 
Visual Studio\\bar.h\r\n" "Note: including file: path.h\r\n", "", &output, &err)); // We should have dropped the first two includes because they look like // system headers. ASSERT_EQ("", output); ASSERT_EQ(1u, parser.includes_.size()); ASSERT_EQ("path.h", *parser.includes_.begin()); } TEST(CLParserTest, DuplicatedHeader) { CLParser parser; string output, err; ASSERT_TRUE(parser.Parse( "Note: including file: foo.h\r\n" "Note: including file: bar.h\r\n" "Note: including file: foo.h\r\n", "", &output, &err)); // We should have dropped one copy of foo.h. ASSERT_EQ("", output); ASSERT_EQ(2u, parser.includes_.size()); } TEST(CLParserTest, DuplicatedHeaderPathConverted) { CLParser parser; string output, err; // This isn't inline in the Parse() call below because the #ifdef in // a macro expansion would confuse MSVC2013's preprocessor. const char kInput[] = "Note: including file: sub/./foo.h\r\n" "Note: including file: bar.h\r\n" #ifdef _WIN32 "Note: including file: sub\\foo.h\r\n"; #else "Note: including file: sub/foo.h\r\n"; #endif ASSERT_TRUE(parser.Parse(kInput, "", &output, &err)); // We should have dropped one copy of foo.h. ASSERT_EQ("", output); ASSERT_EQ(2u, parser.includes_.size()); } ninja-1.13.2/src/command_collector.h000066400000000000000000000040171510764045400173310ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef NINJA_COMMAND_COLLECTOR_H_ #define NINJA_COMMAND_COLLECTOR_H_ #include #include #include #include "graph.h" /// Collects the transitive set of edges that lead into a given set /// of starting nodes. Used to implement the `compdb-targets` tool. /// /// When collecting inputs, the outputs of phony edges are always ignored /// from the result, but are followed by the dependency walk. /// /// Usage is: /// - Create instance. /// - Call CollectFrom() for each root node to collect edges from. /// - Call TakeResult() to retrieve the list of edges. /// struct CommandCollector { void CollectFrom(const Node* node) { assert(node); if (!visited_nodes_.insert(node).second) return; Edge* edge = node->in_edge(); if (!edge || !visited_edges_.insert(edge).second) return; for (Node* input_node : edge->inputs_) CollectFrom(input_node); if (!edge->is_phony()) in_edges.push_back(edge); } private: std::unordered_set visited_nodes_; std::unordered_set visited_edges_; /// we use a vector to preserve order from requisites to their dependents. /// This may help LSP server performance in languages that support modules, /// but it also ensures that the output of `-t compdb-targets foo` is /// consistent, which is useful in regression tests. public: std::vector in_edges; }; #endif // NINJA_COMMAND_COLLECTOR_H_ ninja-1.13.2/src/debug_flags.cc000066400000000000000000000014671510764045400162530ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include #include #include #include #include "graph.h" bool g_explaining = false; bool g_keep_depfile = false; bool g_keep_rsp = false; bool g_experimental_statcache = true; ninja-1.13.2/src/debug_flags.h000066400000000000000000000015271510764045400161120ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_EXPLAIN_H_ #define NINJA_EXPLAIN_H_ #include struct Edge; struct Node; extern bool g_explaining; extern bool g_keep_depfile; extern bool g_keep_rsp; extern bool g_experimental_statcache; #endif // NINJA_EXPLAIN_H_ ninja-1.13.2/src/depfile_parser.cc000066400000000000000000000252051510764045400167710ustar00rootroot00000000000000/* Generated by re2c */ // Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "depfile_parser.h" #include "util.h" #include using namespace std; DepfileParser::DepfileParser(DepfileParserOptions options) : options_(options) { } // A note on backslashes in Makefiles, from reading the docs: // Backslash-newline is the line continuation character. // Backslash-# escapes a # (otherwise meaningful as a comment start). // Backslash-% escapes a % (otherwise meaningful as a special). // Finally, quoting the GNU manual, "Backslashes that are not in danger // of quoting ‘%’ characters go unmolested." // How do you end a line with a backslash? The netbsd Make docs suggest // reading the result of a shell command echoing a backslash! // // Rather than implement all of above, we follow what GCC/Clang produces: // Backslashes escape a space or hash sign. // When a space is preceded by 2N+1 backslashes, it is represents N backslashes // followed by space. // When a space is preceded by 2N backslashes, it represents 2N backslashes at // the end of a filename. // A hash sign is escaped by a single backslash. All other backslashes remain // unchanged. // // If anyone actually has depfiles that rely on the more complicated // behavior we can adjust this. bool DepfileParser::Parse(string* content, string* err) { // in: current parser input point. // end: end of input. // parsing_targets: whether we are parsing targets or dependencies. char* in = &(*content)[0]; char* end = in + content->size(); bool have_target = false; bool parsing_targets = true; bool poisoned_input = false; bool is_empty = true; while (in < end) { bool have_newline = false; // out: current output point (typically same as in, but can fall behind // as we de-escape backslashes). char* out = in; // filename: start of the current parsed filename. char* filename = out; for (;;) { // start: beginning of the current parsed span. 
const char* start = in; char* yymarker = NULL; { unsigned char yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 0, 0, 128, 128, 128, 128, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 128, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 128, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; yych = *in; if (yybm[0+yych] & 128) { goto yy5; } if (yych <= '\r') { if (yych <= '\t') { if (yych >= 0x01) goto yy1; } else { if (yych <= '\n') goto yy3; if (yych <= '\f') goto yy1; goto yy4; } } else { if (yych <= '$') { if (yych <= '#') goto yy1; goto yy7; } else { if (yych <= '>') goto yy1; if (yych <= '\\') goto yy8; goto yy1; } } ++in; { break; } yy1: ++in; yy2: { // For any other character (e.g. whitespace), swallow it here, // allowing the outer logic to loop around again. break; } yy3: ++in; { // A newline ends the current file name and the current rule. 
have_newline = true; break; } yy4: yych = *++in; if (yych == '\n') goto yy3; goto yy2; yy5: yych = *++in; if (yybm[0+yych] & 128) { goto yy5; } yy6: { // Got a span of plain text. int len = (int)(in - start); // Need to shift it over if we're overwriting backslashes. if (out < start) memmove(out, start, len); out += len; continue; } yy7: yych = *++in; if (yych == '$') goto yy9; goto yy2; yy8: yych = *(yymarker = ++in); if (yych <= ' ') { if (yych <= '\n') { if (yych <= 0x00) goto yy2; if (yych <= '\t') goto yy10; goto yy11; } else { if (yych == '\r') goto yy12; if (yych <= 0x1F) goto yy10; goto yy13; } } else { if (yych <= '9') { if (yych == '#') goto yy14; goto yy10; } else { if (yych <= ':') goto yy15; if (yych == '\\') goto yy17; goto yy10; } } yy9: ++in; { // De-escape dollar character. *out++ = '$'; continue; } yy10: ++in; goto yy6; yy11: ++in; { // A line continuation ends the current file name. break; } yy12: yych = *++in; if (yych == '\n') goto yy11; in = yymarker; goto yy2; yy13: ++in; { // 2N+1 backslashes plus space -> N backslashes plus space. int len = (int)(in - start); int n = len / 2 - 1; if (out < start) memset(out, '\\', n); out += n; *out++ = ' '; continue; } yy14: ++in; { // De-escape hash sign, but preserve other leading backslashes. int len = (int)(in - start); if (len > 2 && out < start) memset(out, '\\', len - 2); out += len - 2; *out++ = '#'; continue; } yy15: yych = *++in; if (yych <= '\f') { if (yych <= 0x00) goto yy18; if (yych <= 0x08) goto yy16; if (yych <= '\n') goto yy18; } else { if (yych <= '\r') goto yy18; if (yych == ' ') goto yy18; } yy16: { // De-escape colon sign, but preserve other leading backslashes. // Regular expression uses lookahead to make sure that no whitespace // nor EOF follows. 
In that case it'd be the : at the end of a target int len = (int)(in - start); if (len > 2 && out < start) memset(out, '\\', len - 2); out += len - 2; *out++ = ':'; continue; } yy17: yych = *++in; if (yych <= ' ') { if (yych <= '\n') { if (yych <= 0x00) goto yy6; if (yych <= '\t') goto yy10; goto yy6; } else { if (yych == '\r') goto yy6; if (yych <= 0x1F) goto yy10; goto yy19; } } else { if (yych <= '9') { if (yych == '#') goto yy14; goto yy10; } else { if (yych <= ':') goto yy15; if (yych == '\\') goto yy20; goto yy10; } } yy18: ++in; { // Backslash followed by : and whitespace. // It is therefore normal text and not an escaped colon int len = (int)(in - start - 1); // Need to shift it over if we're overwriting backslashes. if (out < start) memmove(out, start, len); out += len; if (*(in - 1) == '\n') have_newline = true; break; } yy19: ++in; { // 2N backslashes plus space -> 2N backslashes, end of filename. int len = (int)(in - start); if (out < start) memset(out, '\\', len - 1); out += len - 1; break; } yy20: yych = *++in; if (yych <= ' ') { if (yych <= '\n') { if (yych <= 0x00) goto yy6; if (yych <= '\t') goto yy10; goto yy6; } else { if (yych == '\r') goto yy6; if (yych <= 0x1F) goto yy10; goto yy13; } } else { if (yych <= '9') { if (yych == '#') goto yy14; goto yy10; } else { if (yych <= ':') goto yy15; if (yych == '\\') goto yy17; goto yy10; } } } } int len = (int)(out - filename); const bool is_dependency = !parsing_targets; if (len > 0 && filename[len - 1] == ':') { len--; // Strip off trailing colon, if any. parsing_targets = false; have_target = true; } if (len > 0) { is_empty = false; StringPiece piece = StringPiece(filename, len); // If we've seen this as an input before, skip it. std::vector::iterator pos = std::find(ins_.begin(), ins_.end(), piece); if (pos == ins_.end()) { if (is_dependency) { if (poisoned_input) { *err = "inputs may not also have inputs"; return false; } // New input. ins_.push_back(piece); } else { // Check for a new output. 
if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end()) outs_.push_back(piece); } } else if (!is_dependency) { // We've passed an input on the left side; reject new inputs. poisoned_input = true; } } if (have_newline) { // A newline ends a rule so the next filename will be a new target. parsing_targets = true; poisoned_input = false; } } if (!have_target && !is_empty) { *err = "expected ':' in depfile"; return false; } return true; } ninja-1.13.2/src/depfile_parser.h000066400000000000000000000025101510764045400166250ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_DEPFILE_PARSER_H_ #define NINJA_DEPFILE_PARSER_H_ #include #include #include "string_piece.h" struct DepfileParserOptions { DepfileParserOptions() {} }; /// Parser for the dependency information emitted by gcc's -M flags. struct DepfileParser { explicit DepfileParser(DepfileParserOptions options = DepfileParserOptions()); /// Parse an input file. Input must be NUL-terminated. /// Warning: may mutate the content in-place and parsed StringPieces are /// pointers within it. bool Parse(std::string* content, std::string* err); std::vector outs_; std::vector ins_; DepfileParserOptions options_; }; #endif // NINJA_DEPFILE_PARSER_H_ ninja-1.13.2/src/depfile_parser.in.cc000066400000000000000000000151421510764045400173750ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "depfile_parser.h" #include "util.h" #include using namespace std; DepfileParser::DepfileParser(DepfileParserOptions options) : options_(options) { } // A note on backslashes in Makefiles, from reading the docs: // Backslash-newline is the line continuation character. // Backslash-# escapes a # (otherwise meaningful as a comment start). // Backslash-% escapes a % (otherwise meaningful as a special). // Finally, quoting the GNU manual, "Backslashes that are not in danger // of quoting ‘%’ characters go unmolested." // How do you end a line with a backslash? The netbsd Make docs suggest // reading the result of a shell command echoing a backslash! // // Rather than implement all of above, we follow what GCC/Clang produces: // Backslashes escape a space or hash sign. // When a space is preceded by 2N+1 backslashes, it is represents N backslashes // followed by space. // When a space is preceded by 2N backslashes, it represents 2N backslashes at // the end of a filename. // A hash sign is escaped by a single backslash. All other backslashes remain // unchanged. // // If anyone actually has depfiles that rely on the more complicated // behavior we can adjust this. bool DepfileParser::Parse(string* content, string* err) { // in: current parser input point. // end: end of input. // parsing_targets: whether we are parsing targets or dependencies. 
char* in = &(*content)[0]; char* end = in + content->size(); bool have_target = false; bool parsing_targets = true; bool poisoned_input = false; bool is_empty = true; while (in < end) { bool have_newline = false; // out: current output point (typically same as in, but can fall behind // as we de-escape backslashes). char* out = in; // filename: start of the current parsed filename. char* filename = out; for (;;) { // start: beginning of the current parsed span. const char* start = in; char* yymarker = NULL; /*!re2c re2c:define:YYCTYPE = "unsigned char"; re2c:define:YYCURSOR = in; re2c:define:YYLIMIT = end; re2c:define:YYMARKER = yymarker; re2c:yyfill:enable = 0; re2c:indent:top = 2; re2c:indent:string = " "; nul = "\000"; newline = '\r'?'\n'; '\\\\'* '\\ ' { // 2N+1 backslashes plus space -> N backslashes plus space. int len = (int)(in - start); int n = len / 2 - 1; if (out < start) memset(out, '\\', n); out += n; *out++ = ' '; continue; } '\\\\'+ ' ' { // 2N backslashes plus space -> 2N backslashes, end of filename. int len = (int)(in - start); if (out < start) memset(out, '\\', len - 1); out += len - 1; break; } '\\'+ '#' { // De-escape hash sign, but preserve other leading backslashes. int len = (int)(in - start); if (len > 2 && out < start) memset(out, '\\', len - 2); out += len - 2; *out++ = '#'; continue; } '\\'+ ':' [\x00\x20\r\n\t] { // Backslash followed by : and whitespace. // It is therefore normal text and not an escaped colon int len = (int)(in - start - 1); // Need to shift it over if we're overwriting backslashes. if (out < start) memmove(out, start, len); out += len; if (*(in - 1) == '\n') have_newline = true; break; } '\\'+ ':' { // De-escape colon sign, but preserve other leading backslashes. // Regular expression uses lookahead to make sure that no whitespace // nor EOF follows. 
In that case it'd be the : at the end of a target int len = (int)(in - start); if (len > 2 && out < start) memset(out, '\\', len - 2); out += len - 2; *out++ = ':'; continue; } '$$' { // De-escape dollar character. *out++ = '$'; continue; } '\\'+ [^\000\r\n] | [a-zA-Z0-9+?"'&,/_:.~()}{%=@\x5B\x5D!\x80-\xFF-]+ { // Got a span of plain text. int len = (int)(in - start); // Need to shift it over if we're overwriting backslashes. if (out < start) memmove(out, start, len); out += len; continue; } nul { break; } '\\' newline { // A line continuation ends the current file name. break; } newline { // A newline ends the current file name and the current rule. have_newline = true; break; } [^] { // For any other character (e.g. whitespace), swallow it here, // allowing the outer logic to loop around again. break; } */ } int len = (int)(out - filename); const bool is_dependency = !parsing_targets; if (len > 0 && filename[len - 1] == ':') { len--; // Strip off trailing colon, if any. parsing_targets = false; have_target = true; } if (len > 0) { is_empty = false; StringPiece piece = StringPiece(filename, len); // If we've seen this as an input before, skip it. std::vector::iterator pos = std::find(ins_.begin(), ins_.end(), piece); if (pos == ins_.end()) { if (is_dependency) { if (poisoned_input) { *err = "inputs may not also have inputs"; return false; } // New input. ins_.push_back(piece); } else { // Check for a new output. if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end()) outs_.push_back(piece); } } else if (!is_dependency) { // We've passed an input on the left side; reject new inputs. poisoned_input = true; } } if (have_newline) { // A newline ends a rule so the next filename will be a new target. 
parsing_targets = true; poisoned_input = false; } } if (!have_target && !is_empty) { *err = "expected ':' in depfile"; return false; } return true; } ninja-1.13.2/src/depfile_parser_perftest.cc000066400000000000000000000040761510764045400207100ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "depfile_parser.h" #include "util.h" #include "metrics.h" using namespace std; int main(int argc, char* argv[]) { if (argc < 2) { printf("usage: %s \n", argv[0]); return 1; } vector times; for (int i = 1; i < argc; ++i) { const char* filename = argv[i]; for (int limit = 1 << 10; limit < (1<<20); limit *= 2) { int64_t start = GetTimeMillis(); for (int rep = 0; rep < limit; ++rep) { string buf; string err; if (ReadFile(filename, &buf, &err) < 0) { printf("%s: %s\n", filename, err.c_str()); return 1; } DepfileParser parser; if (!parser.Parse(&buf, &err)) { printf("%s: %s\n", filename, err.c_str()); return 1; } } int64_t end = GetTimeMillis(); if (end - start > 100) { int delta = (int)(end - start); float time = delta*1000 / (float)limit; printf("%s: %.1fus\n", filename, time); times.push_back(time); break; } } } if (!times.empty()) { float min = times[0]; float max = times[0]; float total = 0; for (size_t i = 0; i < times.size(); ++i) { total += times[i]; if (times[i] < min) min = times[i]; else if (times[i] > max) max = times[i]; } printf("min %.1fus max %.1fus avg %.1fus\n", 
min, max, total / times.size()); } return 0; } ninja-1.13.2/src/depfile_parser_test.cc000066400000000000000000000321031510764045400200230ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "depfile_parser.h" #include "test.h" using namespace std; struct DepfileParserTest : public testing::Test { bool Parse(const char* input, string* err); DepfileParser parser_; string input_; }; bool DepfileParserTest::Parse(const char* input, string* err) { input_ = input; return parser_.Parse(&input_, err); } TEST_F(DepfileParserTest, Basic) { string err; EXPECT_TRUE(Parse( "build/ninja.o: ninja.cc ninja.h eval_env.h manifest_parser.h\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("build/ninja.o", parser_.outs_[0].AsString()); EXPECT_EQ(4u, parser_.ins_.size()); } TEST_F(DepfileParserTest, EarlyNewlineAndWhitespace) { string err; EXPECT_TRUE(Parse( " \\\n" " out: in\n", &err)); ASSERT_EQ("", err); } TEST_F(DepfileParserTest, Continuation) { string err; EXPECT_TRUE(Parse( "foo.o: \\\n" " bar.h baz.h\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("foo.o", parser_.outs_[0].AsString()); EXPECT_EQ(2u, parser_.ins_.size()); } TEST_F(DepfileParserTest, WindowsDrivePaths) { string err; EXPECT_TRUE(Parse("foo.o: //?/c:/bar.h\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("foo.o", parser_.outs_[0].AsString()); 
EXPECT_EQ(1u, parser_.ins_.size()); EXPECT_EQ("//?/c:/bar.h", parser_.ins_[0].AsString()); } TEST_F(DepfileParserTest, AmpersandsAndQuotes) { string err; EXPECT_TRUE(Parse("foo&bar.o foo'bar.o foo\"bar.o: foo&bar.h foo'bar.h foo\"bar.h\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(3u, parser_.outs_.size()); EXPECT_EQ("foo&bar.o", parser_.outs_[0].AsString()); EXPECT_EQ("foo'bar.o", parser_.outs_[1].AsString()); EXPECT_EQ("foo\"bar.o", parser_.outs_[2].AsString()); EXPECT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("foo&bar.h", parser_.ins_[0].AsString()); EXPECT_EQ("foo'bar.h", parser_.ins_[1].AsString()); EXPECT_EQ("foo\"bar.h", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, CarriageReturnContinuation) { string err; EXPECT_TRUE(Parse( "foo.o: \\\r\n" " bar.h baz.h\r\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("foo.o", parser_.outs_[0].AsString()); EXPECT_EQ(2u, parser_.ins_.size()); } TEST_F(DepfileParserTest, BackSlashes) { string err; EXPECT_TRUE(Parse( "Project\\Dir\\Build\\Release8\\Foo\\Foo.res : \\\n" " Dir\\Library\\Foo.rc \\\n" " Dir\\Library\\Version\\Bar.h \\\n" " Dir\\Library\\Foo.ico \\\n" " Project\\Thing\\Bar.tlb \\\n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("Project\\Dir\\Build\\Release8\\Foo\\Foo.res", parser_.outs_[0].AsString()); EXPECT_EQ(4u, parser_.ins_.size()); } TEST_F(DepfileParserTest, Spaces) { string err; EXPECT_TRUE(Parse( "a\\ bc\\ def: a\\ b c d", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("a bc def", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("a b", parser_.ins_[0].AsString()); EXPECT_EQ("c", parser_.ins_[1].AsString()); EXPECT_EQ("d", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, MultipleBackslashes) { // Successive 2N+1 backslashes followed by space (' ') are replaced by N >= 0 // backslashes and the space. A single backslash before hash sign is removed. 
// Other backslashes remain untouched (including 2N backslashes followed by // space). string err; EXPECT_TRUE(Parse( "a\\ b\\#c.h: \\\\\\\\\\ \\\\\\\\ \\\\share\\info\\\\#1", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("a b#c.h", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("\\\\ ", parser_.ins_[0].AsString()); EXPECT_EQ("\\\\\\\\", parser_.ins_[1].AsString()); EXPECT_EQ("\\\\share\\info\\#1", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, Escapes) { // Put backslashes before a variety of characters, see which ones make // it through. string err; EXPECT_TRUE(Parse( "\\!\\@\\#$$\\%\\^\\&\\[\\]\\\\:", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("\\!\\@#$\\%\\^\\&\\[\\]\\\\", parser_.outs_[0].AsString()); ASSERT_EQ(0u, parser_.ins_.size()); } TEST_F(DepfileParserTest, EscapedColons) { std::string err; // Tests for correct parsing of depfiles produced on Windows // by both Clang, GCC pre 10 and GCC 10 EXPECT_TRUE(Parse( "c\\:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o: \\\n" " c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h \n", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o", parser_.outs_[0].AsString()); ASSERT_EQ(1u, parser_.ins_.size()); EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h", parser_.ins_[0].AsString()); } TEST_F(DepfileParserTest, EscapedTargetColon) { std::string err; EXPECT_TRUE(Parse( "foo1\\: x\n" "foo1\\:\n" "foo1\\:\r\n" "foo1\\:\t\n" "foo1\\:", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("foo1\\", parser_.outs_[0].AsString()); ASSERT_EQ(1u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); } TEST_F(DepfileParserTest, SpecialChars) { // See filenames like istreambuf.iterator_op!= in // https://github.com/google/libcxx/tree/master/test/iterators/stream.iterators/istreambuf.iterator/ string err; EXPECT_TRUE(Parse( 
"C:/Program\\ Files\\ (x86)/Microsoft\\ crtdefs.h: \\\n" " en@quot.header~ t+t-x!=1 \\\n" " openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif\\\n" " Fu\303\244ball\\\n" " a[1]b@2%c", &err)); ASSERT_EQ("", err); ASSERT_EQ(1u, parser_.outs_.size()); EXPECT_EQ("C:/Program Files (x86)/Microsoft crtdefs.h", parser_.outs_[0].AsString()); ASSERT_EQ(5u, parser_.ins_.size()); EXPECT_EQ("en@quot.header~", parser_.ins_[0].AsString()); EXPECT_EQ("t+t-x!=1", parser_.ins_[1].AsString()); EXPECT_EQ("openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif", parser_.ins_[2].AsString()); EXPECT_EQ("Fu\303\244ball", parser_.ins_[3].AsString()); EXPECT_EQ("a[1]b@2%c", parser_.ins_[4].AsString()); } TEST_F(DepfileParserTest, UnifyMultipleOutputs) { // check that multiple duplicate targets are properly unified string err; EXPECT_TRUE(Parse("foo foo: x y z", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, MultipleDifferentOutputs) { // check that multiple different outputs are accepted by the parser string err; EXPECT_TRUE(Parse("foo bar: x y z", &err)); ASSERT_EQ(2u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ("bar", parser_.outs_[1].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, MultipleEmptyRules) { string err; EXPECT_TRUE(Parse("foo: x\n" "foo: \n" "foo:\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(1u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); } TEST_F(DepfileParserTest, UnifyMultipleRulesLF) { string err; EXPECT_TRUE(Parse("foo: x\n" "foo: y\n" "foo \\\n" "foo: z\n", 
&err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, UnifyMultipleRulesCRLF) { string err; EXPECT_TRUE(Parse("foo: x\r\n" "foo: y\r\n" "foo \\\r\n" "foo: z\r\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, UnifyMixedRulesLF) { string err; EXPECT_TRUE(Parse("foo: x\\\n" " y\n" "foo \\\n" "foo: z\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, UnifyMixedRulesCRLF) { string err; EXPECT_TRUE(Parse("foo: x\\\r\n" " y\r\n" "foo \\\r\n" "foo: z\r\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, IndentedRulesLF) { string err; EXPECT_TRUE(Parse(" foo: x\n" " foo: y\n" " foo: z\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, IndentedRulesCRLF) { string err; EXPECT_TRUE(Parse(" foo: x\r\n" " foo: y\r\n" " foo: z\r\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", 
parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, TolerateMP) { string err; EXPECT_TRUE(Parse("foo: x y z\n" "x:\n" "y:\n" "z:\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, MultipleRulesTolerateMP) { string err; EXPECT_TRUE(Parse("foo: x\n" "x:\n" "foo: y\n" "y:\n" "foo: z\n" "z:\n", &err)); ASSERT_EQ(1u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, MultipleRulesDifferentOutputs) { // check that multiple different outputs are accepted by the parser // when spread across multiple rules string err; EXPECT_TRUE(Parse("foo: x y\n" "bar: y z\n", &err)); ASSERT_EQ(2u, parser_.outs_.size()); ASSERT_EQ("foo", parser_.outs_[0].AsString()); ASSERT_EQ("bar", parser_.outs_[1].AsString()); ASSERT_EQ(3u, parser_.ins_.size()); EXPECT_EQ("x", parser_.ins_[0].AsString()); EXPECT_EQ("y", parser_.ins_[1].AsString()); EXPECT_EQ("z", parser_.ins_[2].AsString()); } TEST_F(DepfileParserTest, BuggyMP) { std::string err; EXPECT_FALSE(Parse("foo: x y z\n" "x: alsoin\n" "y:\n" "z:\n", &err)); ASSERT_EQ("inputs may not also have inputs", err); } TEST_F(DepfileParserTest, EmptyFile) { std::string err; EXPECT_TRUE(Parse("", &err)); ASSERT_EQ(0u, parser_.outs_.size()); ASSERT_EQ(0u, parser_.ins_.size()); } TEST_F(DepfileParserTest, EmptyLines) { std::string err; EXPECT_TRUE(Parse("\n\n", &err)); ASSERT_EQ(0u, parser_.outs_.size()); ASSERT_EQ(0u, 
parser_.ins_.size()); } TEST_F(DepfileParserTest, MissingColon) { // The file is not empty but is missing a colon separator. std::string err; EXPECT_FALSE(Parse("foo.o foo.c\n", &err)); EXPECT_EQ("expected ':' in depfile", err); } ninja-1.13.2/src/deps_log.cc000066400000000000000000000324771510764045400156120ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "deps_log.h" #include #include #include #include #include #ifndef _WIN32 #include #elif defined(_MSC_VER) && (_MSC_VER < 1900) typedef __int32 int32_t; typedef unsigned __int32 uint32_t; #endif #include "graph.h" #include "metrics.h" #include "state.h" #include "util.h" using namespace std; // The version is stored as 4 bytes after the signature and also serves as a // byte order mark. Signature and version combined are 16 bytes long. static const char kFileSignature[] = "# ninjadeps\n"; static const size_t kFileSignatureSize = sizeof(kFileSignature) - 1u; static const int32_t kCurrentVersion = 4; // Record size is currently limited to less than the full 32 bit, due to // internal buffers having to have this size. 
static constexpr size_t kMaxRecordSize = (1 << 19) - 1; DepsLog::~DepsLog() { Close(); } bool DepsLog::OpenForWrite(const string& path, string* err) { if (needs_recompaction_) { if (!Recompact(path, err)) return false; } assert(!file_); file_path_ = path; // we don't actually open the file right now, but will do // so on the first write attempt return true; } bool DepsLog::RecordDeps(Node* node, TimeStamp mtime, const vector& nodes) { return RecordDeps(node, mtime, static_cast(nodes.size()), nodes.data()); } bool DepsLog::RecordDeps(Node* node, TimeStamp mtime, int node_count, Node* const* nodes) { // Track whether there's any new data to be recorded. bool made_change = false; // Assign ids to all nodes that are missing one. if (node->id() < 0) { if (!RecordId(node)) return false; made_change = true; } for (int i = 0; i < node_count; ++i) { if (nodes[i]->id() < 0) { if (!RecordId(nodes[i])) return false; made_change = true; } } // See if the new data is different than the existing data, if any. if (!made_change) { Deps* deps = GetDeps(node); if (!deps || deps->mtime != mtime || deps->node_count != node_count) { made_change = true; } else { for (int i = 0; i < node_count; ++i) { if (deps->nodes[i] != nodes[i]) { made_change = true; break; } } } } // Don't write anything if there's no new info. if (!made_change) return true; // Update on-disk representation. unsigned size = 4 * (1 + 2 + node_count); if (size > kMaxRecordSize) { errno = ERANGE; return false; } if (!OpenForWriteIfNeeded()) { return false; } size |= 0x80000000; // Deps record: set high bit. 
if (fwrite(&size, 4, 1, file_) < 1) return false; int id = node->id(); if (fwrite(&id, 4, 1, file_) < 1) return false; uint32_t mtime_part = static_cast(mtime & 0xffffffff); if (fwrite(&mtime_part, 4, 1, file_) < 1) return false; mtime_part = static_cast((mtime >> 32) & 0xffffffff); if (fwrite(&mtime_part, 4, 1, file_) < 1) return false; for (int i = 0; i < node_count; ++i) { id = nodes[i]->id(); if (fwrite(&id, 4, 1, file_) < 1) return false; } if (fflush(file_) != 0) return false; // Update in-memory representation. Deps* deps = new Deps(mtime, node_count); for (int i = 0; i < node_count; ++i) deps->nodes[i] = nodes[i]; UpdateDeps(node->id(), deps); return true; } void DepsLog::Close() { OpenForWriteIfNeeded(); // create the file even if nothing has been recorded if (file_) fclose(file_); file_ = NULL; } LoadStatus DepsLog::Load(const string& path, State* state, string* err) { METRIC_RECORD(".ninja_deps load"); char buf[kMaxRecordSize + 1]; FILE* f = fopen(path.c_str(), "rb"); if (!f) { if (errno == ENOENT) return LOAD_NOT_FOUND; *err = strerror(errno); return LOAD_ERROR; } bool valid_header = fread(buf, kFileSignatureSize, 1, f) == 1 && !memcmp(buf, kFileSignature, kFileSignatureSize); int32_t version = 0; bool valid_version = fread(&version, 4, 1, f) == 1 && version == kCurrentVersion; // Note: For version differences, this should migrate to the new format. // But the v1 format could sometimes (rarely) end up with invalid data, so // don't migrate v1 to v3 to force a rebuild. (v2 only existed for a few days, // and there was no release with it, so pretend that it never happened.) if (!valid_header || !valid_version) { if (version == 1) *err = "deps log version change; rebuilding"; else *err = "bad deps log signature or version; starting over"; fclose(f); platformAwareUnlink(path.c_str()); // Don't report this as a failure. An empty deps log will cause // us to rebuild the outputs anyway. 
return LOAD_SUCCESS; } long offset = ftell(f); bool read_failed = false; int unique_dep_record_count = 0; int total_dep_record_count = 0; for (;;) { unsigned size; if (fread(&size, sizeof(size), 1, f) < 1) { if (!feof(f)) read_failed = true; break; } bool is_deps = (size >> 31) != 0; size = size & 0x7FFFFFFF; if (size > kMaxRecordSize || fread(buf, size, 1, f) < 1) { read_failed = true; break; } offset += size + sizeof(size); if (is_deps) { if ((size % 4) != 0) { read_failed = true; break; } int* deps_data = reinterpret_cast(buf); int out_id = deps_data[0]; TimeStamp mtime; mtime = (TimeStamp)(((uint64_t)(unsigned int)deps_data[2] << 32) | (uint64_t)(unsigned int)deps_data[1]); deps_data += 3; int deps_count = (size / 4) - 3; for (int i = 0; i < deps_count; ++i) { int node_id = deps_data[i]; if (node_id >= (int)nodes_.size() || !nodes_[node_id]) { read_failed = true; break; } } if (read_failed) break; Deps* deps = new Deps(mtime, deps_count); for (int i = 0; i < deps_count; ++i) { deps->nodes[i] = nodes_[deps_data[i]]; } total_dep_record_count++; if (!UpdateDeps(out_id, deps)) ++unique_dep_record_count; } else { int path_size = size - 4; if (path_size <= 0) { read_failed = true; break; } // There can be up to 3 bytes of padding. if (buf[path_size - 1] == '\0') --path_size; if (buf[path_size - 1] == '\0') --path_size; if (buf[path_size - 1] == '\0') --path_size; StringPiece subpath(buf, path_size); // It is not necessary to pass in a correct slash_bits here. It will // either be a Node that's in the manifest (in which case it will already // have a correct slash_bits that GetNode will look up), or it is an // implicit dependency from a .d which does not affect the build command // (and so need not have its slashes maintained). Node* node = state->GetNode(subpath, 0); // Check that the expected index matches the actual index. This can only // happen if two ninja processes write to the same deps log concurrently. 
// (This uses unary complement to make the checksum look less like a // dependency record entry.) unsigned checksum = *reinterpret_cast(buf + size - 4); int expected_id = ~checksum; int id = static_cast(nodes_.size()); if (id != expected_id || node->id() >= 0) { read_failed = true; break; } node->set_id(id); nodes_.push_back(node); } } if (read_failed) { // An error occurred while loading; try to recover by truncating the // file to the last fully-read record. if (ferror(f)) { *err = strerror(ferror(f)); } else { *err = "premature end of file"; } fclose(f); if (!Truncate(path, offset, err)) return LOAD_ERROR; // The truncate succeeded; we'll just report the load error as a // warning because the build can proceed. *err += "; recovering"; return LOAD_SUCCESS; } fclose(f); // Rebuild the log if there are too many dead records. int kMinCompactionEntryCount = 1000; int kCompactionRatio = 3; if (total_dep_record_count > kMinCompactionEntryCount && total_dep_record_count > unique_dep_record_count * kCompactionRatio) { needs_recompaction_ = true; } return LOAD_SUCCESS; } DepsLog::Deps* DepsLog::GetDeps(Node* node) { // Abort if the node has no id (never referenced in the deps) or if // there's no deps recorded for the node. if (node->id() < 0 || node->id() >= (int)deps_.size()) return NULL; return deps_[node->id()]; } Node* DepsLog::GetFirstReverseDepsNode(Node* node) { for (size_t id = 0; id < deps_.size(); ++id) { Deps* deps = deps_[id]; if (!deps) continue; for (int i = 0; i < deps->node_count; ++i) { if (deps->nodes[i] == node) return nodes_[id]; } } return NULL; } bool DepsLog::Recompact(const string& path, string* err) { METRIC_RECORD(".ninja_deps recompact"); Close(); string temp_path = path + ".recompact"; // OpenForWrite() opens for append. Make sure it's not appending to a // left-over file from a previous recompaction attempt that crashed somehow. 
platformAwareUnlink(temp_path.c_str()); DepsLog new_log; if (!new_log.OpenForWrite(temp_path, err)) return false; // Clear all known ids so that new ones can be reassigned. The new indices // will refer to the ordering in new_log, not in the current log. for (vector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) (*i)->set_id(-1); // Write out all deps again. for (int old_id = 0; old_id < (int)deps_.size(); ++old_id) { Deps* deps = deps_[old_id]; if (!deps) continue; // If nodes_[old_id] is a leaf, it has no deps. if (!IsDepsEntryLiveFor(nodes_[old_id])) continue; if (!new_log.RecordDeps(nodes_[old_id], deps->mtime, deps->node_count, deps->nodes)) { new_log.Close(); return false; } } new_log.Close(); // All nodes now have ids that refer to new_log, so steal its data. deps_.swap(new_log.deps_); nodes_.swap(new_log.nodes_); if (platformAwareUnlink(path.c_str()) < 0) { *err = strerror(errno); return false; } if (rename(temp_path.c_str(), path.c_str()) < 0) { *err = strerror(errno); return false; } return true; } bool DepsLog::IsDepsEntryLiveFor(const Node* node) { // Skip entries that don't have in-edges or whose edges don't have a // "deps" attribute. They were in the deps log from previous builds, but // the the files they were for were removed from the build and their deps // entries are no longer needed. // (Without the check for "deps", a chain of two or more nodes that each // had deps wouldn't be collected in a single recompaction.) return node->in_edge() && !node->in_edge()->GetBinding("deps").empty(); } bool DepsLog::UpdateDeps(int out_id, Deps* deps) { if (out_id >= (int)deps_.size()) deps_.resize(out_id + 1); bool delete_old = deps_[out_id] != NULL; if (delete_old) delete deps_[out_id]; deps_[out_id] = deps; return delete_old; } bool DepsLog::RecordId(Node* node) { int path_size = static_cast(node->path().size()); assert(path_size > 0 && "Trying to record empty path Node!"); int padding = (4 - path_size % 4) % 4; // Pad path to 4 byte boundary. 
unsigned size = path_size + padding + 4; if (size > kMaxRecordSize) { errno = ERANGE; return false; } if (!OpenForWriteIfNeeded()) { return false; } if (fwrite(&size, 4, 1, file_) < 1) return false; if (fwrite(node->path().data(), path_size, 1, file_) < 1) { return false; } if (padding && fwrite("\0\0", padding, 1, file_) < 1) return false; int id = static_cast(nodes_.size()); unsigned checksum = ~(unsigned)id; if (fwrite(&checksum, 4, 1, file_) < 1) return false; if (fflush(file_) != 0) return false; node->set_id(id); nodes_.push_back(node); return true; } bool DepsLog::OpenForWriteIfNeeded() { if (file_path_.empty()) { return true; } file_ = fopen(file_path_.c_str(), "ab"); if (!file_) { return false; } // Set the buffer size to this and flush the file buffer after every record // to make sure records aren't written partially. if (setvbuf(file_, NULL, _IOFBF, kMaxRecordSize + 1) != 0) { return false; } SetCloseOnExec(fileno(file_)); // Opening a file in append mode doesn't set the file pointer to the file's // end on Windows. Do that explicitly. fseek(file_, 0, SEEK_END); if (ftell(file_) == 0) { if (fwrite(kFileSignature, sizeof(kFileSignature) - 1, 1, file_) < 1) { return false; } if (fwrite(&kCurrentVersion, 4, 1, file_) < 1) { return false; } } if (fflush(file_) != 0) { return false; } file_path_.clear(); return true; } ninja-1.13.2/src/deps_log.h000066400000000000000000000121171510764045400154410ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_DEPS_LOG_H_ #define NINJA_DEPS_LOG_H_ #include #include #include #include "load_status.h" #include "timestamp.h" struct Node; struct State; /// As build commands run they can output extra dependency information /// (e.g. header dependencies for C source) dynamically. DepsLog collects /// that information at build time and uses it for subsequent builds. /// /// The on-disk format is based on two primary design constraints: /// - it must be written to as a stream (during the build, which may be /// interrupted); /// - it can be read all at once on startup. (Alternative designs, where /// it contains indexing information, were considered and discarded as /// too complicated to implement; if the file is small than reading it /// fully on startup is acceptable.) /// Here are some stats from the Windows Chrome dependency files, to /// help guide the design space. The total text in the files sums to /// 90mb so some compression is warranted to keep load-time fast. /// There's about 10k files worth of dependencies that reference about /// 40k total paths totalling 2mb of unique strings. /// /// Based on these stats, here's the current design. /// The file is structured as version header followed by a sequence of records. /// Each record is either a path string or a dependency list. /// Numbering the path strings in file order gives them dense integer ids. /// A dependency list maps an output id to a list of input ids. /// /// Concretely, a record is: /// four bytes record length, high bit indicates record type /// (but max record sizes are capped at 512kB) /// path records contain the string name of the path, followed by up to 3 /// padding bytes to align on 4 byte boundaries, followed by the /// one's complement of the expected index of the record (to detect /// concurrent writes of multiple ninja processes to the log). 
/// dependency records are an array of 4-byte integers /// [output path id, /// output path mtime (lower 4 bytes), output path mtime (upper 4 bytes), /// input path id, input path id...] /// (The mtime is compared against the on-disk output path mtime /// to verify the stored data is up-to-date.) /// If two records reference the same output the latter one in the file /// wins, allowing updates to just be appended to the file. A separate /// repacking step can run occasionally to remove dead records. struct DepsLog { DepsLog() : needs_recompaction_(false), file_(NULL) {} ~DepsLog(); // Writing (build-time) interface. bool OpenForWrite(const std::string& path, std::string* err); bool RecordDeps(Node* node, TimeStamp mtime, const std::vector& nodes); bool RecordDeps(Node* node, TimeStamp mtime, int node_count, Node* const* nodes); void Close(); // Reading (startup-time) interface. struct Deps { Deps(int64_t mtime, int node_count) : mtime(mtime), node_count(node_count), nodes(new Node*[node_count]) {} ~Deps() { delete [] nodes; } TimeStamp mtime; int node_count; Node** nodes; }; LoadStatus Load(const std::string& path, State* state, std::string* err); Deps* GetDeps(Node* node); Node* GetFirstReverseDepsNode(Node* node); /// Rewrite the known log entries, throwing away old data. bool Recompact(const std::string& path, std::string* err); /// Returns if the deps entry for a node is still reachable from the manifest. /// /// The deps log can contain deps entries for files that were built in the /// past but are no longer part of the manifest. This function returns if /// this is the case for a given node. This function is slow, don't call /// it from code that runs on every build. static bool IsDepsEntryLiveFor(const Node* node); /// Used for tests. const std::vector& nodes() const { return nodes_; } const std::vector& deps() const { return deps_; } private: // Updates the in-memory representation. Takes ownership of |deps|. 
// Returns true if a prior deps record was deleted. bool UpdateDeps(int out_id, Deps* deps); // Write a node name record, assigning it an id. bool RecordId(Node* node); /// Should be called before using file_. When false is returned, errno will /// be set. bool OpenForWriteIfNeeded(); bool needs_recompaction_; FILE* file_; std::string file_path_; /// Maps id -> Node. std::vector nodes_; /// Maps id -> deps of that id. std::vector deps_; friend struct DepsLogTest; }; #endif // NINJA_DEPS_LOG_H_ ninja-1.13.2/src/deps_log_test.cc000066400000000000000000000466361510764045400166530ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "deps_log.h" #include #ifndef _WIN32 #include #endif #include "disk_interface.h" #include "graph.h" #include "util.h" #include "test.h" using namespace std; namespace { const char kTestFilename[] = "DepsLogTest-tempfile"; struct DepsLogTest : public testing::Test { virtual void SetUp() { // In case a crashing test left a stale file behind. 
platformAwareUnlink(kTestFilename); } virtual void TearDown() { platformAwareUnlink(kTestFilename); } }; TEST_F(DepsLogTest, WriteRead) { State state1; DepsLog log1; string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); { vector deps; deps.push_back(state1.GetNode("foo.h", 0)); deps.push_back(state1.GetNode("bar.h", 0)); log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps); deps.clear(); deps.push_back(state1.GetNode("foo.h", 0)); deps.push_back(state1.GetNode("bar2.h", 0)); log1.RecordDeps(state1.GetNode("out2.o", 0), 2, deps); DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0)); ASSERT_TRUE(log_deps); ASSERT_EQ(1, log_deps->mtime); ASSERT_EQ(2, log_deps->node_count); ASSERT_EQ("foo.h", log_deps->nodes[0]->path()); ASSERT_EQ("bar.h", log_deps->nodes[1]->path()); } log1.Close(); State state2; DepsLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err)); ASSERT_EQ("", err); ASSERT_EQ(log1.nodes().size(), log2.nodes().size()); for (int i = 0; i < (int)log1.nodes().size(); ++i) { Node* node1 = log1.nodes()[i]; Node* node2 = log2.nodes()[i]; ASSERT_EQ(i, node1->id()); ASSERT_EQ(node1->id(), node2->id()); } // Spot-check the entries in log2. DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out2.o", 0)); ASSERT_TRUE(log_deps); ASSERT_EQ(2, log_deps->mtime); ASSERT_EQ(2, log_deps->node_count); ASSERT_EQ("foo.h", log_deps->nodes[0]->path()); ASSERT_EQ("bar2.h", log_deps->nodes[1]->path()); } TEST_F(DepsLogTest, LotsOfDeps) { const int kNumDeps = 100000; // More than 64k. 
State state1; DepsLog log1; string err; EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); { vector deps; for (int i = 0; i < kNumDeps; ++i) { char buf[32]; sprintf(buf, "file%d.h", i); deps.push_back(state1.GetNode(buf, 0)); } log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps); DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0)); ASSERT_EQ(kNumDeps, log_deps->node_count); } log1.Close(); State state2; DepsLog log2; EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err)); ASSERT_EQ("", err); DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out.o", 0)); ASSERT_EQ(kNumDeps, log_deps->node_count); } // Verify that adding the same deps twice doesn't grow the file. TEST_F(DepsLogTest, DoubleEntry) { // Write some deps to the file and grab its size. int file_size; { State state; DepsLog log; string err; EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif file_size = (int)st.st_size; ASSERT_GT(file_size, 0); } // Now reload the file, and read the same deps. { State state; DepsLog log; string err; EXPECT_TRUE(log.Load(kTestFilename, &state, &err)); EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif int file_size_2 = (int)st.st_size; ASSERT_EQ(file_size, file_size_2); } } // Verify that adding the new deps works and can be compacted away. 
TEST_F(DepsLogTest, Recompact) { const char kManifest[] = "rule cc\n" " command = cc\n" " deps = gcc\n" "build out.o: cc\n" "build other_out.o: cc\n"; // Write some deps to the file and grab its size. int file_size; { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest)); DepsLog log; string err; ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); deps.clear(); deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("baz.h", 0)); log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps); log.Close(); #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif file_size = (int)st.st_size; ASSERT_GT(file_size, 0); } // Now reload the file, and add slightly different deps. int file_size_2; { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest)); DepsLog log; string err; ASSERT_TRUE(log.Load(kTestFilename, &state, &err)); ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif file_size_2 = (int)st.st_size; // The file should grow to record the new deps. ASSERT_GT(file_size_2, file_size); } // Now reload the file, verify the new deps have replaced the old, then // recompact. 
int file_size_3; { State state; ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest)); DepsLog log; string err; ASSERT_TRUE(log.Load(kTestFilename, &state, &err)); Node* out = state.GetNode("out.o", 0); DepsLog::Deps* deps = log.GetDeps(out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(1, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); Node* other_out = state.GetNode("other_out.o", 0); deps = log.GetDeps(other_out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(2, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ("baz.h", deps->nodes[1]->path()); ASSERT_TRUE(log.Recompact(kTestFilename, &err)); // The in-memory deps graph should still be valid after recompaction. deps = log.GetDeps(out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(1, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ(out, log.nodes()[out->id()]); deps = log.GetDeps(other_out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(2, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ("baz.h", deps->nodes[1]->path()); ASSERT_EQ(other_out, log.nodes()[other_out->id()]); // The file should have shrunk a bit for the smaller deps. #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif file_size_3 = (int)st.st_size; ASSERT_LT(file_size_3, file_size_2); } // Now reload the file and recompact with an empty manifest. The previous // entries should be removed. { State state; // Intentionally not parsing kManifest here. 
DepsLog log; string err; ASSERT_TRUE(log.Load(kTestFilename, &state, &err)); Node* out = state.GetNode("out.o", 0); DepsLog::Deps* deps = log.GetDeps(out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(1, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); Node* other_out = state.GetNode("other_out.o", 0); deps = log.GetDeps(other_out); ASSERT_TRUE(deps); ASSERT_EQ(1, deps->mtime); ASSERT_EQ(2, deps->node_count); ASSERT_EQ("foo.h", deps->nodes[0]->path()); ASSERT_EQ("baz.h", deps->nodes[1]->path()); ASSERT_TRUE(log.Recompact(kTestFilename, &err)); // The previous entries should have been removed. deps = log.GetDeps(out); ASSERT_FALSE(deps); deps = log.GetDeps(other_out); ASSERT_FALSE(deps); // The .h files pulled in via deps should no longer have ids either. ASSERT_EQ(-1, state.LookupNode("foo.h")->id()); ASSERT_EQ(-1, state.LookupNode("baz.h")->id()); // The file should have shrunk more. #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif int file_size_4 = (int)st.st_size; ASSERT_LT(file_size_4, file_size_3); } } // Verify that invalid file headers cause a new build. TEST_F(DepsLogTest, InvalidHeader) { const char *kInvalidHeaders[] = { "", // Empty file. "# ninjad", // Truncated first line. "# ninjadeps\n", // No version int. "# ninjadeps\n\001\002", // Truncated version int. "# ninjadeps\n\001\002\003\004" // Invalid version int. 
}; for (size_t i = 0; i < sizeof(kInvalidHeaders) / sizeof(kInvalidHeaders[0]); ++i) { FILE* deps_log = fopen(kTestFilename, "wb"); ASSERT_TRUE(deps_log != NULL); ASSERT_EQ( strlen(kInvalidHeaders[i]), fwrite(kInvalidHeaders[i], 1, strlen(kInvalidHeaders[i]), deps_log)); ASSERT_EQ(0 ,fclose(deps_log)); string err; DepsLog log; State state; ASSERT_TRUE(log.Load(kTestFilename, &state, &err)); EXPECT_EQ("bad deps log signature or version; starting over", err); } } // Simulate what happens when loading a truncated log file. TEST_F(DepsLogTest, Truncated) { // Create a file with some entries. { State state; DepsLog log; string err; EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); deps.clear(); deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar2.h", 0)); log.RecordDeps(state.GetNode("out2.o", 0), 2, deps); log.Close(); } // Get the file size. #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif // Try reloading at truncated sizes. // Track how many nodes/deps were found; they should decrease with // smaller sizes. int node_count = 5; int deps_count = 2; for (int size = (int)st.st_size; size > 0; --size) { string err; ASSERT_TRUE(Truncate(kTestFilename, size, &err)); State state; DepsLog log; EXPECT_TRUE(log.Load(kTestFilename, &state, &err)); if (!err.empty()) { // At some point the log will be so short as to be unparsable. break; } ASSERT_GE(node_count, (int)log.nodes().size()); node_count = static_cast(log.nodes().size()); // Count how many non-NULL deps entries there are. 
int new_deps_count = 0; for (vector::const_iterator i = log.deps().begin(); i != log.deps().end(); ++i) { if (*i) ++new_deps_count; } ASSERT_GE(deps_count, new_deps_count); deps_count = new_deps_count; } } // Run the truncation-recovery logic. TEST_F(DepsLogTest, TruncatedRecovery) { // Create a file with some entries. { State state; DepsLog log; string err; EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); deps.clear(); deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar2.h", 0)); log.RecordDeps(state.GetNode("out2.o", 0), 2, deps); log.Close(); } // Shorten the file, corrupting the last record. { #ifdef __USE_LARGEFILE64 struct stat64 st; ASSERT_EQ(0, stat64(kTestFilename, &st)); #else struct stat st; ASSERT_EQ(0, stat(kTestFilename, &st)); #endif string err; ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err)); } // Load the file again, add an entry. { State state; DepsLog log; string err; EXPECT_TRUE(log.Load(kTestFilename, &state, &err)); ASSERT_EQ("premature end of file; recovering", err); err.clear(); // The truncated entry should've been discarded. EXPECT_EQ(NULL, log.GetDeps(state.GetNode("out2.o", 0))); EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); // Add a new entry. vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar2.h", 0)); log.RecordDeps(state.GetNode("out2.o", 0), 3, deps); log.Close(); } // Load the file a third time to verify appending after a mangled // entry doesn't break things. { State state; DepsLog log; string err; EXPECT_TRUE(log.Load(kTestFilename, &state, &err)); // The truncated entry should exist. 
DepsLog::Deps* deps = log.GetDeps(state.GetNode("out2.o", 0)); ASSERT_TRUE(deps); } } TEST_F(DepsLogTest, ReverseDepsNodes) { State state; DepsLog log; string err; EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); vector deps; deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar.h", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); deps.clear(); deps.push_back(state.GetNode("foo.h", 0)); deps.push_back(state.GetNode("bar2.h", 0)); log.RecordDeps(state.GetNode("out2.o", 0), 2, deps); log.Close(); Node* rev_deps = log.GetFirstReverseDepsNode(state.GetNode("foo.h", 0)); EXPECT_TRUE(rev_deps == state.GetNode("out.o", 0) || rev_deps == state.GetNode("out2.o", 0)); rev_deps = log.GetFirstReverseDepsNode(state.GetNode("bar.h", 0)); EXPECT_TRUE(rev_deps == state.GetNode("out.o", 0)); } TEST_F(DepsLogTest, MalformedDepsLog) { std::string err; { State state; DepsLog log; EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err)); ASSERT_EQ("", err); // First, create a valid log file. std::vector deps; deps.push_back(state.GetNode("foo.hh", 0)); deps.push_back(state.GetNode("bar.hpp", 0)); log.RecordDeps(state.GetNode("out.o", 0), 1, deps); log.Close(); } // Now read its value, validate it a little. RealDiskInterface disk; std::string original_contents; ASSERT_EQ(FileReader::Okay, disk.ReadFile(kTestFilename, &original_contents, &err)); const size_t version_offset = 12; ASSERT_EQ("# ninjadeps\n", original_contents.substr(0, version_offset)); ASSERT_EQ('\x04', original_contents[version_offset + 0]); ASSERT_EQ('\x00', original_contents[version_offset + 1]); ASSERT_EQ('\x00', original_contents[version_offset + 2]); ASSERT_EQ('\x00', original_contents[version_offset + 3]); // clang-format off static const uint8_t kFirstRecord[] = { // size field == 0x0000000c 0x0c, 0x00, 0x00, 0x00, // name field = 'out.o' + 3 bytes of padding. 
'o', 'u', 't', '.', 'o', 0x00, 0x00, 0x00, // checksum = ~0 0xff, 0xff, 0xff, 0xff, }; // clang-format on const size_t kFirstRecordLen = sizeof(kFirstRecord); const size_t first_offset = version_offset + 4; #define COMPARE_RECORD(start_pos, reference, len) \ ASSERT_EQ(original_contents.substr(start_pos, len), std::string(reinterpret_cast(reference), len)) COMPARE_RECORD(first_offset, kFirstRecord, kFirstRecordLen); const size_t second_offset = first_offset + kFirstRecordLen; // clang-format off static const uint8_t kSecondRecord[] = { // size field == 0x0000000c 0x0c, 0x00, 0x00, 0x00, // name field = 'foo.hh' + 2 bytes of padding. 'f', 'o', 'o', '.', 'h', 'h', 0x00, 0x00, // checksum = ~1 0xfe, 0xff, 0xff, 0xff, }; // clang-format on const size_t kSecondRecordLen = sizeof(kSecondRecord); COMPARE_RECORD(second_offset, kSecondRecord, kSecondRecordLen); // Then start generating corrupted versions and trying to load them. const char kBadLogFile[] = "DepsLogTest-corrupted.tempfile"; // Helper lambda to rewrite the bad log file with new content. auto write_bad_log_file = [&disk, &kBadLogFile](const std::string& bad_contents) -> bool { (void)disk.RemoveFile(kBadLogFile); return disk.WriteFile(kBadLogFile, bad_contents, false); }; // First, corrupt the header. std::string bad_contents = original_contents; bad_contents[0] = '@'; ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno); { State state; DepsLog log; err.clear(); ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err)); ASSERT_EQ("bad deps log signature or version; starting over", err); } // Second, truncate the version. bad_contents = original_contents.substr(0, version_offset + 3); ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno); { State state; DepsLog log; err.clear(); ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err)); ASSERT_EQ("bad deps log signature or version; starting over", err); } // Truncate first record's |size| field. The loader should recover. 
bad_contents = original_contents.substr(0, version_offset + 4 + 3); ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno); { State state; DepsLog log; err.clear(); ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err)); ASSERT_EQ("", err); } // Corrupt first record |size| value. bad_contents = original_contents; bad_contents[first_offset + 0] = '\x55'; bad_contents[first_offset + 1] = '\xaa'; bad_contents[first_offset + 2] = '\xff'; bad_contents[first_offset + 3] = '\xff'; ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno); { State state; DepsLog log; err.clear(); ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err)); ASSERT_EQ("premature end of file; recovering", err); } // Make first record |size| less than 4. bad_contents = original_contents; bad_contents[first_offset] = '\x01'; ASSERT_TRUE(write_bad_log_file(bad_contents)) << strerror(errno); { State state; DepsLog log; err.clear(); ASSERT_EQ(LOAD_SUCCESS, log.Load(kBadLogFile, &state, &err)); ASSERT_EQ("premature end of file; recovering", err); } } } // anonymous namespace ninja-1.13.2/src/disk_interface.cc000066400000000000000000000264131510764045400167610ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "disk_interface.h" #include #include #include #include #include #include #ifdef _WIN32 #include // _mkdir #include #include #else #include #endif #include "metrics.h" #include "util.h" using namespace std; namespace { string DirName(const string& path) { #ifdef _WIN32 static const char kPathSeparators[] = "\\/"; #else static const char kPathSeparators[] = "/"; #endif static const char* const kEnd = kPathSeparators + sizeof(kPathSeparators) - 1; string::size_type slash_pos = path.find_last_of(kPathSeparators); if (slash_pos == string::npos) return string(); // Nothing to do. while (slash_pos > 0 && std::find(kPathSeparators, kEnd, path[slash_pos - 1]) != kEnd) --slash_pos; return path.substr(0, slash_pos); } int MakeDir(const string& path) { #ifdef _WIN32 return _mkdir(path.c_str()); #else return mkdir(path.c_str(), 0777); #endif } #ifdef _WIN32 TimeStamp TimeStampFromFileTime(const FILETIME& filetime) { // FILETIME is in 100-nanosecond increments since the Windows epoch. // We don't much care about epoch correctness but we do want the // resulting value to fit in a 64-bit integer. uint64_t mtime = ((uint64_t)filetime.dwHighDateTime << 32) | ((uint64_t)filetime.dwLowDateTime); // 1600 epoch -> 2000 epoch (subtract 400 years). 
return (TimeStamp)mtime - 12622770400LL * (1000000000LL / 100); } TimeStamp StatSingleFile(const string& path, string* err) { WIN32_FILE_ATTRIBUTE_DATA attrs; if (!GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &attrs)) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) return 0; *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString(); return -1; } return TimeStampFromFileTime(attrs.ftLastWriteTime); } bool IsWindows7OrLater() { OSVERSIONINFOEX version_info = { sizeof(OSVERSIONINFOEX), 6, 1, 0, 0, {0}, 0, 0, 0, 0, 0}; DWORDLONG comparison = 0; VER_SET_CONDITION(comparison, VER_MAJORVERSION, VER_GREATER_EQUAL); VER_SET_CONDITION(comparison, VER_MINORVERSION, VER_GREATER_EQUAL); return VerifyVersionInfo( &version_info, VER_MAJORVERSION | VER_MINORVERSION, comparison); } bool StatAllFilesInDir(const string& dir, map* stamps, string* err) { // FindExInfoBasic is 30% faster than FindExInfoStandard. static bool can_use_basic_info = IsWindows7OrLater(); // This is not in earlier SDKs. const FINDEX_INFO_LEVELS kFindExInfoBasic = static_cast(1); FINDEX_INFO_LEVELS level = can_use_basic_info ? kFindExInfoBasic : FindExInfoStandard; WIN32_FIND_DATAA ffd; HANDLE find_handle = FindFirstFileExA((dir + "\\*").c_str(), level, &ffd, FindExSearchNameMatch, NULL, 0); if (find_handle == INVALID_HANDLE_VALUE) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND || win_err == ERROR_DIRECTORY) return true; *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString(); return false; } do { string lowername = ffd.cFileName; if (lowername == "..") { // Seems to just copy the timestamp for ".." from ".", which is wrong. // This is the case at least on NTFS under Windows 7. 
continue; } transform(lowername.begin(), lowername.end(), lowername.begin(), ::tolower); stamps->insert(make_pair(lowername, TimeStampFromFileTime(ffd.ftLastWriteTime))); } while (FindNextFileA(find_handle, &ffd)); FindClose(find_handle); return true; } #endif // _WIN32 } // namespace // DiskInterface --------------------------------------------------------------- bool DiskInterface::MakeDirs(const string& path) { string dir = DirName(path); if (dir.empty()) return true; // Reached root; assume it's there. string err; TimeStamp mtime = Stat(dir, &err); if (mtime < 0) { Error("%s", err.c_str()); return false; } if (mtime > 0) return true; // Exists already; we're done. // Directory doesn't exist. Try creating its parent first. bool success = MakeDirs(dir); if (!success) return false; return MakeDir(dir); } // RealDiskInterface ----------------------------------------------------------- RealDiskInterface::RealDiskInterface() #ifdef _WIN32 : use_cache_(false), long_paths_enabled_(false) { // Probe ntdll.dll for RtlAreLongPathsEnabled, and call it if it exists. HINSTANCE ntdll_lib = ::GetModuleHandleW(L"ntdll"); if (ntdll_lib) { typedef BOOLEAN(WINAPI FunctionType)(); auto* func_ptr = FunctionCast( ::GetProcAddress(ntdll_lib, "RtlAreLongPathsEnabled")); if (func_ptr) { long_paths_enabled_ = (*func_ptr)(); } } } #else {} #endif TimeStamp RealDiskInterface::Stat(const string& path, string* err) const { METRIC_RECORD("node stat"); #ifdef _WIN32 // MSDN: "Naming Files, Paths, and Namespaces" // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx if (!path.empty() && !AreLongPathsEnabled() && path[0] != '\\' && path.size() > MAX_PATH) { ostringstream err_stream; err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH << " characters"; *err = err_stream.str(); return -1; } if (!use_cache_) return StatSingleFile(path, err); string dir = DirName(path); string base(path.substr(dir.size() ? 
dir.size() + 1 : 0)); if (base == "..") { // StatAllFilesInDir does not report any information for base = "..". base = "."; dir = path; } string dir_lowercase = dir; transform(dir.begin(), dir.end(), dir_lowercase.begin(), ::tolower); transform(base.begin(), base.end(), base.begin(), ::tolower); Cache::iterator ci = cache_.find(dir_lowercase); if (ci == cache_.end()) { ci = cache_.insert(make_pair(dir_lowercase, DirCache())).first; if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) { cache_.erase(ci); return -1; } } DirCache::iterator di = ci->second.find(base); return di != ci->second.end() ? di->second : 0; #else #ifdef __USE_LARGEFILE64 struct stat64 st; if (stat64(path.c_str(), &st) < 0) { #else struct stat st; if (stat(path.c_str(), &st) < 0) { #endif if (errno == ENOENT || errno == ENOTDIR) return 0; *err = "stat(" + path + "): " + strerror(errno); return -1; } // Some users (Flatpak) set mtime to 0, this should be harmless // and avoids conflicting with our return value of 0 meaning // that it doesn't exist. if (st.st_mtime == 0) return 1; #if defined(_AIX) return (int64_t)st.st_mtime * 1000000000LL + st.st_mtime_n; #elif defined(__APPLE__) return ((int64_t)st.st_mtimespec.tv_sec * 1000000000LL + st.st_mtimespec.tv_nsec); #elif defined(st_mtime) // A macro, so we're likely on modern POSIX. return (int64_t)st.st_mtim.tv_sec * 1000000000LL + st.st_mtim.tv_nsec; #else return (int64_t)st.st_mtime * 1000000000LL + st.st_mtimensec; #endif #endif } bool RealDiskInterface::WriteFile(const string& path, const string& contents, bool crlf_on_windows) { FILE* fp = fopen(path.c_str(), #ifdef _WIN32 crlf_on_windows ? "w" : "wb"); #else "wb"); (void)crlf_on_windows; #endif if (fp == NULL) { Error("WriteFile(%s): Unable to create file. %s", path.c_str(), strerror(errno)); return false; } if (fwrite(contents.data(), 1, contents.length(), fp) < contents.length()) { Error("WriteFile(%s): Unable to write to the file. 
%s", path.c_str(), strerror(errno)); fclose(fp); return false; } if (fclose(fp) == EOF) { Error("WriteFile(%s): Unable to close the file. %s", path.c_str(), strerror(errno)); return false; } return true; } bool RealDiskInterface::MakeDir(const string& path) { if (::MakeDir(path) < 0) { if (errno == EEXIST) { return true; } Error("mkdir(%s): %s", path.c_str(), strerror(errno)); return false; } return true; } FileReader::Status RealDiskInterface::ReadFile(const string& path, string* contents, string* err) { switch (::ReadFile(path, contents, err)) { case 0: return Okay; case -ENOENT: return NotFound; default: return OtherError; } } int RealDiskInterface::RemoveFile(const string& path) { #ifdef _WIN32 DWORD attributes = GetFileAttributesA(path.c_str()); if (attributes == INVALID_FILE_ATTRIBUTES) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { return 1; } } else if (attributes & FILE_ATTRIBUTE_READONLY) { // On non-Windows systems, remove() will happily delete read-only files. // On Windows Ninja should behave the same: // https://github.com/ninja-build/ninja/issues/1886 // Skip error checking. If this fails, accept whatever happens below. SetFileAttributesA(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY); } if (attributes & FILE_ATTRIBUTE_DIRECTORY) { // remove() deletes both files and directories. On Windows we have to // select the correct function (DeleteFile will yield Permission Denied when // used on a directory) // This fixes the behavior of ninja -t clean in some cases // https://github.com/ninja-build/ninja/issues/828 if (!RemoveDirectoryA(path.c_str())) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { return 1; } // Report remove(), not RemoveDirectory(), for cross-platform consistency. 
Error("remove(%s): %s", path.c_str(), GetLastErrorString().c_str()); return -1; } } else { if (!DeleteFileA(path.c_str())) { DWORD win_err = GetLastError(); if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) { return 1; } // Report as remove(), not DeleteFile(), for cross-platform consistency. Error("remove(%s): %s", path.c_str(), GetLastErrorString().c_str()); return -1; } } #else if (remove(path.c_str()) < 0) { switch (errno) { case ENOENT: return 1; default: Error("remove(%s): %s", path.c_str(), strerror(errno)); return -1; } } #endif return 0; } void RealDiskInterface::AllowStatCache(bool allow) { #ifdef _WIN32 use_cache_ = allow; if (!use_cache_) cache_.clear(); #endif } #ifdef _WIN32 bool RealDiskInterface::AreLongPathsEnabled(void) const { return long_paths_enabled_; } #endif ninja-1.13.2/src/disk_interface.h000066400000000000000000000073151510764045400166230ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_DISK_INTERFACE_H_ #define NINJA_DISK_INTERFACE_H_ #include #include #include "timestamp.h" /// Interface for reading files from disk. See DiskInterface for details. /// This base offers the minimum interface needed just to read files. struct FileReader { virtual ~FileReader() {} /// Result of ReadFile. enum Status { Okay, NotFound, OtherError }; /// Read and store in given string. On success, return Okay. 
/// On error, return another Status and fill |err|. virtual Status ReadFile(const std::string& path, std::string* contents, std::string* err) = 0; }; /// Interface for accessing the disk. /// /// Abstract so it can be mocked out for tests. The real implementation /// is RealDiskInterface. struct DiskInterface: public FileReader { /// stat() a file, returning the mtime, or 0 if missing and -1 on /// other errors. virtual TimeStamp Stat(const std::string& path, std::string* err) const = 0; /// Create a directory, returning false on failure. virtual bool MakeDir(const std::string& path) = 0; /// Create a file, with the specified name and contents /// If \a crlf_on_windows is true, \n will be converted to \r\n (only on /// Windows builds of Ninja). /// Returns true on success, false on failure virtual bool WriteFile(const std::string& path, const std::string& contents, bool crlf_on_windows) = 0; /// Remove the file named @a path. It behaves like 'rm -f path' so no errors /// are reported if it does not exists. /// @returns 0 if the file has been removed, /// 1 if the file does not exist, and /// -1 if an error occurs. virtual int RemoveFile(const std::string& path) = 0; /// Create all the parent directories for path; like mkdir -p /// `basename path`. bool MakeDirs(const std::string& path); }; /// Implementation of DiskInterface that actually hits the disk. struct RealDiskInterface : public DiskInterface { RealDiskInterface(); virtual ~RealDiskInterface() {} TimeStamp Stat(const std::string& path, std::string* err) const override; bool MakeDir(const std::string& path) override; bool WriteFile(const std::string& path, const std::string& contents, bool crlf_on_windows) override; Status ReadFile(const std::string& path, std::string* contents, std::string* err) override; int RemoveFile(const std::string& path) override; /// Whether stat information can be cached. Only has an effect on Windows. 
void AllowStatCache(bool allow); #ifdef _WIN32 /// Whether long paths are enabled. Only has an effect on Windows. bool AreLongPathsEnabled() const; #endif private: #ifdef _WIN32 /// Whether stat information can be cached. bool use_cache_; /// Whether long paths are enabled. bool long_paths_enabled_; typedef std::map DirCache; // TODO: Neither a map nor a hashmap seems ideal here. If the statcache // works out, come up with a better data structure. typedef std::map Cache; mutable Cache cache_; #endif }; #endif // NINJA_DISK_INTERFACE_H_ ninja-1.13.2/src/disk_interface_test.cc000066400000000000000000000246551510764045400200260ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #ifdef _WIN32 #include #include #include #endif #include "disk_interface.h" #include "graph.h" #include "test.h" using namespace std; namespace { struct DiskInterfaceTest : public testing::Test { virtual void SetUp() { // These tests do real disk accesses, so create a temp dir. 
temp_dir_.CreateAndEnter("Ninja-DiskInterfaceTest"); } virtual void TearDown() { temp_dir_.Cleanup(); } bool Touch(const char* path) { FILE *f = fopen(path, "w"); if (!f) return false; return fclose(f) == 0; } ScopedTempDir temp_dir_; RealDiskInterface disk_; }; TEST_F(DiskInterfaceTest, StatMissingFile) { string err; EXPECT_EQ(0, disk_.Stat("nosuchfile", &err)); EXPECT_EQ("", err); // On Windows, the errno for a file in a nonexistent directory // is different. EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err)); EXPECT_EQ("", err); // On POSIX systems, the errno is different if a component of the // path prefix is not a directory. ASSERT_TRUE(Touch("notadir")); EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err)); EXPECT_EQ("", err); } TEST_F(DiskInterfaceTest, StatMissingFileWithCache) { disk_.AllowStatCache(true); string err; // On Windows, the errno for FindFirstFileExA, which is used when the stat // cache is enabled, is different when the directory name is not a directory. ASSERT_TRUE(Touch("notadir")); EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err)); EXPECT_EQ("", err); } TEST_F(DiskInterfaceTest, StatBadPath) { string err; #ifdef _WIN32 string bad_path("cc:\\foo"); EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); EXPECT_NE("", err); #else string too_long_name(512, 'x'); EXPECT_EQ(-1, disk_.Stat(too_long_name, &err)); EXPECT_NE("", err); #endif } TEST_F(DiskInterfaceTest, StatExistingFile) { string err; ASSERT_TRUE(Touch("file")); EXPECT_GT(disk_.Stat("file", &err), 1); EXPECT_EQ("", err); } #ifdef _WIN32 TEST_F(DiskInterfaceTest, StatExistingFileWithLongPath) { string err; char currentdir[32767]; _getcwd(currentdir, sizeof(currentdir)); const string filename = string(currentdir) + "\\filename_with_256_characters_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\ xxxxxxxxxxxxxxxxxxxxx"; const string 
prefixed = "\\\\?\\" + filename; ASSERT_TRUE(Touch(prefixed.c_str())); EXPECT_GT(disk_.Stat(disk_.AreLongPathsEnabled() ? filename : prefixed, &err), 1); EXPECT_EQ("", err); } #endif TEST_F(DiskInterfaceTest, StatExistingDir) { string err; ASSERT_TRUE(disk_.MakeDir("subdir")); ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir")); EXPECT_GT(disk_.Stat("..", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat(".", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("subdir", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1); EXPECT_EQ("", err); EXPECT_EQ(disk_.Stat("subdir", &err), disk_.Stat("subdir/.", &err)); EXPECT_EQ(disk_.Stat("subdir", &err), disk_.Stat("subdir/subsubdir/..", &err)); EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err), disk_.Stat("subdir/subsubdir/.", &err)); } #ifdef _WIN32 TEST_F(DiskInterfaceTest, StatCache) { string err; ASSERT_TRUE(Touch("file1")); ASSERT_TRUE(Touch("fiLE2")); ASSERT_TRUE(disk_.MakeDir("subdir")); ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir")); ASSERT_TRUE(Touch("subdir\\subfile1")); ASSERT_TRUE(Touch("subdir\\SUBFILE2")); ASSERT_TRUE(Touch("subdir\\SUBFILE3")); disk_.AllowStatCache(false); TimeStamp parent_stat_uncached = disk_.Stat("..", &err); disk_.AllowStatCache(true); EXPECT_GT(disk_.Stat("FIle1", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("file1", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("..", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat(".", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("subdir", &err), 1); EXPECT_EQ("", err); EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1); EXPECT_EQ("", err); #ifndef _MSC_VER // TODO: Investigate why. 
Also see https://github.com/ninja-build/ninja/pull/1423 EXPECT_EQ(disk_.Stat("subdir", &err), disk_.Stat("subdir/.", &err)); EXPECT_EQ("", err); EXPECT_EQ(disk_.Stat("subdir", &err), disk_.Stat("subdir/subsubdir/..", &err)); #endif EXPECT_EQ("", err); EXPECT_EQ(disk_.Stat("..", &err), parent_stat_uncached); EXPECT_EQ("", err); EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err), disk_.Stat("subdir/subsubdir/.", &err)); EXPECT_EQ("", err); // Test error cases. string bad_path("cc:\\foo"); EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); EXPECT_NE("", err); err.clear(); EXPECT_EQ(-1, disk_.Stat(bad_path, &err)); EXPECT_NE("", err); err.clear(); EXPECT_EQ(0, disk_.Stat("nosuchfile", &err)); EXPECT_EQ("", err); EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err)); EXPECT_EQ("", err); } #endif TEST_F(DiskInterfaceTest, ReadFile) { string err; std::string content; ASSERT_EQ(DiskInterface::NotFound, disk_.ReadFile("foobar", &content, &err)); EXPECT_EQ("", content); EXPECT_NE("", err); // actual value is platform-specific err.clear(); const char* kTestFile = "testfile"; FILE* f = fopen(kTestFile, "wb"); ASSERT_TRUE(f); const char* kTestContent = "test content\nok"; fprintf(f, "%s", kTestContent); ASSERT_EQ(0, fclose(f)); ASSERT_EQ(DiskInterface::Okay, disk_.ReadFile(kTestFile, &content, &err)); EXPECT_EQ(kTestContent, content); EXPECT_EQ("", err); } TEST_F(DiskInterfaceTest, MakeDirs) { string path = "path/with/double//slash/"; EXPECT_TRUE(disk_.MakeDirs(path)); FILE* f = fopen((path + "a_file").c_str(), "w"); EXPECT_TRUE(f); EXPECT_EQ(0, fclose(f)); #ifdef _WIN32 string path2 = "another\\with\\back\\\\slashes\\"; EXPECT_TRUE(disk_.MakeDirs(path2)); FILE* f2 = fopen((path2 + "a_file").c_str(), "w"); EXPECT_TRUE(f2); EXPECT_EQ(0, fclose(f2)); #endif } TEST_F(DiskInterfaceTest, RemoveFile) { const char* kFileName = "file-to-remove"; ASSERT_TRUE(Touch(kFileName)); EXPECT_EQ(0, disk_.RemoveFile(kFileName)); EXPECT_EQ(1, disk_.RemoveFile(kFileName)); EXPECT_EQ(1, disk_.RemoveFile("does 
not exist")); #ifdef _WIN32 ASSERT_TRUE(Touch(kFileName)); EXPECT_EQ(0, system((std::string("attrib +R ") + kFileName).c_str())); EXPECT_EQ(0, disk_.RemoveFile(kFileName)); EXPECT_EQ(1, disk_.RemoveFile(kFileName)); #endif } TEST_F(DiskInterfaceTest, RemoveDirectory) { const char* kDirectoryName = "directory-to-remove"; EXPECT_TRUE(disk_.MakeDir(kDirectoryName)); EXPECT_EQ(0, disk_.RemoveFile(kDirectoryName)); EXPECT_EQ(1, disk_.RemoveFile(kDirectoryName)); EXPECT_EQ(1, disk_.RemoveFile("does not exist")); } struct StatTest : public StateTestWithBuiltinRules, public DiskInterface { StatTest() : scan_(&state_, NULL, NULL, this, NULL, NULL) {} // DiskInterface implementation. TimeStamp Stat(const string& path, string* err) const override; bool WriteFile(const string& path, const string& contents, bool /*crlf_on_windows*/) override { assert(false); return true; } bool MakeDir(const string& path) override { assert(false); return false; } Status ReadFile(const string& path, string* contents, string* err) override { assert(false); return NotFound; } int RemoveFile(const string& path) override { assert(false); return 0; } DependencyScan scan_; map mtimes_; mutable vector stats_; }; TimeStamp StatTest::Stat(const string& path, string* err) const { stats_.push_back(path); map::const_iterator i = mtimes_.find(path); if (i == mtimes_.end()) return 0; // File not found. 
return i->second; } TEST_F(StatTest, Simple) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in\n")); Node* out = GetNode("out"); string err; EXPECT_TRUE(out->Stat(this, &err)); EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out, NULL, NULL); ASSERT_EQ(2u, stats_.size()); ASSERT_EQ("out", stats_[0]); ASSERT_EQ("in", stats_[1]); } TEST_F(StatTest, TwoStep) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n")); Node* out = GetNode("out"); string err; EXPECT_TRUE(out->Stat(this, &err)); EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out, NULL, NULL); ASSERT_EQ(3u, stats_.size()); ASSERT_EQ("out", stats_[0]); ASSERT_TRUE(GetNode("out")->dirty()); ASSERT_EQ("mid", stats_[1]); ASSERT_TRUE(GetNode("mid")->dirty()); ASSERT_EQ("in", stats_[2]); } TEST_F(StatTest, Tree) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid1 mid2\n" "build mid1: cat in11 in12\n" "build mid2: cat in21 in22\n")); Node* out = GetNode("out"); string err; EXPECT_TRUE(out->Stat(this, &err)); EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out, NULL, NULL); ASSERT_EQ(1u + 6u, stats_.size()); ASSERT_EQ("mid1", stats_[1]); ASSERT_TRUE(GetNode("mid1")->dirty()); ASSERT_EQ("in11", stats_[2]); } TEST_F(StatTest, Middle) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n")); mtimes_["in"] = 1; mtimes_["mid"] = 0; // missing mtimes_["out"] = 1; Node* out = GetNode("out"); string err; EXPECT_TRUE(out->Stat(this, &err)); EXPECT_EQ("", err); ASSERT_EQ(1u, stats_.size()); scan_.RecomputeDirty(out, NULL, NULL); ASSERT_FALSE(GetNode("in")->dirty()); ASSERT_TRUE(GetNode("mid")->dirty()); ASSERT_TRUE(GetNode("out")->dirty()); } } // namespace ninja-1.13.2/src/dyndep.cc000066400000000000000000000076321510764045400152740ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dyndep.h" #include #include #include "debug_flags.h" #include "disk_interface.h" #include "dyndep_parser.h" #include "explanations.h" #include "graph.h" #include "state.h" #include "util.h" using namespace std; bool DyndepLoader::LoadDyndeps(Node* node, std::string* err) const { DyndepFile ddf; return LoadDyndeps(node, &ddf, err); } bool DyndepLoader::LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const { // We are loading the dyndep file now so it is no longer pending. node->set_dyndep_pending(false); // Load the dyndep information from the file. explanations_.Record(node, "loading dyndep file '%s'", node->path().c_str()); if (!LoadDyndepFile(node, ddf, err)) return false; // Update each edge that specified this node as its dyndep binding. std::vector const& out_edges = node->out_edges(); for (Edge* edge : out_edges) { if (edge->dyndep_ != node) continue; DyndepFile::iterator ddi = ddf->find(edge); if (ddi == ddf->end()) { *err = ("'" + edge->outputs_[0]->path() + "' " "not mentioned in its dyndep file " "'" + node->path() + "'"); return false; } ddi->second.used_ = true; Dyndeps const& dyndeps = ddi->second; if (!UpdateEdge(edge, &dyndeps, err)) { return false; } } // Reject extra outputs in dyndep file. 
for (const auto& dyndep_output : *ddf) { if (!dyndep_output.second.used_) { Edge* const edge = dyndep_output.first; *err = ("dyndep file '" + node->path() + "' mentions output " "'" + edge->outputs_[0]->path() + "' whose build statement " "does not have a dyndep binding for the file"); return false; } } return true; } bool DyndepLoader::UpdateEdge(Edge* edge, Dyndeps const* dyndeps, std::string* err) const { // Add dyndep-discovered bindings to the edge. // We know the edge already has its own binding // scope because it has a "dyndep" binding. if (dyndeps->restat_) edge->env_->AddBinding("restat", "1"); // Add the dyndep-discovered outputs to the edge. edge->outputs_.insert(edge->outputs_.end(), dyndeps->implicit_outputs_.begin(), dyndeps->implicit_outputs_.end()); edge->implicit_outs_ += dyndeps->implicit_outputs_.size(); // Add this edge as incoming to each new output. for (Node* node : dyndeps->implicit_outputs_) { if (node->in_edge()) { // This node already has an edge producing it. *err = "multiple rules generate " + node->path(); return false; } node->set_in_edge(edge); } // Add the dyndep-discovered inputs to the edge. edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_, dyndeps->implicit_inputs_.begin(), dyndeps->implicit_inputs_.end()); edge->implicit_deps_ += dyndeps->implicit_inputs_.size(); // Add this edge as outgoing from each new input. for (Node* node : dyndeps->implicit_inputs_) node->AddOutEdge(edge); return true; } bool DyndepLoader::LoadDyndepFile(Node* file, DyndepFile* ddf, std::string* err) const { DyndepParser parser(state_, disk_interface_, ddf); return parser.Load(file->path(), err); } ninja-1.13.2/src/dyndep.h000066400000000000000000000044731510764045400151360ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_DYNDEP_LOADER_H_ #define NINJA_DYNDEP_LOADER_H_ #include #include #include #include "explanations.h" struct DiskInterface; struct Edge; struct Node; struct State; /// Store dynamically-discovered dependency information for one edge. struct Dyndeps { Dyndeps() : used_(false), restat_(false) {} bool used_; bool restat_; std::vector implicit_inputs_; std::vector implicit_outputs_; }; /// Store data loaded from one dyndep file. Map from an edge /// to its dynamically-discovered dependency information. /// This is a struct rather than a typedef so that we can /// forward-declare it in other headers. struct DyndepFile: public std::map {}; /// DyndepLoader loads dynamically discovered dependencies, as /// referenced via the "dyndep" attribute in build files. struct DyndepLoader { DyndepLoader(State* state, DiskInterface* disk_interface, Explanations* explanations = nullptr) : state_(state), disk_interface_(disk_interface), explanations_(explanations) {} /// Load a dyndep file from the given node's path and update the /// build graph with the new information. One overload accepts /// a caller-owned 'DyndepFile' object in which to store the /// information loaded from the dyndep file. 
bool LoadDyndeps(Node* node, std::string* err) const; bool LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const; private: bool LoadDyndepFile(Node* file, DyndepFile* ddf, std::string* err) const; bool UpdateEdge(Edge* edge, Dyndeps const* dyndeps, std::string* err) const; State* state_; DiskInterface* disk_interface_; mutable OptionalExplanations explanations_; }; #endif // NINJA_DYNDEP_LOADER_H_ ninja-1.13.2/src/dyndep_parser.cc000066400000000000000000000144471510764045400166520ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dyndep_parser.h" #include #include "dyndep.h" #include "graph.h" #include "state.h" #include "util.h" #include "version.h" using namespace std; DyndepParser::DyndepParser(State* state, FileReader* file_reader, DyndepFile* dyndep_file) : Parser(state, file_reader) , dyndep_file_(dyndep_file) { } bool DyndepParser::Parse(const string& filename, const string& input, string* err) { lexer_.Start(filename, input); // Require a supported ninja_dyndep_version value immediately so // we can exit before encountering any syntactic surprises. 
bool haveDyndepVersion = false; for (;;) { Lexer::Token token = lexer_.ReadToken(); switch (token) { case Lexer::BUILD: { if (!haveDyndepVersion) return lexer_.Error("expected 'ninja_dyndep_version = ...'", err); if (!ParseEdge(err)) return false; break; } case Lexer::IDENT: { lexer_.UnreadToken(); if (haveDyndepVersion) return lexer_.Error(string("unexpected ") + Lexer::TokenName(token), err); if (!ParseDyndepVersion(err)) return false; haveDyndepVersion = true; break; } case Lexer::ERROR: return lexer_.Error(lexer_.DescribeLastError(), err); case Lexer::TEOF: if (!haveDyndepVersion) return lexer_.Error("expected 'ninja_dyndep_version = ...'", err); return true; case Lexer::NEWLINE: break; default: return lexer_.Error(string("unexpected ") + Lexer::TokenName(token), err); } } return false; // not reached } bool DyndepParser::ParseDyndepVersion(string* err) { string name; EvalString let_value; if (!ParseLet(&name, &let_value, err)) return false; if (name != "ninja_dyndep_version") { return lexer_.Error("expected 'ninja_dyndep_version = ...'", err); } string version = let_value.Evaluate(&env_); int major, minor; ParseVersion(version, &major, &minor); if (major != 1 || minor != 0) { return lexer_.Error( string("unsupported 'ninja_dyndep_version = ") + version + "'", err); } return true; } bool DyndepParser::ParseLet(string* key, EvalString* value, string* err) { if (!lexer_.ReadIdent(key)) return lexer_.Error("expected variable name", err); return (ExpectToken(Lexer::EQUALS, err) && lexer_.ReadVarValue(value, err)); } bool DyndepParser::ParseEdge(string* err) { // Parse one explicit output. We expect it to already have an edge. // We will record its dynamically-discovered dependency information. 
Dyndeps* dyndeps = NULL; { EvalString out0; if (!lexer_.ReadPath(&out0, err)) return false; if (out0.empty()) return lexer_.Error("expected path", err); string path = out0.Evaluate(&env_); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); Node* node = state_->LookupNode(path); if (!node || !node->in_edge()) return lexer_.Error("no build statement exists for '" + path + "'", err); Edge* edge = node->in_edge(); std::pair res = dyndep_file_->insert(DyndepFile::value_type(edge, Dyndeps())); if (!res.second) return lexer_.Error("multiple statements for '" + path + "'", err); dyndeps = &res.first->second; } // Disallow explicit outputs. { EvalString out; if (!lexer_.ReadPath(&out, err)) return false; if (!out.empty()) return lexer_.Error("explicit outputs not supported", err); } // Parse implicit outputs, if any. vector outs; if (lexer_.PeekToken(Lexer::PIPE)) { for (;;) { EvalString out; if (!lexer_.ReadPath(&out, err)) return err; if (out.empty()) break; outs.push_back(out); } } if (!ExpectToken(Lexer::COLON, err)) return false; string rule_name; if (!lexer_.ReadIdent(&rule_name) || rule_name != "dyndep") return lexer_.Error("expected build command name 'dyndep'", err); // Disallow explicit inputs. { EvalString in; if (!lexer_.ReadPath(&in, err)) return false; if (!in.empty()) return lexer_.Error("explicit inputs not supported", err); } // Parse implicit inputs, if any. vector ins; if (lexer_.PeekToken(Lexer::PIPE)) { for (;;) { EvalString in; if (!lexer_.ReadPath(&in, err)) return err; if (in.empty()) break; ins.push_back(in); } } // Disallow order-only inputs. 
if (lexer_.PeekToken(Lexer::PIPE2)) return lexer_.Error("order-only inputs not supported", err); if (!ExpectToken(Lexer::NEWLINE, err)) return false; if (lexer_.PeekToken(Lexer::INDENT)) { string key; EvalString val; if (!ParseLet(&key, &val, err)) return false; if (key != "restat") return lexer_.Error("binding is not 'restat'", err); string value = val.Evaluate(&env_); dyndeps->restat_ = !value.empty(); } dyndeps->implicit_inputs_.reserve(ins.size()); for (const EvalString& in : ins) { string path = in.Evaluate(&env_); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); Node* n = state_->GetNode(path, slash_bits); dyndeps->implicit_inputs_.push_back(n); } dyndeps->implicit_outputs_.reserve(outs.size()); for (const EvalString& out : outs) { string path = out.Evaluate(&env_); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); Node* n = state_->GetNode(path, slash_bits); dyndeps->implicit_outputs_.push_back(n); } return true; } ninja-1.13.2/src/dyndep_parser.h000066400000000000000000000027001510764045400165010ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_DYNDEP_PARSER_H_ #define NINJA_DYNDEP_PARSER_H_ #include "eval_env.h" #include "parser.h" struct DyndepFile; struct EvalString; /// Parses dyndep files. 
struct DyndepParser: public Parser { DyndepParser(State* state, FileReader* file_reader, DyndepFile* dyndep_file); /// Parse a text string of input. Used by tests. bool ParseTest(const std::string& input, std::string* err) { return Parse("input", input, err); } private: /// Parse a file, given its contents as a string. bool Parse(const std::string& filename, const std::string& input, std:: string* err); bool ParseDyndepVersion(std::string* err); bool ParseLet(std::string* key, EvalString* val, std::string* err); bool ParseEdge(std::string* err); DyndepFile* dyndep_file_; BindingEnv env_; }; #endif // NINJA_DYNDEP_PARSER_H_ ninja-1.13.2/src/dyndep_parser_test.cc000066400000000000000000000366441510764045400177140ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "dyndep_parser.h" #include #include #include "dyndep.h" #include "graph.h" #include "state.h" #include "test.h" using namespace std; struct DyndepParserTest : public testing::Test { void AssertParse(const char* input) { DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_TRUE(parser.ParseTest(input, &err)); ASSERT_EQ("", err); } virtual void SetUp() { ::AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build out otherout: touch\n"); } State state_; VirtualFileSystem fs_; DyndepFile dyndep_file_; }; TEST_F(DyndepParserTest, Empty) { const char kInput[] = ""; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err); } TEST_F(DyndepParserTest, Version1) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n")); } TEST_F(DyndepParserTest, Version1Extra) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1-extra\n")); } TEST_F(DyndepParserTest, Version1_0) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1.0\n")); } TEST_F(DyndepParserTest, Version1_0Extra) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1.0-extra\n")); } TEST_F(DyndepParserTest, CommentVersion) { ASSERT_NO_FATAL_FAILURE(AssertParse( "# comment\n" "ninja_dyndep_version = 1\n")); } TEST_F(DyndepParserTest, BlankLineVersion) { ASSERT_NO_FATAL_FAILURE(AssertParse( "\n" "ninja_dyndep_version = 1\n")); } TEST_F(DyndepParserTest, VersionCRLF) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\r\n")); } TEST_F(DyndepParserTest, CommentVersionCRLF) { ASSERT_NO_FATAL_FAILURE(AssertParse( "# comment\r\n" "ninja_dyndep_version = 1\r\n")); } TEST_F(DyndepParserTest, BlankLineVersionCRLF) { ASSERT_NO_FATAL_FAILURE(AssertParse( "\r\n" "ninja_dyndep_version = 1\r\n")); } TEST_F(DyndepParserTest, VersionUnexpectedEOF) { const char kInput[] = "ninja_dyndep_version = 1.0"; DyndepParser 
parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: unexpected EOF\n" "ninja_dyndep_version = 1.0\n" " ^ near here", err); } TEST_F(DyndepParserTest, UnsupportedVersion0) { const char kInput[] = "ninja_dyndep_version = 0\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 0'\n" "ninja_dyndep_version = 0\n" " ^ near here", err); } TEST_F(DyndepParserTest, UnsupportedVersion1_1) { const char kInput[] = "ninja_dyndep_version = 1.1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 1.1'\n" "ninja_dyndep_version = 1.1\n" " ^ near here", err); } TEST_F(DyndepParserTest, DuplicateVersion) { const char kInput[] = "ninja_dyndep_version = 1\n" "ninja_dyndep_version = 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: unexpected identifier\n", err); } TEST_F(DyndepParserTest, MissingVersionOtherVar) { const char kInput[] = "not_ninja_dyndep_version = 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n" "not_ninja_dyndep_version = 1\n" " ^ near here", err); } TEST_F(DyndepParserTest, MissingVersionBuild) { const char kInput[] = "build out: dyndep\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err); } TEST_F(DyndepParserTest, UnexpectedEqual) { const char kInput[] = "= 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: unexpected '='\n", err); } TEST_F(DyndepParserTest, 
UnexpectedIndent) { const char kInput[] = " = 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:1: unexpected indent\n", err); } TEST_F(DyndepParserTest, OutDuplicate) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep\n" "build out: dyndep\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:3: multiple statements for 'out'\n" "build out: dyndep\n" " ^ near here", err); } TEST_F(DyndepParserTest, OutDuplicateThroughOther) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep\n" "build otherout: dyndep\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:3: multiple statements for 'otherout'\n" "build otherout: dyndep\n" " ^ near here", err); } TEST_F(DyndepParserTest, NoOutEOF) { const char kInput[] = "ninja_dyndep_version = 1\n" "build"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: unexpected EOF\n" "build\n" " ^ near here", err); } TEST_F(DyndepParserTest, NoOutColon) { const char kInput[] = "ninja_dyndep_version = 1\n" "build :\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: expected path\n" "build :\n" " ^ near here", err); } TEST_F(DyndepParserTest, OutNoStatement) { const char kInput[] = "ninja_dyndep_version = 1\n" "build missing: dyndep\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: no build statement exists for 'missing'\n" "build missing: dyndep\n" " ^ near here", err); } TEST_F(DyndepParserTest, OutEOF) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; 
EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: unexpected EOF\n" "build out\n" " ^ near here", err); } TEST_F(DyndepParserTest, OutNoRule) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out:"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: expected build command name 'dyndep'\n" "build out:\n" " ^ near here", err); } TEST_F(DyndepParserTest, OutBadRule) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: touch"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: expected build command name 'dyndep'\n" "build out: touch\n" " ^ near here", err); } TEST_F(DyndepParserTest, BuildEOF) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: unexpected EOF\n" "build out: dyndep\n" " ^ near here", err); } TEST_F(DyndepParserTest, ExplicitOut) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out exp: dyndep\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: explicit outputs not supported\n" "build out exp: dyndep\n" " ^ near here", err); } TEST_F(DyndepParserTest, ExplicitIn) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep exp\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: explicit inputs not supported\n" "build out: dyndep exp\n" " ^ near here", err); } TEST_F(DyndepParserTest, OrderOnlyIn) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep ||\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:2: order-only inputs not supported\n" 
"build out: dyndep ||\n" " ^ near here", err); } TEST_F(DyndepParserTest, BadBinding) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep\n" " not_restat = 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:3: binding is not 'restat'\n" " not_restat = 1\n" " ^ near here", err); } TEST_F(DyndepParserTest, RestatTwice) { const char kInput[] = "ninja_dyndep_version = 1\n" "build out: dyndep\n" " restat = 1\n" " restat = 1\n"; DyndepParser parser(&state_, &fs_, &dyndep_file_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:4: unexpected indent\n", err); } TEST_F(DyndepParserTest, NoImplicit) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out: dyndep\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, EmptyImplicit) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out | : dyndep |\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, ImplicitIn) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out: dyndep | impin\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); ASSERT_EQ(1u, i->second.implicit_inputs_.size()); EXPECT_EQ("impin", i->second.implicit_inputs_[0]->path()); } TEST_F(DyndepParserTest, 
ImplicitIns) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out: dyndep | impin1 impin2\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); ASSERT_EQ(2u, i->second.implicit_inputs_.size()); EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path()); EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path()); } TEST_F(DyndepParserTest, ImplicitOut) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out | impout: dyndep\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); ASSERT_EQ(1u, i->second.implicit_outputs_.size()); EXPECT_EQ("impout", i->second.implicit_outputs_[0]->path()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, ImplicitOuts) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out | impout1 impout2 : dyndep\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); ASSERT_EQ(2u, i->second.implicit_outputs_.size()); EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path()); EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, ImplicitInsAndOuts) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out | impout1 impout2: dyndep | impin1 impin2\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); ASSERT_EQ(2u, i->second.implicit_outputs_.size()); EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path()); 
EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path()); ASSERT_EQ(2u, i->second.implicit_inputs_.size()); EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path()); EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path()); } TEST_F(DyndepParserTest, Restat) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out: dyndep\n" " restat = 1\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(true, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, OtherOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build otherout: dyndep\n")); EXPECT_EQ(1u, dyndep_file_.size()); DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } TEST_F(DyndepParserTest, MultipleEdges) { ::AssertParse(&state_, "build out2: touch\n"); ASSERT_EQ(2u, state_.edges_.size()); ASSERT_EQ(1u, state_.edges_[1]->outputs_.size()); EXPECT_EQ("out2", state_.edges_[1]->outputs_[0]->path()); EXPECT_EQ(0u, state_.edges_[0]->inputs_.size()); ASSERT_NO_FATAL_FAILURE(AssertParse( "ninja_dyndep_version = 1\n" "build out: dyndep\n" "build out2: dyndep\n" " restat = 1\n")); EXPECT_EQ(2u, dyndep_file_.size()); { DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(false, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } { DyndepFile::iterator i = dyndep_file_.find(state_.edges_[1]); ASSERT_NE(i, dyndep_file_.end()); EXPECT_EQ(true, i->second.restat_); EXPECT_EQ(0u, i->second.implicit_outputs_.size()); EXPECT_EQ(0u, i->second.implicit_inputs_.size()); } } 
ninja-1.13.2/src/edit_distance.cc000066400000000000000000000045061510764045400166050ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "edit_distance.h" #include #include int EditDistance(const StringPiece& s1, const StringPiece& s2, bool allow_replacements, int max_edit_distance) { // The algorithm implemented below is the "classic" // dynamic-programming algorithm for computing the Levenshtein // distance, which is described here: // // http://en.wikipedia.org/wiki/Levenshtein_distance // // Although the algorithm is typically described using an m x n // array, only one row plus one element are used at a time, so this // implementation just keeps one vector for the row. To update one entry, // only the entries to the left, top, and top-left are needed. The left // entry is in row[x-1], the top entry is what's in row[x] from the last // iteration, and the top-left entry is stored in previous. int m = static_cast(s1.len_); int n = static_cast(s2.len_); std::vector row(n + 1); for (int i = 1; i <= n; ++i) row[i] = i; for (int y = 1; y <= m; ++y) { row[0] = y; int best_this_row = row[0]; int previous = y - 1; for (int x = 1; x <= n; ++x) { int old_row = row[x]; if (allow_replacements) { row[x] = std::min(previous + (s1.str_[y - 1] == s2.str_[x - 1] ? 
0 : 1), std::min(row[x - 1], row[x]) + 1); } else { if (s1.str_[y - 1] == s2.str_[x - 1]) row[x] = previous; else row[x] = std::min(row[x - 1], row[x]) + 1; } previous = old_row; best_this_row = std::min(best_this_row, row[x]); } if (max_edit_distance && best_this_row > max_edit_distance) return max_edit_distance + 1; } return row[n]; } ninja-1.13.2/src/edit_distance.h000066400000000000000000000016151510764045400164450ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_EDIT_DISTANCE_H_ #define NINJA_EDIT_DISTANCE_H_ #include "string_piece.h" int EditDistance(const StringPiece& s1, const StringPiece& s2, bool allow_replacements = true, int max_edit_distance = 0); #endif // NINJA_EDIT_DISTANCE_H_ ninja-1.13.2/src/edit_distance_test.cc000066400000000000000000000033311510764045400176370ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include "edit_distance.h" #include "test.h" TEST(EditDistanceTest, TestEmpty) { EXPECT_EQ(5, EditDistance("", "ninja")); EXPECT_EQ(5, EditDistance("ninja", "")); EXPECT_EQ(0, EditDistance("", "")); } TEST(EditDistanceTest, TestMaxDistance) { const bool allow_replacements = true; for (int max_distance = 1; max_distance < 7; ++max_distance) { EXPECT_EQ(max_distance + 1, EditDistance("abcdefghijklmnop", "ponmlkjihgfedcba", allow_replacements, max_distance)); } } TEST(EditDistanceTest, TestAllowReplacements) { bool allow_replacements = true; EXPECT_EQ(1, EditDistance("ninja", "njnja", allow_replacements)); EXPECT_EQ(1, EditDistance("njnja", "ninja", allow_replacements)); allow_replacements = false; EXPECT_EQ(2, EditDistance("ninja", "njnja", allow_replacements)); EXPECT_EQ(2, EditDistance("njnja", "ninja", allow_replacements)); } TEST(EditDistanceTest, TestBasics) { EXPECT_EQ(0, EditDistance("browser_tests", "browser_tests")); EXPECT_EQ(1, EditDistance("browser_test", "browser_tests")); EXPECT_EQ(1, EditDistance("browser_tests", "browser_test")); } ninja-1.13.2/src/elide_middle.cc000066400000000000000000000214651510764045400164110ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "elide_middle.h" #include #include // Convenience class used to iterate over the ANSI color sequences // of an input string. Note that this ignores non-color related // ANSI sequences. Usage is: // // - Create instance, passing the input string to the constructor. // - Loop over each sequence with: // // AnsiColorSequenceIterator iter; // while (iter.HasSequence()) { // .. use iter.SequenceStart() and iter.SequenceEnd() // iter.NextSequence(); // } // struct AnsiColorSequenceIterator { // Constructor takes input string . AnsiColorSequenceIterator(const std::string& input) : input_(input.data()), input_end_(input_ + input.size()) { FindNextSequenceFrom(input_); } // Return true if an ANSI sequence was found. bool HasSequence() const { return cur_end_ != 0; } // Start of the current sequence. size_t SequenceStart() const { return cur_start_; } // End of the current sequence (index of the first character // following the sequence). size_t SequenceEnd() const { return cur_end_; } // Size of the current sequence in characters. size_t SequenceSize() const { return cur_end_ - cur_start_; } // Returns true if |input_index| belongs to the current sequence. bool SequenceContains(size_t input_index) const { return (input_index >= cur_start_ && input_index < cur_end_); } // Find the next sequence, if any, from the input. // Returns false is there is no more sequence. bool NextSequence() { if (FindNextSequenceFrom(input_ + cur_end_)) return true; cur_start_ = 0; cur_end_ = 0; return false; } // Reset iterator to start of input. void Reset() { cur_start_ = cur_end_ = 0; FindNextSequenceFrom(input_); } private: // Find the next sequence from the input, |from| being the starting position // for the search, and must be in the [input_, input_end_] interval. On // success, returns true after setting cur_start_ and cur_end_, on failure, // return false. 
bool FindNextSequenceFrom(const char* from) { assert(from >= input_ && from <= input_end_); auto* seq = static_cast(::memchr(from, '\x1b', input_end_ - from)); if (!seq) return false; // The smallest possible color sequence if '\x1c[0m` and has four // characters. if (seq + 4 > input_end_) return false; if (seq[1] != '[') return FindNextSequenceFrom(seq + 1); // Skip parameters (digits + ; separator) auto is_parameter_char = [](char ch) -> bool { return (ch >= '0' && ch <= '9') || ch == ';'; }; const char* end = seq + 2; while (is_parameter_char(end[0])) { if (++end == input_end_) return false; // Incomplete sequence (no command). } if (*end++ != 'm') { // Not a color sequence. Restart the search after the first // character following the [, in case this was a 3-char ANSI // sequence (which is ignored here). return FindNextSequenceFrom(seq + 3); } // Found it! cur_start_ = seq - input_; cur_end_ = end - input_; return true; } size_t cur_start_ = 0; size_t cur_end_ = 0; const char* input_; const char* input_end_; }; // A class used to iterate over all characters of an input string, // and return its visible position in the terminal, and whether that // specific character is visible (or otherwise part of an ANSI color sequence). // // Example sequence and iterations, where 'ANSI' represents an ANSI Color // sequence, and | is used to express concatenation // // |abcd|ANSI|efgh|ANSI|ijk| input string // // 11 1111 111 // 0123 4567 8901 2345 678 input indices // // 1 // 0123 4444 4567 8888 890 visible positions // // TTTT FFFF TTTT FFFF TTT is_visible // // Usage is: // // VisibleInputCharsIterator iter(input); // while (iter.HasChar()) { // ... use iter.InputIndex() to get input index of current char. // ... use iter.VisiblePosition() to get its visible position. // ... use iter.IsVisible() to check whether the current char is visible. 
// // NextChar(); // } // struct VisibleInputCharsIterator { VisibleInputCharsIterator(const std::string& input) : input_size_(input.size()), ansi_iter_(input) {} // Return true if there is a character in the sequence. bool HasChar() const { return input_index_ < input_size_; } // Return current input index. size_t InputIndex() const { return input_index_; } // Return current visible position. size_t VisiblePosition() const { return visible_pos_; } // Return true if the current input character is visible // (i.e. not part of an ANSI color sequence). bool IsVisible() const { return !ansi_iter_.SequenceContains(input_index_); } // Find next character from the input. void NextChar() { visible_pos_ += IsVisible(); if (++input_index_ == ansi_iter_.SequenceEnd()) { ansi_iter_.NextSequence(); } } private: size_t input_size_; size_t input_index_ = 0; size_t visible_pos_ = 0; AnsiColorSequenceIterator ansi_iter_; }; void ElideMiddleInPlace(std::string& str, size_t max_width) { if (str.size() <= max_width) { return; } // Look for an ESC character. If there is none, use a fast path // that avoids any intermediate allocations. if (str.find('\x1b') == std::string::npos) { const int ellipsis_width = 3; // Space for "...". // If max width is too small, do not keep anything from the input. if (max_width <= ellipsis_width) { str.assign("...", max_width); return; } // Keep only |max_width - ellipsis_size| visible characters from the input // which will be split into two spans separated by "...". const size_t remaining_size = max_width - ellipsis_width; const size_t left_span_size = remaining_size / 2; const size_t right_span_size = remaining_size - left_span_size; // Replace the gap in the input between the spans with "..." const size_t gap_start = left_span_size; const size_t gap_end = str.size() - right_span_size; str.replace(gap_start, gap_end - gap_start, "..."); return; } // Compute visible width. 
size_t visible_width = str.size(); for (AnsiColorSequenceIterator ansi(str); ansi.HasSequence(); ansi.NextSequence()) { visible_width -= ansi.SequenceSize(); } if (visible_width <= max_width) return; // Compute the widths of the ellipsis, left span and right span // visible space. const size_t ellipsis_width = max_width < 3 ? max_width : 3; const size_t visible_left_span_size = (max_width - ellipsis_width) / 2; const size_t visible_right_span_size = (max_width - ellipsis_width) - visible_left_span_size; // Compute the gap of visible characters that will be replaced by // the ellipsis in visible space. const size_t visible_gap_start = visible_left_span_size; const size_t visible_gap_end = visible_width - visible_right_span_size; std::string result; result.reserve(str.size()); // Parse the input chars info to: // // 1) Append any characters belonging to the left span (visible or not). // // 2) Add the ellipsis ("..." truncated to ellipsis_width). // Note that its color is inherited from the left span chars // which will never end with an ANSI sequence. // // 3) Append any ANSI sequence that appears inside the gap. This // ensures the characters after the ellipsis appear with // the right color, // // 4) Append any remaining characters (visible or not) to the result. // VisibleInputCharsIterator iter(str); // Step 1 - determine left span length in input chars. for (; iter.HasChar(); iter.NextChar()) { if (iter.VisiblePosition() == visible_gap_start) break; } result.append(str.begin(), str.begin() + iter.InputIndex()); // Step 2 - Append the possibly-truncated ellipsis. result.append("...", ellipsis_width); // Step 3 - Append elided ANSI sequences to the result. for (; iter.HasChar(); iter.NextChar()) { if (iter.VisiblePosition() == visible_gap_end) break; if (!iter.IsVisible()) result.push_back(str[iter.InputIndex()]); } // Step 4 - Append anything else. 
result.append(str.begin() + iter.InputIndex(), str.end()); str = std::move(result); } ninja-1.13.2/src/elide_middle.h000066400000000000000000000020701510764045400162420ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_ELIDE_MIDDLE_H_ #define NINJA_ELIDE_MIDDLE_H_ #include #include /// Elide the given string @a str with '...' in the middle if the length /// exceeds @a max_width. Note that this handles ANSI color sequences /// properly (non-color related sequences are ignored, but using them /// would wreak the cursor position or terminal state anyway). void ElideMiddleInPlace(std::string& str, size_t max_width); #endif // NINJA_ELIDE_MIDDLE_H_ ninja-1.13.2/src/elide_middle_perftest.cc000066400000000000000000000036561510764045400203270ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include #include #include #include #include "elide_middle.h" #include "metrics.h" static const char* kTestInputs[] = { "01234567890123456789", "012345\x1B[0;35m67890123456789", "abcd\x1b[1;31mefg\x1b[0mhlkmnopqrstuvwxyz", }; int main() { std::vector times; int64_t kMaxTimeMillis = 5 * 1000; int64_t base_time = GetTimeMillis(); const int kRuns = 100; for (int j = 0; j < kRuns; ++j) { int64_t start = GetTimeMillis(); if (start >= base_time + kMaxTimeMillis) break; const int kNumRepetitions = 2000; for (int count = kNumRepetitions; count > 0; --count) { for (const char* input : kTestInputs) { size_t input_len = ::strlen(input); for (size_t max_width = input_len; max_width > 0; --max_width) { std::string str(input, input_len); ElideMiddleInPlace(str, max_width); } } } int delta = (int)(GetTimeMillis() - start); times.push_back(delta); } int min = times[0]; int max = times[0]; float total = 0; for (size_t i = 0; i < times.size(); ++i) { total += times[i]; if (times[i] < min) min = times[i]; else if (times[i] > max) max = times[i]; } printf("min %dms max %dms avg %.1fms\n", min, max, total / times.size()); return 0; } ninja-1.13.2/src/elide_middle_test.cc000066400000000000000000000075331510764045400174500ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "elide_middle.h" #include "test.h" namespace { std::string ElideMiddle(const std::string& str, size_t width) { std::string result = str; ElideMiddleInPlace(result, width); return result; } } // namespace TEST(ElideMiddle, NothingToElide) { std::string input = "Nothing to elide in this short string."; EXPECT_EQ(input, ElideMiddle(input, 80)); EXPECT_EQ(input, ElideMiddle(input, 38)); EXPECT_EQ("", ElideMiddle(input, 0)); EXPECT_EQ(".", ElideMiddle(input, 1)); EXPECT_EQ("..", ElideMiddle(input, 2)); EXPECT_EQ("...", ElideMiddle(input, 3)); } TEST(ElideMiddle, ElideInTheMiddle) { std::string input = "01234567890123456789"; EXPECT_EQ("...9", ElideMiddle(input, 4)); EXPECT_EQ("0...9", ElideMiddle(input, 5)); EXPECT_EQ("012...789", ElideMiddle(input, 9)); EXPECT_EQ("012...6789", ElideMiddle(input, 10)); EXPECT_EQ("0123...6789", ElideMiddle(input, 11)); EXPECT_EQ("01234567...23456789", ElideMiddle(input, 19)); EXPECT_EQ("01234567890123456789", ElideMiddle(input, 20)); } // A few ANSI escape sequences. These macros make the following // test easier to read and understand. #define MAGENTA "\x1B[0;35m" #define NOTHING "\33[m" #define RED "\x1b[1;31m" #define RESET "\x1b[0m" TEST(ElideMiddle, ElideAnsiEscapeCodes) { std::string input = "012345" MAGENTA "67890123456789"; EXPECT_EQ("012..." MAGENTA "6789", ElideMiddle(input, 10)); EXPECT_EQ("012345" MAGENTA "67...23456789", ElideMiddle(input, 19)); EXPECT_EQ("Nothing " NOTHING " string.", ElideMiddle("Nothing " NOTHING " string.", 18)); EXPECT_EQ("0" NOTHING "12...6789", ElideMiddle("0" NOTHING "1234567890123456789", 10)); input = "abcd" RED "efg" RESET "hlkmnopqrstuvwxyz"; EXPECT_EQ("" RED RESET, ElideMiddle(input, 0)); EXPECT_EQ("." RED RESET, ElideMiddle(input, 1)); EXPECT_EQ(".." RED RESET, ElideMiddle(input, 2)); EXPECT_EQ("..." RED RESET, ElideMiddle(input, 3)); EXPECT_EQ("..." RED RESET "z", ElideMiddle(input, 4)); EXPECT_EQ("a..." RED RESET "z", ElideMiddle(input, 5)); EXPECT_EQ("a..." 
RED RESET "yz", ElideMiddle(input, 6)); EXPECT_EQ("ab..." RED RESET "yz", ElideMiddle(input, 7)); EXPECT_EQ("ab..." RED RESET "xyz", ElideMiddle(input, 8)); EXPECT_EQ("abc..." RED RESET "xyz", ElideMiddle(input, 9)); EXPECT_EQ("abc..." RED RESET "wxyz", ElideMiddle(input, 10)); EXPECT_EQ("abcd..." RED RESET "wxyz", ElideMiddle(input, 11)); EXPECT_EQ("abcd..." RED RESET "vwxyz", ElideMiddle(input, 12)); EXPECT_EQ("abcd" RED "ef..." RESET "uvwxyz", ElideMiddle(input, 15)); EXPECT_EQ("abcd" RED "ef..." RESET "tuvwxyz", ElideMiddle(input, 16)); EXPECT_EQ("abcd" RED "efg..." RESET "tuvwxyz", ElideMiddle(input, 17)); EXPECT_EQ("abcd" RED "efg..." RESET "stuvwxyz", ElideMiddle(input, 18)); EXPECT_EQ("abcd" RED "efg" RESET "h...stuvwxyz", ElideMiddle(input, 19)); input = "abcdef" RED "A" RESET "BC"; EXPECT_EQ("..." RED RESET "C", ElideMiddle(input, 4)); EXPECT_EQ("a..." RED RESET "C", ElideMiddle(input, 5)); EXPECT_EQ("a..." RED RESET "BC", ElideMiddle(input, 6)); EXPECT_EQ("ab..." RED RESET "BC", ElideMiddle(input, 7)); EXPECT_EQ("ab..." RED "A" RESET "BC", ElideMiddle(input, 8)); EXPECT_EQ("abcdef" RED "A" RESET "BC", ElideMiddle(input, 9)); } #undef RESET #undef RED #undef NOTHING #undef MAGENTA ninja-1.13.2/src/eval_env.cc000066400000000000000000000114301510764045400155770ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include "eval_env.h" using namespace std; string BindingEnv::LookupVariable(const string& var) { map::iterator i = bindings_.find(var); if (i != bindings_.end()) return i->second; if (parent_) return parent_->LookupVariable(var); return ""; } void BindingEnv::AddBinding(const string& key, const string& val) { bindings_[key] = val; } void BindingEnv::AddRule(std::unique_ptr rule) { assert(LookupRuleCurrentScope(rule->name()) == NULL); rules_[rule->name()] = std::move(rule); } const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) { auto i = rules_.find(rule_name); if (i == rules_.end()) return NULL; return i->second.get(); } const Rule* BindingEnv::LookupRule(const string& rule_name) { auto i = rules_.find(rule_name); if (i != rules_.end()) return i->second.get(); if (parent_) return parent_->LookupRule(rule_name); return NULL; } void Rule::AddBinding(const string& key, const EvalString& val) { bindings_[key] = val; } const EvalString* Rule::GetBinding(const string& key) const { Bindings::const_iterator i = bindings_.find(key); if (i == bindings_.end()) return NULL; return &i->second; } std::unique_ptr Rule::Phony() { auto rule = std::unique_ptr(new Rule("phony")); rule->phony_ = true; return rule; } bool Rule::IsPhony() const { return phony_; } // static bool Rule::IsReservedBinding(const string& var) { return var == "command" || var == "depfile" || var == "dyndep" || var == "description" || var == "deps" || var == "generator" || var == "pool" || var == "restat" || var == "rspfile" || var == "rspfile_content" || var == "msvc_deps_prefix"; } const map>& BindingEnv::GetRules() const { return rules_; } string BindingEnv::LookupWithFallback(const string& var, const EvalString* eval, Env* env) { map::iterator i = bindings_.find(var); if (i != bindings_.end()) return i->second; if (eval) return eval->Evaluate(env); if (parent_) return parent_->LookupVariable(var); return ""; } string EvalString::Evaluate(Env* env) const { if (parsed_.empty()) { 
return single_token_; } string result; for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) { if (i->second == RAW) result.append(i->first); else result.append(env->LookupVariable(i->first)); } return result; } void EvalString::AddText(StringPiece text) { if (parsed_.empty()) { single_token_.append(text.begin(), text.end()); } else if (!parsed_.empty() && parsed_.back().second == RAW) { parsed_.back().first.append(text.begin(), text.end()); } else { parsed_.push_back(std::make_pair(text.AsString(), RAW)); } } void EvalString::AddSpecial(StringPiece text) { if (parsed_.empty() && !single_token_.empty()) { // Going from one to two tokens, so we can no longer apply // our single_token_ optimization and need to push everything // onto the vector. parsed_.push_back(std::make_pair(std::move(single_token_), RAW)); } parsed_.push_back(std::make_pair(text.AsString(), SPECIAL)); } string EvalString::Serialize() const { string result; if (parsed_.empty() && !single_token_.empty()) { result.append("["); result.append(single_token_); result.append("]"); } else { for (const auto& pair : parsed_) { result.append("["); if (pair.second == SPECIAL) result.append("$"); result.append(pair.first.begin(), pair.first.end()); result.append("]"); } } return result; } string EvalString::Unparse() const { string result; if (parsed_.empty() && !single_token_.empty()) { result.append(single_token_.begin(), single_token_.end()); } else { for (TokenList::const_iterator i = parsed_.begin(); i != parsed_.end(); ++i) { bool special = (i->second == SPECIAL); if (special) result.append("${"); result.append(i->first.begin(), i->first.end()); if (special) result.append("}"); } } return result; } ninja-1.13.2/src/eval_env.h000066400000000000000000000076311510764045400154510ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_EVAL_ENV_H_ #define NINJA_EVAL_ENV_H_ #include #include #include #include #include "string_piece.h" struct Rule; /// An interface for a scope for variable (e.g. "$foo") lookups. struct Env { virtual ~Env() {} virtual std::string LookupVariable(const std::string& var) = 0; }; /// A tokenized string that contains variable references. /// Can be evaluated relative to an Env. struct EvalString { /// @return The evaluated string with variable expanded using value found in /// environment @a env. std::string Evaluate(Env* env) const; /// @return The string with variables not expanded. std::string Unparse() const; void Clear() { parsed_.clear(); single_token_.clear(); } bool empty() const { return parsed_.empty() && single_token_.empty(); } void AddText(StringPiece text); void AddSpecial(StringPiece text); /// Construct a human-readable representation of the parsed state, /// for use in tests. std::string Serialize() const; private: enum TokenType { RAW, SPECIAL }; typedef std::vector > TokenList; TokenList parsed_; // If we hold only a single RAW token, then we keep it here instead of // pushing it on TokenList. This saves a bunch of allocations for // what is a common case. If parsed_ is nonempty, then this value // must be ignored. std::string single_token_; }; /// An invocable build command and associated metadata (description, etc.). 
struct Rule { explicit Rule(const std::string& name) : name_(name) {} static std::unique_ptr Phony(); bool IsPhony() const; const std::string& name() const { return name_; } void AddBinding(const std::string& key, const EvalString& val); static bool IsReservedBinding(const std::string& var); const EvalString* GetBinding(const std::string& key) const; private: // Allow the parsers to reach into this object and fill out its fields. friend struct ManifestParser; std::string name_; typedef std::map Bindings; Bindings bindings_; bool phony_ = false; }; /// An Env which contains a mapping of variables to values /// as well as a pointer to a parent scope. struct BindingEnv : public Env { BindingEnv() : parent_(NULL) {} explicit BindingEnv(BindingEnv* parent) : parent_(parent) {} virtual ~BindingEnv() {} virtual std::string LookupVariable(const std::string& var); void AddRule(std::unique_ptr rule); const Rule* LookupRule(const std::string& rule_name); const Rule* LookupRuleCurrentScope(const std::string& rule_name); const std::map>& GetRules() const; void AddBinding(const std::string& key, const std::string& val); /// This is tricky. Edges want lookup scope to go in this order: /// 1) value set on edge itself (edge_->env_) /// 2) value set on rule, with expansion in the edge's scope /// 3) value set on enclosing scope of edge (edge_->env_->parent_) /// This function takes as parameters the necessary info to do (2). std::string LookupWithFallback(const std::string& var, const EvalString* eval, Env* env); private: std::map bindings_; std::map> rules_; BindingEnv* parent_; }; #endif // NINJA_EVAL_ENV_H_ ninja-1.13.2/src/exit_status.h000066400000000000000000000020011510764045400162100ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_EXIT_STATUS_H_ #define NINJA_EXIT_STATUS_H_ // The underlying type of the ExitStatus enum, used to represent a platform-specific // process exit code. #ifdef _WIN32 #define EXIT_STATUS_TYPE unsigned long #else // !_WIN32 #define EXIT_STATUS_TYPE int #endif // !_WIN32 enum ExitStatus : EXIT_STATUS_TYPE { ExitSuccess=0, ExitFailure, ExitInterrupted=130, }; #endif // NINJA_EXIT_STATUS_H_ ninja-1.13.2/src/explanations.h000066400000000000000000000051211510764045400163470ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include #include #include #include #include /// A class used to record a list of explanation strings associated /// with a given 'item' pointer. This is used to implement the /// `-d explain` feature. struct Explanations { public: /// Record an explanation for |item| if this instance is enabled. void Record(const void* item, const char* fmt, ...) 
{ va_list args; va_start(args, fmt); RecordArgs(item, fmt, args); va_end(args); } /// Same as Record(), but uses a va_list to pass formatting arguments. void RecordArgs(const void* item, const char* fmt, va_list args) { char buffer[1024]; vsnprintf(buffer, sizeof(buffer), fmt, args); map_[item].emplace_back(buffer); } /// Lookup the explanations recorded for |item|, and append them /// to |*out|, if any. void LookupAndAppend(const void* item, std::vector* out) { auto it = map_.find(item); if (it == map_.end()) return; for (const auto& explanation : it->second) out->push_back(explanation); } private: std::unordered_map> map_; }; /// Convenience wrapper for an Explanations pointer, which can be null /// if no explanations need to be recorded. struct OptionalExplanations { OptionalExplanations(Explanations* explanations) : explanations_(explanations) {} void Record(const void* item, const char* fmt, ...) { if (explanations_) { va_list args; va_start(args, fmt); explanations_->RecordArgs(item, fmt, args); va_end(args); } } void RecordArgs(const void* item, const char* fmt, va_list args) { if (explanations_) explanations_->RecordArgs(item, fmt, args); } void LookupAndAppend(const void* item, std::vector* out) { if (explanations_) explanations_->LookupAndAppend(item, out); } Explanations* ptr() const { return explanations_; } private: Explanations* explanations_; }; ninja-1.13.2/src/explanations_test.cc000066400000000000000000000055501510764045400175520ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include "explanations.h" #include "test.h" namespace { const void* MakeItem(size_t v) { return reinterpret_cast(v); } } // namespace TEST(Explanations, Explanations) { Explanations exp; exp.Record(MakeItem(1), "first explanation"); exp.Record(MakeItem(1), "second explanation"); exp.Record(MakeItem(2), "third explanation"); exp.Record(MakeItem(2), "fourth %s", "explanation"); std::vector list; exp.LookupAndAppend(MakeItem(0), &list); ASSERT_TRUE(list.empty()); exp.LookupAndAppend(MakeItem(1), &list); ASSERT_EQ(2u, list.size()); EXPECT_EQ(list[0], "first explanation"); EXPECT_EQ(list[1], "second explanation"); exp.LookupAndAppend(MakeItem(2), &list); ASSERT_EQ(4u, list.size()); EXPECT_EQ(list[0], "first explanation"); EXPECT_EQ(list[1], "second explanation"); EXPECT_EQ(list[2], "third explanation"); EXPECT_EQ(list[3], "fourth explanation"); } TEST(Explanations, OptionalExplanationsNonNull) { Explanations parent; OptionalExplanations exp(&parent); exp.Record(MakeItem(1), "first explanation"); exp.Record(MakeItem(1), "second explanation"); exp.Record(MakeItem(2), "third explanation"); exp.Record(MakeItem(2), "fourth %s", "explanation"); std::vector list; exp.LookupAndAppend(MakeItem(0), &list); ASSERT_TRUE(list.empty()); exp.LookupAndAppend(MakeItem(1), &list); ASSERT_EQ(2u, list.size()); EXPECT_EQ(list[0], "first explanation"); EXPECT_EQ(list[1], "second explanation"); exp.LookupAndAppend(MakeItem(2), &list); ASSERT_EQ(4u, list.size()); EXPECT_EQ(list[0], "first explanation"); EXPECT_EQ(list[1], "second explanation"); EXPECT_EQ(list[2], "third explanation"); EXPECT_EQ(list[3], "fourth explanation"); } TEST(Explanations, OptionalExplanationsWithNullPointer) { OptionalExplanations exp(nullptr); exp.Record(MakeItem(1), "first explanation"); exp.Record(MakeItem(1), "second explanation"); exp.Record(MakeItem(2), "third explanation"); exp.Record(MakeItem(2), "fourth 
%s", "explanation"); std::vector list; exp.LookupAndAppend(MakeItem(0), &list); ASSERT_TRUE(list.empty()); exp.LookupAndAppend(MakeItem(1), &list); ASSERT_TRUE(list.empty()); exp.LookupAndAppend(MakeItem(2), &list); ASSERT_TRUE(list.empty()); } ninja-1.13.2/src/gen_doxygen_mainpage.sh000077500000000000000000000027341510764045400202060ustar00rootroot00000000000000#!/bin/sh # Copyright 2011 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset STATUS=0 # Print each of its arguments on stderr (one per line) prefixed by the # basename of this script. stderr() { local me=$(basename "$0") local i for i do echo >&2 "$me: $i" done } # Print each of its arguments on stderr (one per line) prefixed by the # basename of this script and 'error'. error() { local i for i do stderr "error: $i" done STATUS=1 } generate_header() { cat <&2 "usage: $0 inputs..." exit 1 fi generate_header for i in "$@" do include_file "$i" done generate_footer exit $STATUS ninja-1.13.2/src/getopt.c000066400000000000000000000302751510764045400151470ustar00rootroot00000000000000/**************************************************************************** getopt.c - Read command line options AUTHOR: Gregory Pietsch CREATED Fri Jan 10 21:13:05 1997 DESCRIPTION: The getopt() function parses the command line arguments. Its arguments argc and argv are the argument count and array as passed to the main() function on program invocation. 
The argument optstring is a list of available option characters. If such a character is followed by a colon (`:'), the option takes an argument, which is placed in optarg. If such a character is followed by two colons, the option takes an optional argument, which is placed in optarg. If the option does not take an argument, optarg is NULL. The external variable optind is the index of the next array element of argv to be processed; it communicates from one call to the next which element to process. The getopt_long() function works like getopt() except that it also accepts long options started by two dashes `--'. If these take values, it is either in the form --arg=value or --arg value It takes the additional arguments longopts which is a pointer to the first element of an array of type GETOPT_LONG_OPTION_T. The last element of the array has to be filled with NULL for the name field. The longind pointer points to the index of the current long option relative to longopts if it is non-NULL. The getopt() function returns the option character if the option was found successfully, `:' if there was a missing parameter for one of the options, `?' for an unknown option character, and EOF for the end of the option list. The getopt_long() function's return value is described in the header file. The function getopt_long_only() is identical to getopt_long(), except that a plus sign `+' can introduce long options as well as `--'. The following describes how to deal with options that follow non-option argv-elements. If the caller did not specify anything, the default is REQUIRE_ORDER if the environment variable POSIXLY_CORRECT is defined, PERMUTE otherwise. REQUIRE_ORDER means don't recognize them as options; stop option processing when the first non-option is seen. This is what Unix does. This mode of operation is selected by either setting the environment variable POSIXLY_CORRECT, or using `+' as the first character of the optstring parameter. PERMUTE is the default. 
We permute the contents of ARGV as we scan, so that eventually all the non-options are at the end. This allows options to be given in any order, even with programs that were not written to expect this. RETURN_IN_ORDER is an option available to programs that were written to expect options and other argv-elements in any order and that care about the ordering of the two. We describe each non-option argv-element as if it were the argument of an option with character code 1. Using `-' as the first character of the optstring parameter selects this mode of operation. The special argument `--' forces an end of option-scanning regardless of the value of ordering. In the case of RETURN_IN_ORDER, only `--' can cause getopt() and friends to return EOF with optind != argc. COPYRIGHT NOTICE AND DISCLAIMER: Copyright (C) 1997 Gregory Pietsch This file and the accompanying getopt.h header file are hereby placed in the public domain without restrictions. Just give the author credit, don't claim you wrote it or prevent anyone else from using it. 
Gregory Pietsch's current e-mail address: gpietsch@comcast.net ****************************************************************************/ /* include files */ #include #include #include #ifndef GETOPT_H #include "getopt.h" #endif /* macros */ /* types */ typedef enum GETOPT_ORDERING_T { PERMUTE, RETURN_IN_ORDER, REQUIRE_ORDER } GETOPT_ORDERING_T; /* globally-defined variables */ char *optarg = NULL; int optind = 0; int opterr = 1; int optopt = '?'; /* functions */ /* reverse_argv_elements: reverses num elements starting at argv */ static void reverse_argv_elements (char **argv, int num) { int i; char *tmp; for (i = 0; i < (num >> 1); i++) { tmp = argv[i]; argv[i] = argv[num - i - 1]; argv[num - i - 1] = tmp; } } /* permute: swap two blocks of argv-elements given their lengths */ static void permute (char **argv, int len1, int len2) { reverse_argv_elements (argv, len1); reverse_argv_elements (argv, len1 + len2); reverse_argv_elements (argv, len2); } /* is_option: is this argv-element an option or the end of the option list? 
*/ static int is_option (char *argv_element, int only) { return ((argv_element == NULL) || (argv_element[0] == '-') || (only && argv_element[0] == '+')); } /* getopt_internal: the function that does all the dirty work */ static int getopt_internal (int argc, char **argv, char *shortopts, GETOPT_LONG_OPTION_T * longopts, int *longind, int only) { GETOPT_ORDERING_T ordering = PERMUTE; static size_t optwhere = 0; size_t permute_from = 0; int num_nonopts = 0; int optindex = 0; size_t match_chars = 0; char *possible_arg = NULL; int longopt_match = -1; int has_arg = -1; char *cp = NULL; int arg_next = 0; /* first, deal with silly parameters and easy stuff */ if (argc == 0 || argv == NULL || (shortopts == NULL && longopts == NULL)) return (optopt = '?'); if (optind >= argc || argv[optind] == NULL) return EOF; if (strcmp (argv[optind], "--") == 0) { optind++; return EOF; } /* if this is our first time through */ if (optind == 0) optind = optwhere = 1; /* define ordering */ if (shortopts != NULL && (*shortopts == '-' || *shortopts == '+')) { ordering = (*shortopts == '-') ? RETURN_IN_ORDER : REQUIRE_ORDER; shortopts++; } else ordering = (getenv ("POSIXLY_CORRECT") != NULL) ? 
REQUIRE_ORDER : PERMUTE; /* * based on ordering, find our next option, if we're at the beginning of * one */ if (optwhere == 1) { switch (ordering) { case PERMUTE: permute_from = optind; num_nonopts = 0; while (!is_option (argv[optind], only)) { optind++; num_nonopts++; } if (argv[optind] == NULL) { /* no more options */ optind = permute_from; return EOF; } else if (strcmp (argv[optind], "--") == 0) { /* no more options, but have to get `--' out of the way */ permute (argv + permute_from, num_nonopts, 1); optind = permute_from + 1; return EOF; } break; case RETURN_IN_ORDER: if (!is_option (argv[optind], only)) { optarg = argv[optind++]; return (optopt = 1); } break; case REQUIRE_ORDER: if (!is_option (argv[optind], only)) return EOF; break; } } /* we've got an option, so parse it */ /* first, is it a long option? */ if (longopts != NULL && (memcmp (argv[optind], "--", 2) == 0 || (only && argv[optind][0] == '+')) && optwhere == 1) { /* handle long options */ if (memcmp (argv[optind], "--", 2) == 0) optwhere = 2; longopt_match = -1; possible_arg = strchr (argv[optind] + optwhere, '='); if (possible_arg == NULL) { /* no =, so next argv might be arg */ match_chars = strlen (argv[optind]); possible_arg = argv[optind] + match_chars; match_chars = match_chars - optwhere; } else match_chars = (possible_arg - argv[optind]) - optwhere; for (optindex = 0; longopts[optindex].name != NULL; optindex++) { if (memcmp (argv[optind] + optwhere, longopts[optindex].name, match_chars) == 0) { /* do we have an exact match? */ if (match_chars == strlen (longopts[optindex].name)) { longopt_match = optindex; break; } /* do any characters match? 
*/ else { if (longopt_match < 0) longopt_match = optindex; else { /* we have ambiguous options */ if (opterr) fprintf (stderr, "%s: option `%s' is ambiguous " "(could be `--%s' or `--%s')\n", argv[0], argv[optind], longopts[longopt_match].name, longopts[optindex].name); return (optopt = '?'); } } } } if (longopt_match >= 0) has_arg = longopts[longopt_match].has_arg; } /* if we didn't find a long option, is it a short option? */ if (longopt_match < 0 && shortopts != NULL) { cp = strchr (shortopts, argv[optind][optwhere]); if (cp == NULL) { /* couldn't find option in shortopts */ if (opterr) fprintf (stderr, "%s: invalid option -- `-%c'\n", argv[0], argv[optind][optwhere]); optwhere++; if (argv[optind][optwhere] == '\0') { optind++; optwhere = 1; } return (optopt = '?'); } has_arg = ((cp[1] == ':') ? ((cp[2] == ':') ? OPTIONAL_ARG : required_argument) : no_argument); possible_arg = argv[optind] + optwhere + 1; optopt = *cp; } /* get argument and reset optwhere */ arg_next = 0; switch (has_arg) { case OPTIONAL_ARG: if (*possible_arg == '=') possible_arg++; if (*possible_arg != '\0') { optarg = possible_arg; optwhere = 1; } else optarg = NULL; break; case required_argument: if (*possible_arg == '=') possible_arg++; if (*possible_arg != '\0') { optarg = possible_arg; optwhere = 1; } else if (optind + 1 >= argc) { if (opterr) { fprintf (stderr, "%s: argument required for option `", argv[0]); if (longopt_match >= 0) fprintf (stderr, "--%s'\n", longopts[longopt_match].name); else fprintf (stderr, "-%c'\n", *cp); } optind++; return (optopt = ':'); } else { optarg = argv[optind + 1]; arg_next = 1; optwhere = 1; } break; case no_argument: if (longopt_match < 0) { optwhere++; if (argv[optind][optwhere] == '\0') optwhere = 1; } else optwhere = 1; optarg = NULL; break; } /* do we have to permute or otherwise modify optind? 
*/ if (ordering == PERMUTE && optwhere == 1 && num_nonopts != 0) { permute (argv + permute_from, num_nonopts, 1 + arg_next); optind = permute_from + 1 + arg_next; } else if (optwhere == 1) optind = optind + 1 + arg_next; /* finally return */ if (longopt_match >= 0) { if (longind != NULL) *longind = longopt_match; if (longopts[longopt_match].flag != NULL) { *(longopts[longopt_match].flag) = longopts[longopt_match].val; return 0; } else return longopts[longopt_match].val; } else return optopt; } #ifndef _AIX int getopt (int argc, char **argv, char *optstring) { return getopt_internal (argc, argv, optstring, NULL, NULL, 0); } #endif int getopt_long (int argc, char **argv, const char *shortopts, const GETOPT_LONG_OPTION_T * longopts, int *longind) { return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 0); } int getopt_long_only (int argc, char **argv, const char *shortopts, const GETOPT_LONG_OPTION_T * longopts, int *longind) { return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 1); } /* end of file GETOPT.C */ ninja-1.13.2/src/getopt.h000066400000000000000000000032571510764045400151540ustar00rootroot00000000000000#ifndef GETOPT_H #define GETOPT_H /* include files needed by this include file */ /* macros defined by this include file */ #define no_argument 0 #define required_argument 1 #define OPTIONAL_ARG 2 /* types defined by this include file */ /* GETOPT_LONG_OPTION_T: The type of long option */ typedef struct GETOPT_LONG_OPTION_T { const char *name; /* the name of the long option */ int has_arg; /* one of the above macros */ int *flag; /* determines if getopt_long() returns a * value for a long option; if it is * non-NULL, 0 is returned as a function * value and the value of val is stored in * the area pointed to by flag. Otherwise, * val is returned. */ int val; /* determines the value to return if flag is * NULL. 
*/ } GETOPT_LONG_OPTION_T; typedef GETOPT_LONG_OPTION_T option; #ifdef __cplusplus extern "C" { #endif /* externally-defined variables */ extern char *optarg; extern int optind; extern int opterr; extern int optopt; /* function prototypes */ #ifndef _AIX int getopt (int argc, char **argv, char *optstring); #endif int getopt_long (int argc, char **argv, const char *shortopts, const GETOPT_LONG_OPTION_T * longopts, int *longind); int getopt_long_only (int argc, char **argv, const char *shortopts, const GETOPT_LONG_OPTION_T * longopts, int *longind); #ifdef __cplusplus }; #endif #endif /* GETOPT_H */ /* END OF FILE getopt.h */ ninja-1.13.2/src/graph.cc000066400000000000000000000643701510764045400151140ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "graph.h" #include #include #include #include #include "build_log.h" #include "debug_flags.h" #include "depfile_parser.h" #include "deps_log.h" #include "disk_interface.h" #include "manifest_parser.h" #include "metrics.h" #include "state.h" #include "util.h" using namespace std; bool Node::Stat(DiskInterface* disk_interface, string* err) { mtime_ = disk_interface->Stat(path_, err); if (mtime_ == -1) { return false; } exists_ = (mtime_ != 0) ? 
ExistenceStatusExists : ExistenceStatusMissing; return true; } void Node::UpdatePhonyMtime(TimeStamp mtime) { if (!exists()) { mtime_ = std::max(mtime_, mtime); } } bool DependencyScan::RecomputeDirty(Node* initial_node, std::vector* validation_nodes, string* err) { std::vector stack; std::vector new_validation_nodes; std::deque nodes(1, initial_node); // RecomputeNodeDirty might return new validation nodes that need to be // checked for dirty state, keep a queue of nodes to visit. while (!nodes.empty()) { Node* node = nodes.front(); nodes.pop_front(); stack.clear(); new_validation_nodes.clear(); if (!RecomputeNodeDirty(node, &stack, &new_validation_nodes, err)) return false; nodes.insert(nodes.end(), new_validation_nodes.begin(), new_validation_nodes.end()); if (!new_validation_nodes.empty()) { assert(validation_nodes && "validations require RecomputeDirty to be called with validation_nodes"); validation_nodes->insert(validation_nodes->end(), new_validation_nodes.begin(), new_validation_nodes.end()); } } return true; } bool DependencyScan::RecomputeNodeDirty(Node* node, std::vector* stack, std::vector* validation_nodes, string* err) { Edge* edge = node->in_edge(); if (!edge) { // If we already visited this leaf node then we are done. if (node->status_known()) return true; // This node has no in-edge; it is dirty if it is missing. if (!node->StatIfNecessary(disk_interface_, err)) return false; if (!node->exists()) explanations_.Record(node, "%s has no in-edge and is missing", node->path().c_str()); node->set_dirty(!node->exists()); return true; } // If we already finished this edge then we are done. if (edge->mark_ == Edge::VisitDone) return true; // If we encountered this edge earlier in the call stack we have a cycle. if (!VerifyDAG(node, stack, err)) return false; // Mark the edge temporarily while in the call stack. 
edge->mark_ = Edge::VisitInStack; stack->push_back(node); bool dirty = false; edge->outputs_ready_ = true; edge->deps_missing_ = false; if (!edge->deps_loaded_) { // This is our first encounter with this edge. // If there is a pending dyndep file, visit it now: // * If the dyndep file is ready then load it now to get any // additional inputs and outputs for this and other edges. // Once the dyndep file is loaded it will no longer be pending // if any other edges encounter it, but they will already have // been updated. // * If the dyndep file is not ready then since is known to be an // input to this edge, the edge will not be considered ready below. // Later during the build the dyndep file will become ready and be // loaded to update this edge before it can possibly be scheduled. if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) { if (!RecomputeNodeDirty(edge->dyndep_, stack, validation_nodes, err)) return false; if (!edge->dyndep_->in_edge() || edge->dyndep_->in_edge()->outputs_ready()) { // The dyndep file is ready, so load it now. if (!LoadDyndeps(edge->dyndep_, err)) return false; } } } // Load output mtimes so we can compare them to the most recent input below. for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { if (!(*o)->StatIfNecessary(disk_interface_, err)) return false; } if (!edge->deps_loaded_) { // This is our first encounter with this edge. Load discovered deps. edge->deps_loaded_ = true; if (!dep_loader_.LoadDeps(edge, err)) { if (!err->empty()) return false; // Failed to load dependency info: rebuild to regenerate it. // LoadDeps() did explanations_->Record() already, no need to do it here. dirty = edge->deps_missing_ = true; } } // Store any validation nodes from the edge for adding to the initial // nodes. Don't recurse into them, that would trigger the dependency // cycle detector if the validation node depends on this node. 
// RecomputeDirty will add the validation nodes to the initial nodes // and recurse into them. validation_nodes->insert(validation_nodes->end(), edge->validations_.begin(), edge->validations_.end()); // Visit all inputs; we're dirty if any of the inputs are dirty. Node* most_recent_input = NULL; for (vector::iterator i = edge->inputs_.begin(); i != edge->inputs_.end(); ++i) { // Visit this input. if (!RecomputeNodeDirty(*i, stack, validation_nodes, err)) return false; // If an input is not ready, neither are our outputs. if (Edge* in_edge = (*i)->in_edge()) { if (!in_edge->outputs_ready_) edge->outputs_ready_ = false; } if (!edge->is_order_only(i - edge->inputs_.begin())) { // If a regular input is dirty (or missing), we're dirty. // Otherwise consider mtime. if ((*i)->dirty()) { explanations_.Record(node, "%s is dirty", (*i)->path().c_str()); dirty = true; } else { if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime()) { most_recent_input = *i; } } } } // We may also be dirty due to output state: missing outputs, out of // date outputs, etc. Visit all outputs and determine whether they're dirty. if (!dirty) if (!RecomputeOutputsDirty(edge, most_recent_input, &dirty, err)) return false; // Finally, visit each output and update their dirty state if necessary. for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { if (dirty) (*o)->MarkDirty(); } // If an edge is dirty, its outputs are normally not ready. (It's // possible to be clean but still not be ready in the presence of // order-only inputs.) // But phony edges with no inputs have nothing to do, so are always // ready. if (dirty && !(edge->is_phony() && edge->inputs_.empty())) edge->outputs_ready_ = false; // Mark the edge as finished during this walk now that it will no longer // be in the call stack. 
edge->mark_ = Edge::VisitDone; assert(stack->back() == node); stack->pop_back(); return true; } bool DependencyScan::VerifyDAG(Node* node, vector* stack, string* err) { Edge* edge = node->in_edge(); assert(edge != NULL); // If we have no temporary mark on the edge then we do not yet have a cycle. if (edge->mark_ != Edge::VisitInStack) return true; // We have this edge earlier in the call stack. Find it. vector::iterator start = stack->begin(); while (start != stack->end() && (*start)->in_edge() != edge) ++start; assert(start != stack->end()); // Make the cycle clear by reporting its start as the node at its end // instead of some other output of the starting edge. For example, // running 'ninja b' on // build a b: cat c // build c: cat a // should report a -> c -> a instead of b -> c -> a. *start = node; // Construct the error message rejecting the cycle. *err = "dependency cycle: "; for (vector::const_iterator i = start; i != stack->end(); ++i) { err->append((*i)->path()); err->append(" -> "); } err->append((*start)->path()); if ((start + 1) == stack->end() && edge->maybe_phonycycle_diagnostic()) { // The manifest parser would have filtered out the self-referencing // input if it were not configured to allow the error. err->append(" [-w phonycycle=err]"); } return false; } bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input, bool* outputs_dirty, string* err) { string command = edge->EvaluateCommand(/*incl_rsp_file=*/true); for (vector::iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) { if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) { *outputs_dirty = true; return true; } } return true; } bool DependencyScan::RecomputeOutputDirty(const Edge* edge, const Node* most_recent_input, const string& command, Node* output) { if (edge->is_phony()) { // Phony edges don't write any output. Outputs are only dirty if // there are no inputs and we're missing the output. 
if (edge->inputs_.empty() && !output->exists()) { explanations_.Record( output, "output %s of phony edge with no inputs doesn't exist", output->path().c_str()); return true; } // Update the mtime with the newest input. Dependents can thus call mtime() // on the fake node and get the latest mtime of the dependencies if (most_recent_input) { output->UpdatePhonyMtime(most_recent_input->mtime()); } // Phony edges are clean, nothing to do return false; } // Dirty if we're missing the output. if (!output->exists()) { explanations_.Record(output, "output %s doesn't exist", output->path().c_str()); return true; } BuildLog::LogEntry* entry = 0; // If this is a restat rule, we may have cleaned the output in a // previous run and stored the command start time in the build log. // We don't want to consider a restat rule's outputs as dirty unless // an input changed since the last run, so we'll skip checking the // output file's actual mtime and simply check the recorded mtime from // the log against the most recent input's mtime (see below) bool used_restat = false; if (edge->GetBindingBool("restat") && build_log() && (entry = build_log()->LookupByOutput(output->path()))) { used_restat = true; } // Dirty if the output is older than the input. if (!used_restat && most_recent_input && output->mtime() < most_recent_input->mtime()) { explanations_.Record(output, "output %s older than most recent input %s " "(%" PRId64 " vs %" PRId64 ")", output->path().c_str(), most_recent_input->path().c_str(), output->mtime(), most_recent_input->mtime()); return true; } if (build_log()) { bool generator = edge->GetBindingBool("generator"); if (entry || (entry = build_log()->LookupByOutput(output->path()))) { if (!generator && BuildLog::LogEntry::HashCommand(command) != entry->command_hash) { // May also be dirty due to the command changing since the last build. // But if this is a generator rule, the command changing does not make us // dirty. 
explanations_.Record(output, "command line changed for %s", output->path().c_str()); return true; } if (most_recent_input && entry->mtime < most_recent_input->mtime()) { // May also be dirty due to the mtime in the log being older than the // mtime of the most recent input. This can occur even when the mtime // on disk is newer if a previous run wrote to the output file but // exited with an error or was interrupted. If this was a restat rule, // then we only check the recorded mtime against the most recent input // mtime and ignore the actual output's mtime above. explanations_.Record( output, "recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")", output->path().c_str(), most_recent_input->path().c_str(), entry->mtime, most_recent_input->mtime()); return true; } } if (!entry && !generator) { explanations_.Record(output, "command line not found in log for %s", output->path().c_str()); return true; } } return false; } bool DependencyScan::LoadDyndeps(Node* node, string* err) const { return dyndep_loader_.LoadDyndeps(node, err); } bool DependencyScan::LoadDyndeps(Node* node, DyndepFile* ddf, string* err) const { return dyndep_loader_.LoadDyndeps(node, ddf, err); } bool Edge::AllInputsReady() const { for (vector::const_iterator i = inputs_.begin(); i != inputs_.end(); ++i) { if ((*i)->in_edge() && !(*i)->in_edge()->outputs_ready()) return false; } return true; } /// An Env for an Edge, providing $in and $out. struct EdgeEnv : public Env { enum EscapeKind { kShellEscape, kDoNotEscape }; EdgeEnv(const Edge* const edge, const EscapeKind escape) : edge_(edge), escape_in_out_(escape), recursive_(false) {} virtual string LookupVariable(const string& var); /// Given a span of Nodes, construct a list of paths suitable for a command /// line. 
std::string MakePathList(const Node* const* span, size_t size, char sep) const; private: std::vector lookups_; const Edge* const edge_; EscapeKind escape_in_out_; bool recursive_; }; string EdgeEnv::LookupVariable(const string& var) { if (var == "in" || var == "in_newline") { int explicit_deps_count = static_cast(edge_->inputs_.size() - edge_->implicit_deps_ - edge_->order_only_deps_); return MakePathList(edge_->inputs_.data(), explicit_deps_count, var == "in" ? ' ' : '\n'); } else if (var == "out") { int explicit_outs_count = static_cast(edge_->outputs_.size() - edge_->implicit_outs_); return MakePathList(&edge_->outputs_[0], explicit_outs_count, ' '); } // Technical note about the lookups_ vector. // // This is used to detect cycles during recursive variable expansion // which can be seen as a graph traversal problem. Consider the following // example: // // rule something // command = $foo $foo $var1 // var1 = $var2 // var2 = $var3 // var3 = $var1 // foo = FOO // // Each variable definition can be seen as a node in a graph that looks // like the following: // // command --> foo // | // v // var1 <-----. // | | // v | // var2 ---> var3 // // The lookups_ vector is used as a stack of visited nodes/variables // during recursive expansion. Entering a node adds an item to the // stack, leaving the node removes it. // // The recursive_ flag is used as a small performance optimization // to never record the starting node in the stack when beginning a new // expansion, since in most cases, expansions are not recursive // at all. // if (recursive_) { auto it = std::find(lookups_.begin(), lookups_.end(), var); if (it != lookups_.end()) { std::string cycle; for (; it != lookups_.end(); ++it) cycle.append(*it + " -> "); cycle.append(var); Fatal(("cycle in rule variables: " + cycle).c_str()); } } // See notes on BindingEnv::LookupWithFallback. 
const EvalString* eval = edge_->rule_->GetBinding(var); bool record_varname = recursive_ && eval; if (record_varname) lookups_.push_back(var); // In practice, variables defined on rules never use another rule variable. // For performance, only start checking for cycles after the first lookup. recursive_ = true; std::string result = edge_->env_->LookupWithFallback(var, eval, this); if (record_varname) lookups_.pop_back(); return result; } std::string EdgeEnv::MakePathList(const Node* const* const span, const size_t size, const char sep) const { string result; for (const Node* const* i = span; i != span + size; ++i) { if (!result.empty()) result.push_back(sep); const string& path = (*i)->PathDecanonicalized(); if (escape_in_out_ == kShellEscape) { #ifdef _WIN32 GetWin32EscapedString(path, &result); #else GetShellEscapedString(path, &result); #endif } else { result.append(path); } } return result; } std::string Edge::EvaluateCommand(const bool incl_rsp_file) const { string command = GetBinding("command"); if (incl_rsp_file) { string rspfile_content = GetBinding("rspfile_content"); if (!rspfile_content.empty()) command += ";rspfile=" + rspfile_content; } return command; } std::string Edge::GetBinding(const std::string& key) const { EdgeEnv env(this, EdgeEnv::kShellEscape); return env.LookupVariable(key); } bool Edge::GetBindingBool(const string& key) const { return !GetBinding(key).empty(); } string Edge::GetUnescapedDepfile() const { EdgeEnv env(this, EdgeEnv::kDoNotEscape); return env.LookupVariable("depfile"); } string Edge::GetUnescapedDyndep() const { EdgeEnv env(this, EdgeEnv::kDoNotEscape); return env.LookupVariable("dyndep"); } std::string Edge::GetUnescapedRspfile() const { EdgeEnv env(this, EdgeEnv::kDoNotEscape); return env.LookupVariable("rspfile"); } void Edge::Dump(const char* prefix) const { printf("%s[ ", prefix); for (vector::const_iterator i = inputs_.begin(); i != inputs_.end() && *i != NULL; ++i) { printf("%s ", (*i)->path().c_str()); } 
printf("--%s-> ", rule_->name().c_str()); for (vector::const_iterator i = outputs_.begin(); i != outputs_.end() && *i != NULL; ++i) { printf("%s ", (*i)->path().c_str()); } if (!validations_.empty()) { printf(" validations "); for (std::vector::const_iterator i = validations_.begin(); i != validations_.end() && *i != NULL; ++i) { printf("%s ", (*i)->path().c_str()); } } if (pool_) { if (!pool_->name().empty()) { printf("(in pool '%s')", pool_->name().c_str()); } } else { printf("(null pool?)"); } printf("] 0x%p\n", this); } bool Edge::is_phony() const { return rule_->IsPhony(); } bool Edge::use_console() const { return pool() == &State::kConsolePool; } bool Edge::maybe_phonycycle_diagnostic() const { // CMake 2.8.12.x and 3.0.x produced self-referencing phony rules // of the form "build a: phony ... a ...". Restrict our // "phonycycle" diagnostic option to the form it used. return is_phony() && outputs_.size() == 1 && implicit_outs_ == 0 && implicit_deps_ == 0; } // static string Node::PathDecanonicalized(const string& path, uint64_t slash_bits) { string result = path; #ifdef _WIN32 uint64_t mask = 1; for (char* c = &result[0]; (c = strchr(c, '/')) != NULL;) { if (slash_bits & mask) *c = '\\'; c++; mask <<= 1; } #endif return result; } void Node::Dump(const char* prefix) const { printf("%s <%s 0x%p> mtime: %" PRId64 "%s, (:%s), ", prefix, path().c_str(), this, mtime(), exists() ? "" : " (:missing)", dirty() ? 
" dirty" : " clean"); if (in_edge()) { in_edge()->Dump("in-edge: "); } else { printf("no in-edge\n"); } printf(" out edges:\n"); for (vector::const_iterator e = out_edges().begin(); e != out_edges().end() && *e != NULL; ++e) { (*e)->Dump(" +- "); } if (!validation_out_edges().empty()) { printf(" validation out edges:\n"); for (std::vector::const_iterator e = validation_out_edges().begin(); e != validation_out_edges().end() && *e != NULL; ++e) { (*e)->Dump(" +- "); } } } bool ImplicitDepLoader::LoadDeps(Edge* edge, string* err) { string deps_type = edge->GetBinding("deps"); if (!deps_type.empty()) return LoadDepsFromLog(edge, err); string depfile = edge->GetUnescapedDepfile(); if (!depfile.empty()) return LoadDepFile(edge, depfile, err); // No deps to load. return true; } struct matches { explicit matches(std::vector::iterator i) : i_(i) {} bool operator()(const Node* node) const { StringPiece opath = StringPiece(node->path()); return *i_ == opath; } std::vector::iterator i_; }; bool ImplicitDepLoader::LoadDepFile(Edge* edge, const string& path, string* err) { METRIC_RECORD("depfile load"); // Read depfile content. Treat a missing depfile as empty. string content; switch (disk_interface_->ReadFile(path, &content, err)) { case DiskInterface::Okay: break; case DiskInterface::NotFound: err->clear(); break; case DiskInterface::OtherError: *err = "loading '" + path + "': " + *err; return false; } // On a missing depfile: return false and empty *err. Node* first_output = edge->outputs_[0]; if (content.empty()) { explanations_.Record(first_output, "depfile '%s' is missing", path.c_str()); return false; } DepfileParser depfile(depfile_parser_options_ ? 
*depfile_parser_options_ : DepfileParserOptions()); string depfile_err; if (!depfile.Parse(&content, &depfile_err)) { *err = path + ": " + depfile_err; return false; } if (depfile.outs_.empty()) { *err = path + ": no outputs declared"; return false; } uint64_t unused; std::vector::iterator primary_out = depfile.outs_.begin(); CanonicalizePath(const_cast(primary_out->str_), &primary_out->len_, &unused); // Check that this depfile matches the edge's output, if not return false to // mark the edge as dirty. StringPiece opath = StringPiece(first_output->path()); if (opath != *primary_out) { explanations_.Record(first_output, "expected depfile '%s' to mention '%s', got '%s'", path.c_str(), first_output->path().c_str(), primary_out->AsString().c_str()); return false; } // Ensure that all mentioned outputs are outputs of the edge. for (std::vector::iterator o = depfile.outs_.begin(); o != depfile.outs_.end(); ++o) { matches m(o); if (std::find_if(edge->outputs_.begin(), edge->outputs_.end(), m) == edge->outputs_.end()) { *err = path + ": depfile mentions '" + o->AsString() + "' as an output, but no such output was declared"; return false; } } return ProcessDepfileDeps(edge, &depfile.ins_, err); } bool ImplicitDepLoader::ProcessDepfileDeps( Edge* edge, std::vector* depfile_ins, std::string* err) { // Preallocate space in edge->inputs_ to be filled in below. vector::iterator implicit_dep = PreallocateSpace(edge, static_cast(depfile_ins->size())); // Add all its in-edges. for (std::vector::iterator i = depfile_ins->begin(); i != depfile_ins->end(); ++i, ++implicit_dep) { uint64_t slash_bits; CanonicalizePath(const_cast(i->str_), &i->len_, &slash_bits); Node* node = state_->GetNode(*i, slash_bits); *implicit_dep = node; node->AddOutEdge(edge); } return true; } bool ImplicitDepLoader::LoadDepsFromLog(Edge* edge, string* err) { // NOTE: deps are only supported for single-target edges. Node* output = edge->outputs_[0]; DepsLog::Deps* deps = deps_log_ ? 
deps_log_->GetDeps(output) : NULL; if (!deps) { explanations_.Record(output, "deps for '%s' are missing", output->path().c_str()); return false; } // Deps are invalid if the output is newer than the deps. if (output->mtime() > deps->mtime) { explanations_.Record(output, "stored deps info out of date for '%s' (%" PRId64 " vs %" PRId64 ")", output->path().c_str(), deps->mtime, output->mtime()); return false; } Node** nodes = deps->nodes; size_t node_count = deps->node_count; edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_, nodes, nodes + node_count); edge->implicit_deps_ += node_count; for (size_t i = 0; i < node_count; ++i) { nodes[i]->AddOutEdge(edge); } return true; } vector::iterator ImplicitDepLoader::PreallocateSpace(Edge* edge, int count) { edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_, (size_t)count, 0); edge->implicit_deps_ += count; return edge->inputs_.end() - edge->order_only_deps_ - count; } void InputsCollector::VisitNode(const Node* node) { const Edge* edge = node->in_edge(); if (!edge) // A source file. return; // Add inputs of the producing edge to the result, // except if they are themselves produced by a phony // edge. 
for (const Node* input : edge->inputs_) { if (!visited_nodes_.insert(input).second) continue; VisitNode(input); const Edge* input_edge = input->in_edge(); if (!(input_edge && input_edge->is_phony())) { inputs_.push_back(input); } } } std::vector InputsCollector::GetInputsAsStrings( bool shell_escape) const { std::vector result; result.reserve(inputs_.size()); for (const Node* input : inputs_) { std::string unescaped = input->PathDecanonicalized(); if (shell_escape) { std::string path; #ifdef _WIN32 GetWin32EscapedString(unescaped, &path); #else GetShellEscapedString(unescaped, &path); #endif result.push_back(std::move(path)); } else { result.push_back(std::move(unescaped)); } } return result; } ninja-1.13.2/src/graph.h000066400000000000000000000402461510764045400147520ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_GRAPH_H_ #define NINJA_GRAPH_H_ #include #include #include #include #include #include "dyndep.h" #include "eval_env.h" #include "explanations.h" #include "jobserver.h" #include "timestamp.h" #include "util.h" struct BuildLog; struct DepfileParserOptions; struct DiskInterface; struct DepsLog; struct Edge; struct Node; struct Pool; struct State; /// Information about a node in the dependency graph: the file, whether /// it's dirty, mtime, etc. 
struct Node { Node(const std::string& path, uint64_t slash_bits) : path_(path), slash_bits_(slash_bits) {} /// Return false on error. bool Stat(DiskInterface* disk_interface, std::string* err); /// If the file doesn't exist, set the mtime_ from its dependencies void UpdatePhonyMtime(TimeStamp mtime); /// Return false on error. bool StatIfNecessary(DiskInterface* disk_interface, std::string* err) { if (status_known()) return true; return Stat(disk_interface, err); } /// Mark as not-yet-stat()ed and not dirty. void ResetState() { mtime_ = -1; exists_ = ExistenceStatusUnknown; dirty_ = false; } /// Mark the Node as already-stat()ed and missing. void MarkMissing() { if (mtime_ == -1) { mtime_ = 0; } exists_ = ExistenceStatusMissing; } bool exists() const { return exists_ == ExistenceStatusExists; } bool status_known() const { return exists_ != ExistenceStatusUnknown; } const std::string& path() const { return path_; } /// Get |path()| but use slash_bits to convert back to original slash styles. std::string PathDecanonicalized() const { return PathDecanonicalized(path_, slash_bits_); } static std::string PathDecanonicalized(const std::string& path, uint64_t slash_bits); uint64_t slash_bits() const { return slash_bits_; } TimeStamp mtime() const { return mtime_; } bool dirty() const { return dirty_; } void set_dirty(bool dirty) { dirty_ = dirty; } void MarkDirty() { dirty_ = true; } bool dyndep_pending() const { return dyndep_pending_; } void set_dyndep_pending(bool pending) { dyndep_pending_ = pending; } Edge* in_edge() const { return in_edge_; } void set_in_edge(Edge* edge) { in_edge_ = edge; } /// Indicates whether this node was generated from a depfile or dyndep file, /// instead of being a regular input or output from the Ninja manifest. 
bool generated_by_dep_loader() const { return generated_by_dep_loader_; } void set_generated_by_dep_loader(bool value) { generated_by_dep_loader_ = value; } int id() const { return id_; } void set_id(int id) { id_ = id; } const std::vector& out_edges() const { return out_edges_; } const std::vector& validation_out_edges() const { return validation_out_edges_; } void AddOutEdge(Edge* edge) { out_edges_.push_back(edge); } void AddValidationOutEdge(Edge* edge) { validation_out_edges_.push_back(edge); } void Dump(const char* prefix="") const; private: std::string path_; /// Set bits starting from lowest for backslashes that were normalized to /// forward slashes by CanonicalizePath. See |PathDecanonicalized|. uint64_t slash_bits_ = 0; /// Possible values of mtime_: /// -1: file hasn't been examined /// 0: we looked, and file doesn't exist /// >0: actual file's mtime, or the latest mtime of its dependencies if it doesn't exist TimeStamp mtime_ = -1; enum ExistenceStatus { /// The file hasn't been examined. ExistenceStatusUnknown, /// The file doesn't exist. mtime_ will be the latest mtime of its dependencies. ExistenceStatusMissing, /// The path is an actual file. mtime_ will be the file's mtime. ExistenceStatusExists }; ExistenceStatus exists_ = ExistenceStatusUnknown; /// Dirty is true when the underlying file is out-of-date. /// But note that Edge::outputs_ready_ is also used in judging which /// edges to build. bool dirty_ = false; /// Store whether dyndep information is expected from this node but /// has not yet been loaded. bool dyndep_pending_ = false; /// Set to true when this node comes from a depfile, a dyndep file or the /// deps log. If it does not have a producing edge, the build should not /// abort if it is missing (as for regular source inputs). By default /// all nodes have this flag set to true, since the deps and build logs /// can be loaded before the manifest. 
bool generated_by_dep_loader_ = true; /// The Edge that produces this Node, or NULL when there is no /// known edge to produce it. Edge* in_edge_ = nullptr; /// All Edges that use this Node as an input. std::vector out_edges_; /// All Edges that use this Node as a validation. std::vector validation_out_edges_; /// A dense integer id for the node, assigned and used by DepsLog. int id_ = -1; }; /// An edge in the dependency graph; links between Nodes using Rules. struct Edge { enum VisitMark { VisitNone, VisitInStack, VisitDone }; Edge() = default; /// Return true if all inputs' in-edges are ready. bool AllInputsReady() const; /// Expand all variables in a command and return it as a string. /// If incl_rsp_file is enabled, the string will also contain the /// full contents of a response file (if applicable) std::string EvaluateCommand(bool incl_rsp_file = false) const; /// Returns the shell-escaped value of |key|. std::string GetBinding(const std::string& key) const; bool GetBindingBool(const std::string& key) const; /// Like GetBinding("depfile"), but without shell escaping. std::string GetUnescapedDepfile() const; /// Like GetBinding("dyndep"), but without shell escaping. std::string GetUnescapedDyndep() const; /// Like GetBinding("rspfile"), but without shell escaping. std::string GetUnescapedRspfile() const; void Dump(const char* prefix="") const; // critical_path_weight is the priority during build scheduling. The // "critical path" between this edge's inputs and any target node is // the path which maximises the sum oof weights along that path. 
// NOTE: Defaults to -1 as a marker smaller than any valid weight int64_t critical_path_weight() const { return critical_path_weight_; } void set_critical_path_weight(int64_t critical_path_weight) { critical_path_weight_ = critical_path_weight; } const Rule* rule_ = nullptr; Pool* pool_ = nullptr; std::vector inputs_; std::vector outputs_; std::vector validations_; Node* dyndep_ = nullptr; BindingEnv* env_ = nullptr; VisitMark mark_ = VisitNone; size_t id_ = 0; int64_t critical_path_weight_ = -1; bool outputs_ready_ = false; bool deps_loaded_ = false; bool deps_missing_ = false; bool generated_by_dep_loader_ = false; TimeStamp command_start_time_ = 0; const Rule& rule() const { return *rule_; } Pool* pool() const { return pool_; } int weight() const { return 1; } bool outputs_ready() const { return outputs_ready_; } // There are three types of inputs. // 1) explicit deps, which show up as $in on the command line; // 2) implicit deps, which the target depends on implicitly (e.g. C headers), // and changes in them cause the target to rebuild; // 3) order-only deps, which are needed before the target builds but which // don't cause the target to rebuild. // These are stored in inputs_ in that order, and we keep counts of // #2 and #3 when we need to access the various subsets. int implicit_deps_ = 0; int order_only_deps_ = 0; bool is_implicit(size_t index) { return index >= inputs_.size() - order_only_deps_ - implicit_deps_ && !is_order_only(index); } bool is_order_only(size_t index) { return index >= inputs_.size() - order_only_deps_; } // There are two types of outputs. // 1) explicit outs, which show up as $out on the command line; // 2) implicit outs, which the target generates but are not part of $out. // These are stored in outputs_ in that order, and we keep a count of // #2 to use when we need to access the various subsets. 
int implicit_outs_ = 0; bool is_implicit_out(size_t index) const { return index >= outputs_.size() - implicit_outs_; } bool is_phony() const; bool use_console() const; bool maybe_phonycycle_diagnostic() const; /// A Jobserver slot instance. Invalid by default. Jobserver::Slot job_slot_; // Historical info: how long did this edge take last time, // as per .ninja_log, if known? Defaults to -1 if unknown. int64_t prev_elapsed_time_millis = -1; }; struct EdgeCmp { bool operator()(const Edge* a, const Edge* b) const { return a->id_ < b->id_; } }; typedef std::set EdgeSet; /// ImplicitDepLoader loads implicit dependencies, as referenced via the /// "depfile" attribute in build files. struct ImplicitDepLoader { ImplicitDepLoader(State* state, DepsLog* deps_log, DiskInterface* disk_interface, DepfileParserOptions const* depfile_parser_options, Explanations* explanations) : state_(state), disk_interface_(disk_interface), deps_log_(deps_log), depfile_parser_options_(depfile_parser_options), explanations_(explanations) {} /// Load implicit dependencies for \a edge. /// @return false on error (without filling \a err if info is just missing // or out of date). bool LoadDeps(Edge* edge, std::string* err); DepsLog* deps_log() const { return deps_log_; } protected: /// Process loaded implicit dependencies for \a edge and update the graph /// @return false on error (without filling \a err if info is just missing) virtual bool ProcessDepfileDeps(Edge* edge, std::vector* depfile_ins, std::string* err); /// Load implicit dependencies for \a edge from a depfile attribute. /// @return false on error (without filling \a err if info is just missing). bool LoadDepFile(Edge* edge, const std::string& path, std::string* err); /// Load implicit dependencies for \a edge from the DepsLog. /// @return false on error (without filling \a err if info is just missing). 
bool LoadDepsFromLog(Edge* edge, std::string* err); /// Preallocate \a count spaces in the input array on \a edge, returning /// an iterator pointing at the first new space. std::vector::iterator PreallocateSpace(Edge* edge, int count); State* state_; DiskInterface* disk_interface_; DepsLog* deps_log_; DepfileParserOptions const* depfile_parser_options_; OptionalExplanations explanations_; }; /// DependencyScan manages the process of scanning the files in a graph /// and updating the dirty/outputs_ready state of all the nodes and edges. struct DependencyScan { DependencyScan(State* state, BuildLog* build_log, DepsLog* deps_log, DiskInterface* disk_interface, DepfileParserOptions const* depfile_parser_options, Explanations* explanations) : build_log_(build_log), disk_interface_(disk_interface), dep_loader_(state, deps_log, disk_interface, depfile_parser_options, explanations), dyndep_loader_(state, disk_interface), explanations_(explanations) {} /// Update the |dirty_| state of the given nodes by transitively inspecting /// their input edges. /// Examine inputs, outputs, and command lines to judge whether an edge /// needs to be re-run, and update outputs_ready_ and each outputs' |dirty_| /// state accordingly. /// Appends any validation nodes found to the nodes parameter. /// Returns false on failure. bool RecomputeDirty(Node* node, std::vector* validation_nodes, std::string* err); /// Recompute whether any output of the edge is dirty, if so sets |*dirty|. /// Returns false on failure. bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input, bool* dirty, std::string* err); BuildLog* build_log() const { return build_log_; } void set_build_log(BuildLog* log) { build_log_ = log; } DepsLog* deps_log() const { return dep_loader_.deps_log(); } /// Load a dyndep file from the given node's path and update the /// build graph with the new information. 
One overload accepts /// a caller-owned 'DyndepFile' object in which to store the /// information loaded from the dyndep file. bool LoadDyndeps(Node* node, std::string* err) const; bool LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const; private: bool RecomputeNodeDirty(Node* node, std::vector* stack, std::vector* validation_nodes, std::string* err); bool VerifyDAG(Node* node, std::vector* stack, std::string* err); /// Recompute whether a given single output should be marked dirty. /// Returns true if so. bool RecomputeOutputDirty(const Edge* edge, const Node* most_recent_input, const std::string& command, Node* output); void RecordExplanation(const Node* node, const char* fmt, ...); BuildLog* build_log_; DiskInterface* disk_interface_; ImplicitDepLoader dep_loader_; DyndepLoader dyndep_loader_; OptionalExplanations explanations_; }; // Implements a less comparison for edges by priority, where highest // priority is defined lexicographically first by largest critical // time, then lowest ID. // // Including ID means that wherever the critical path weights are the // same, the edges are executed in ascending ID order which was // historically how all tasks were scheduled. struct EdgePriorityLess { bool operator()(const Edge* e1, const Edge* e2) const { const int64_t cw1 = e1->critical_path_weight(); const int64_t cw2 = e2->critical_path_weight(); if (cw1 != cw2) { return cw1 < cw2; } return e1->id_ > e2->id_; } }; // Reverse of EdgePriorityLess, e.g. to sort by highest priority first struct EdgePriorityGreater { bool operator()(const Edge* e1, const Edge* e2) const { return EdgePriorityLess()(e2, e1); } }; // A priority queue holding non-owning Edge pointers. top() will // return the edge with the largest critical path weight, and lowest // ID if more than one edge has the same critical path weight. 
class EdgePriorityQueue: public std::priority_queue, EdgePriorityLess>{ public: void clear() { c.clear(); } }; /// A class used to collect the transitive set of inputs from a given set /// of starting nodes. Used to implement the `inputs` tool. /// /// When collecting inputs, the outputs of phony edges are always ignored /// from the result, but are followed by the dependency walk. /// /// Usage is: /// - Create instance. /// - Call VisitNode() for each root node to collect inputs from. /// - Call inputs() to retrieve the list of input node pointers. /// - Call GetInputsAsStrings() to retrieve the list of inputs as a string /// vector. /// struct InputsCollector { /// Visit a single @arg node during this collection. void VisitNode(const Node* node); /// Retrieve list of visited input nodes. A dependency always appears /// before its dependents in the result, but final order depends on the /// order of the VisitNode() calls performed before this. const std::vector& inputs() const { return inputs_; } /// Same as inputs(), but returns the list of visited nodes as a list of /// strings, with optional shell escaping. std::vector GetInputsAsStrings(bool shell_escape = false) const; /// Reset collector state. void Reset() { inputs_.clear(); visited_nodes_.clear(); } private: std::vector inputs_; std::set visited_nodes_; }; #endif // NINJA_GRAPH_H_ ninja-1.13.2/src/graph_test.cc000066400000000000000000001001561510764045400161440ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include "graph.h" #include "build.h" #include "command_collector.h" #include "test.h" using namespace std; struct GraphTest : public StateTestWithBuiltinRules { GraphTest() : scan_(&state_, NULL, NULL, &fs_, NULL, NULL) {} VirtualFileSystem fs_; DependencyScan scan_; }; TEST_F(GraphTest, MissingImplicit) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in | implicit\n")); fs_.Create("in", ""); fs_.Create("out", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); // A missing implicit dep *should* make the output dirty. // (In fact, a build will fail.) // This is a change from prior semantics of ninja. EXPECT_TRUE(GetNode("out")->dirty()); } TEST_F(GraphTest, ModifiedImplicit) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in | implicit\n")); fs_.Create("in", ""); fs_.Create("out", ""); fs_.Tick(); fs_.Create("implicit", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); // A modified implicit dep should make the output dirty. EXPECT_TRUE(GetNode("out")->dirty()); } TEST_F(GraphTest, FunkyMakefilePath) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule catdep\n" " depfile = $out.d\n" " command = cat $in > $out\n" "build out.o: catdep foo.cc\n")); fs_.Create("foo.cc", ""); fs_.Create("out.o.d", "out.o: ./foo/../implicit.h\n"); fs_.Create("out.o", ""); fs_.Tick(); fs_.Create("implicit.h", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); // implicit.h has changed, though our depfile refers to it with a // non-canonical path; we should still find it. 
EXPECT_TRUE(GetNode("out.o")->dirty()); } TEST_F(GraphTest, ExplicitImplicit) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule catdep\n" " depfile = $out.d\n" " command = cat $in > $out\n" "build implicit.h: cat data\n" "build out.o: catdep foo.cc || implicit.h\n")); fs_.Create("implicit.h", ""); fs_.Create("foo.cc", ""); fs_.Create("out.o.d", "out.o: implicit.h\n"); fs_.Create("out.o", ""); fs_.Tick(); fs_.Create("data", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); // We have both an implicit and an explicit dep on implicit.h. // The implicit dep should "win" (in the sense that it should cause // the output to be dirty). EXPECT_TRUE(GetNode("out.o")->dirty()); } TEST_F(GraphTest, ImplicitOutputParse) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out | out.imp: cat in\n")); Edge* edge = GetNode("out")->in_edge(); EXPECT_EQ(size_t(2), edge->outputs_.size()); EXPECT_EQ("out", edge->outputs_[0]->path()); EXPECT_EQ("out.imp", edge->outputs_[1]->path()); EXPECT_EQ(1, edge->implicit_outs_); EXPECT_EQ(edge, GetNode("out.imp")->in_edge()); } TEST_F(GraphTest, ImplicitOutputMissing) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out | out.imp: cat in\n")); fs_.Create("in", ""); fs_.Create("out", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("out")->dirty()); EXPECT_TRUE(GetNode("out.imp")->dirty()); } TEST_F(GraphTest, ImplicitOutputOutOfDate) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out | out.imp: cat in\n")); fs_.Create("out.imp", ""); fs_.Tick(); fs_.Create("in", ""); fs_.Create("out", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("out")->dirty()); EXPECT_TRUE(GetNode("out.imp")->dirty()); } TEST_F(GraphTest, ImplicitOutputOnlyParse) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build | out.imp: cat in\n")); Edge* edge = 
GetNode("out.imp")->in_edge(); EXPECT_EQ(size_t(1), edge->outputs_.size()); EXPECT_EQ("out.imp", edge->outputs_[0]->path()); EXPECT_EQ(1, edge->implicit_outs_); EXPECT_EQ(edge, GetNode("out.imp")->in_edge()); } TEST_F(GraphTest, ImplicitOutputOnlyMissing) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build | out.imp: cat in\n")); fs_.Create("in", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.imp"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("out.imp")->dirty()); } TEST_F(GraphTest, ImplicitOutputOnlyOutOfDate) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build | out.imp: cat in\n")); fs_.Create("out.imp", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.imp"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("out.imp")->dirty()); } TEST_F(GraphTest, PathWithCurrentDirectory) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule catdep\n" " depfile = $out.d\n" " command = cat $in > $out\n" "build ./out.o: catdep ./foo.cc\n")); fs_.Create("foo.cc", ""); fs_.Create("out.o.d", "out.o: foo.cc\n"); fs_.Create("out.o", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("out.o")->dirty()); } TEST_F(GraphTest, RootNodes) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out1: cat in1\n" "build mid1: cat in1\n" "build out2: cat mid1\n" "build out3 out4: cat mid1\n")); string err; vector root_nodes = state_.RootNodes(&err); EXPECT_EQ(4u, root_nodes.size()); for (size_t i = 0; i < root_nodes.size(); ++i) { string name = root_nodes[i]->path(); EXPECT_EQ("out", name.substr(0, 3)); } } TEST_F(GraphTest, InputsCollector) { // Build plan for the following graph: // // in1 // |___________ // | | // === === // | | // out1 mid1 // | ____|_____ // | | | // | === ======= // | | | | // | out2 out3 out4 // | | | // =======phony====== // | // all // ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out1: cat in1\n" "build mid1: 
cat in1\n" "build out2: cat mid1\n" "build out3 out4: cat mid1\n" "build all: phony out1 out2 out3\n")); InputsCollector collector; // Start visit from out1, this should add in1 to the inputs. collector.Reset(); collector.VisitNode(GetNode("out1")); auto inputs = collector.GetInputsAsStrings(); ASSERT_EQ(1u, inputs.size()); EXPECT_EQ("in1", inputs[0]); // Add a visit from out2, this should add mid1. collector.VisitNode(GetNode("out2")); inputs = collector.GetInputsAsStrings(); ASSERT_EQ(2u, inputs.size()); EXPECT_EQ("in1", inputs[0]); EXPECT_EQ("mid1", inputs[1]); // Another visit from all, this should add out1, out2 and out3, // but not out4. collector.VisitNode(GetNode("all")); inputs = collector.GetInputsAsStrings(); ASSERT_EQ(5u, inputs.size()); EXPECT_EQ("in1", inputs[0]); EXPECT_EQ("mid1", inputs[1]); EXPECT_EQ("out1", inputs[2]); EXPECT_EQ("out2", inputs[3]); EXPECT_EQ("out3", inputs[4]); collector.Reset(); // Starting directly from all, will add out1 before mid1 compared // to the previous example above. 
collector.VisitNode(GetNode("all")); inputs = collector.GetInputsAsStrings(); ASSERT_EQ(5u, inputs.size()); EXPECT_EQ("in1", inputs[0]); EXPECT_EQ("out1", inputs[1]); EXPECT_EQ("mid1", inputs[2]); EXPECT_EQ("out2", inputs[3]); EXPECT_EQ("out3", inputs[4]); } TEST_F(GraphTest, InputsCollectorWithEscapes) { ASSERT_NO_FATAL_FAILURE(AssertParse( &state_, "build out$ 1: cat in1 in2 in$ with$ space | implicit || order_only\n")); InputsCollector collector; collector.VisitNode(GetNode("out 1")); auto inputs = collector.GetInputsAsStrings(); ASSERT_EQ(5u, inputs.size()); EXPECT_EQ("in1", inputs[0]); EXPECT_EQ("in2", inputs[1]); EXPECT_EQ("in with space", inputs[2]); EXPECT_EQ("implicit", inputs[3]); EXPECT_EQ("order_only", inputs[4]); inputs = collector.GetInputsAsStrings(true); ASSERT_EQ(5u, inputs.size()); EXPECT_EQ("in1", inputs[0]); EXPECT_EQ("in2", inputs[1]); #ifdef _WIN32 EXPECT_EQ("\"in with space\"", inputs[2]); #else EXPECT_EQ("'in with space'", inputs[2]); #endif EXPECT_EQ("implicit", inputs[3]); EXPECT_EQ("order_only", inputs[4]); } TEST_F(GraphTest, CommandCollector) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out1: cat in1\n" "build mid1: cat in1\n" "build out2: cat mid1\n" "build out3 out4: cat mid1\n" "build all: phony out1 out2 out3\n")); { CommandCollector collector; auto& edges = collector.in_edges; // Start visit from out2; this should add `build mid1` and `build out2` to // the edge list. collector.CollectFrom(GetNode("out2")); ASSERT_EQ(2u, edges.size()); EXPECT_EQ("cat in1 > mid1", edges[0]->EvaluateCommand()); EXPECT_EQ("cat mid1 > out2", edges[1]->EvaluateCommand()); // Add a visit from out1, this should append `build out1` collector.CollectFrom(GetNode("out1")); ASSERT_EQ(3u, edges.size()); EXPECT_EQ("cat in1 > out1", edges[2]->EvaluateCommand()); // Another visit from all; this should add edges for out1, out2 and out3, // but not all (because it's phony). 
collector.CollectFrom(GetNode("all")); ASSERT_EQ(4u, edges.size()); EXPECT_EQ("cat in1 > mid1", edges[0]->EvaluateCommand()); EXPECT_EQ("cat mid1 > out2", edges[1]->EvaluateCommand()); EXPECT_EQ("cat in1 > out1", edges[2]->EvaluateCommand()); EXPECT_EQ("cat mid1 > out3 out4", edges[3]->EvaluateCommand()); } { CommandCollector collector; auto& edges = collector.in_edges; // Starting directly from all, will add `build out1` before `build mid1` // compared to the previous example above. collector.CollectFrom(GetNode("all")); ASSERT_EQ(4u, edges.size()); EXPECT_EQ("cat in1 > out1", edges[0]->EvaluateCommand()); EXPECT_EQ("cat in1 > mid1", edges[1]->EvaluateCommand()); EXPECT_EQ("cat mid1 > out2", edges[2]->EvaluateCommand()); EXPECT_EQ("cat mid1 > out3 out4", edges[3]->EvaluateCommand()); } } TEST_F(GraphTest, VarInOutPathEscaping) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build a$ b: cat no'space with$ space$$ no\"space2\n")); Edge* edge = GetNode("a b")->in_edge(); #ifdef _WIN32 EXPECT_EQ("cat no'space \"with space$\" \"no\\\"space2\" > \"a b\"", edge->EvaluateCommand()); #else EXPECT_EQ("cat 'no'\\''space' 'with space$' 'no\"space2' > 'a b'", edge->EvaluateCommand()); #endif } // Regression test for https://github.com/ninja-build/ninja/issues/380 TEST_F(GraphTest, DepfileWithCanonicalizablePath) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule catdep\n" " depfile = $out.d\n" " command = cat $in > $out\n" "build ./out.o: catdep ./foo.cc\n")); fs_.Create("foo.cc", ""); fs_.Create("out.o.d", "out.o: bar/../foo.cc\n"); fs_.Create("out.o", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("out.o")->dirty()); } // Regression test for https://github.com/ninja-build/ninja/issues/404 TEST_F(GraphTest, DepfileRemoved) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule catdep\n" " depfile = $out.d\n" " command = cat $in > $out\n" "build ./out.o: catdep ./foo.cc\n")); fs_.Create("foo.h", ""); 
fs_.Create("foo.cc", ""); fs_.Tick(); fs_.Create("out.o.d", "out.o: foo.h\n"); fs_.Create("out.o", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("out.o")->dirty()); state_.Reset(); fs_.RemoveFile("out.o.d"); EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out.o"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("out.o")->dirty()); } // Check that rule-level variables are in scope for eval. TEST_F(GraphTest, RuleVariablesInScope) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " depfile = x\n" " command = depfile is $depfile\n" "build out: r in\n")); Edge* edge = GetNode("out")->in_edge(); EXPECT_EQ("depfile is x", edge->EvaluateCommand()); } // Check that build statements can override rule builtins like depfile. TEST_F(GraphTest, DepfileOverride) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " depfile = x\n" " command = unused\n" "build out: r in\n" " depfile = y\n")); Edge* edge = GetNode("out")->in_edge(); EXPECT_EQ("y", edge->GetBinding("depfile")); } // Check that overridden values show up in expansion of rule-level bindings. 
TEST_F(GraphTest, DepfileOverrideParent) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " depfile = x\n" " command = depfile is $depfile\n" "build out: r in\n" " depfile = y\n")); Edge* edge = GetNode("out")->in_edge(); EXPECT_EQ("depfile is y", edge->GetBinding("command")); } // Verify that building a nested phony rule prints "no work to do" TEST_F(GraphTest, NestedPhonyPrintsDone) { AssertParse(&state_, "build n1: phony \n" "build n2: phony n1\n" ); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("n2"), NULL, &err)); ASSERT_EQ("", err); Plan plan_; EXPECT_TRUE(plan_.AddTarget(GetNode("n2"), &err)); ASSERT_EQ("", err); EXPECT_EQ(0, plan_.command_edge_count()); ASSERT_FALSE(plan_.more_to_do()); } TEST_F(GraphTest, PhonySelfReferenceError) { ManifestParserOptions parser_opts; parser_opts.phony_cycle_action_ = kPhonyCycleActionError; AssertParse(&state_, "build a: phony a\n", parser_opts); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), NULL, &err)); ASSERT_EQ("dependency cycle: a -> a [-w phonycycle=err]", err); } TEST_F(GraphTest, DependencyCycle) { AssertParse(&state_, "build out: cat mid\n" "build mid: cat in\n" "build in: cat pre\n" "build pre: cat out\n"); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err); } TEST_F(GraphTest, CycleInEdgesButNotInNodes1) { string err; AssertParse(&state_, "build a b: cat a\n"); EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), NULL, &err)); ASSERT_EQ("dependency cycle: a -> a", err); } TEST_F(GraphTest, CycleInEdgesButNotInNodes2) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build b a: cat a\n")); EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), NULL, &err)); ASSERT_EQ("dependency cycle: a -> a", err); } TEST_F(GraphTest, CycleInEdgesButNotInNodes3) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build a b: cat c\n" "build c: cat a\n")); EXPECT_FALSE(scan_.RecomputeDirty(GetNode("b"), 
NULL, &err)); ASSERT_EQ("dependency cycle: a -> c -> a", err); } TEST_F(GraphTest, CycleInEdgesButNotInNodes4) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build d: cat c\n" "build c: cat b\n" "build b: cat a\n" "build a e: cat d\n" "build f: cat e\n")); EXPECT_FALSE(scan_.RecomputeDirty(GetNode("f"), NULL, &err)); ASSERT_EQ("dependency cycle: a -> d -> c -> b -> a", err); } // Verify that cycles in graphs with multiple outputs are handled correctly // in RecomputeDirty() and don't cause deps to be loaded multiple times. TEST_F(GraphTest, CycleWithLengthZeroFromDepfile) { AssertParse(&state_, "rule deprule\n" " depfile = dep.d\n" " command = unused\n" "build a b: deprule\n" ); fs_.Create("dep.d", "a: b\n"); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), NULL, &err)); ASSERT_EQ("dependency cycle: b -> b", err); // Despite the depfile causing edge to be a cycle (it has outputs a and b, // but the depfile also adds b as an input), the deps should have been loaded // only once: Edge* edge = GetNode("a")->in_edge(); EXPECT_EQ(size_t(1), edge->inputs_.size()); EXPECT_EQ("b", edge->inputs_[0]->path()); } // Like CycleWithLengthZeroFromDepfile but with a higher cycle length. TEST_F(GraphTest, CycleWithLengthOneFromDepfile) { AssertParse(&state_, "rule deprule\n" " depfile = dep.d\n" " command = unused\n" "rule r\n" " command = unused\n" "build a b: deprule\n" "build c: r b\n" ); fs_.Create("dep.d", "a: c\n"); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("a"), NULL, &err)); ASSERT_EQ("dependency cycle: b -> c -> b", err); // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b, // but c's in_edge has b as input but the depfile also adds |edge| as // output)), the deps should have been loaded only once: Edge* edge = GetNode("a")->in_edge(); EXPECT_EQ(size_t(1), edge->inputs_.size()); EXPECT_EQ("c", edge->inputs_[0]->path()); } // Like CycleWithLengthOneFromDepfile but building a node one hop away from // the cycle. 
TEST_F(GraphTest, CycleWithLengthOneFromDepfileOneHopAway) { AssertParse(&state_, "rule deprule\n" " depfile = dep.d\n" " command = unused\n" "rule r\n" " command = unused\n" "build a b: deprule\n" "build c: r b\n" "build d: r a\n" ); fs_.Create("dep.d", "a: c\n"); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("d"), NULL, &err)); ASSERT_EQ("dependency cycle: b -> c -> b", err); // Despite the depfile causing edge to be a cycle (|edge| has outputs a and b, // but c's in_edge has b as input but the depfile also adds |edge| as // output)), the deps should have been loaded only once: Edge* edge = GetNode("a")->in_edge(); EXPECT_EQ(size_t(1), edge->inputs_.size()); EXPECT_EQ("c", edge->inputs_[0]->path()); } #ifdef _WIN32 TEST_F(GraphTest, Decanonicalize) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out\\out1: cat src\\in1\n" "build out\\out2/out3\\out4: cat mid1\n" "build out3 out4\\foo: cat mid1\n")); string err; vector root_nodes = state_.RootNodes(&err); EXPECT_EQ(4u, root_nodes.size()); EXPECT_EQ(root_nodes[0]->path(), "out/out1"); EXPECT_EQ(root_nodes[1]->path(), "out/out2/out3/out4"); EXPECT_EQ(root_nodes[2]->path(), "out3"); EXPECT_EQ(root_nodes[3]->path(), "out4/foo"); EXPECT_EQ(root_nodes[0]->PathDecanonicalized(), "out\\out1"); EXPECT_EQ(root_nodes[1]->PathDecanonicalized(), "out\\out2/out3\\out4"); EXPECT_EQ(root_nodes[2]->PathDecanonicalized(), "out3"); EXPECT_EQ(root_nodes[3]->PathDecanonicalized(), "out4\\foo"); } #endif TEST_F(GraphTest, DyndepLoadTrivial) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out: dyndep\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("", err); EXPECT_FALSE(GetNode("dd")->dyndep_pending()); Edge* edge = GetNode("out")->in_edge(); ASSERT_EQ(size_t(1), edge->outputs_.size()); EXPECT_EQ("out", edge->outputs_[0]->path()); 
ASSERT_EQ(size_t(2), edge->inputs_.size()); EXPECT_EQ("in", edge->inputs_[0]->path()); EXPECT_EQ("dd", edge->inputs_[1]->path()); EXPECT_EQ(0, edge->implicit_deps_); EXPECT_EQ(1, edge->order_only_deps_); EXPECT_FALSE(edge->GetBindingBool("restat")); } TEST_F(GraphTest, DyndepLoadImplicit) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out1: r in || dd\n" " dyndep = dd\n" "build out2: r in\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out1: dyndep | out2\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("", err); EXPECT_FALSE(GetNode("dd")->dyndep_pending()); Edge* edge = GetNode("out1")->in_edge(); ASSERT_EQ(size_t(1), edge->outputs_.size()); EXPECT_EQ("out1", edge->outputs_[0]->path()); ASSERT_EQ(size_t(3), edge->inputs_.size()); EXPECT_EQ("in", edge->inputs_[0]->path()); EXPECT_EQ("out2", edge->inputs_[1]->path()); EXPECT_EQ("dd", edge->inputs_[2]->path()); EXPECT_EQ(1, edge->implicit_deps_); EXPECT_EQ(1, edge->order_only_deps_); EXPECT_FALSE(edge->GetBindingBool("restat")); } TEST_F(GraphTest, DyndepLoadMissingFile) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " dyndep = dd\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("loading 'dd': No such file or directory", err); } TEST_F(GraphTest, DyndepLoadMissingEntry) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("'out' not mentioned in its dyndep file 'dd'", err); } TEST_F(GraphTest, DyndepLoadExtraEntry) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " dyndep = dd\n" "build out2: r in || dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 
1\n" "build out: dyndep\n" "build out2: dyndep\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("dyndep file 'dd' mentions output 'out2' whose build statement " "does not have a dyndep binding for the file", err); } TEST_F(GraphTest, DyndepLoadOutputWithMultipleRules1) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out1 | out-twice.imp: r in1\n" "build out2: r in2 || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out2 | out-twice.imp: dyndep\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("multiple rules generate out-twice.imp", err); } TEST_F(GraphTest, DyndepLoadOutputWithMultipleRules2) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out1: r in1 || dd1\n" " dyndep = dd1\n" "build out2: r in2 || dd2\n" " dyndep = dd2\n" ); fs_.Create("dd1", "ninja_dyndep_version = 1\n" "build out1 | out-twice.imp: dyndep\n" ); fs_.Create("dd2", "ninja_dyndep_version = 1\n" "build out2 | out-twice.imp: dyndep\n" ); string err; ASSERT_TRUE(GetNode("dd1")->dyndep_pending()); EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd1"), &err)); EXPECT_EQ("", err); ASSERT_TRUE(GetNode("dd2")->dyndep_pending()); EXPECT_FALSE(scan_.LoadDyndeps(GetNode("dd2"), &err)); EXPECT_EQ("multiple rules generate out-twice.imp", err); } TEST_F(GraphTest, DyndepLoadMultiple) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out1: r in1 || dd\n" " dyndep = dd\n" "build out2: r in2 || dd\n" " dyndep = dd\n" "build outNot: r in3 || dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out1 | out1imp: dyndep | in1imp\n" "build out2: dyndep | in2imp\n" " restat = 1\n" ); string err; ASSERT_TRUE(GetNode("dd")->dyndep_pending()); EXPECT_TRUE(scan_.LoadDyndeps(GetNode("dd"), &err)); EXPECT_EQ("", err); EXPECT_FALSE(GetNode("dd")->dyndep_pending()); Edge* edge1 = 
GetNode("out1")->in_edge(); ASSERT_EQ(size_t(2), edge1->outputs_.size()); EXPECT_EQ("out1", edge1->outputs_[0]->path()); EXPECT_EQ("out1imp", edge1->outputs_[1]->path()); EXPECT_EQ(1, edge1->implicit_outs_); ASSERT_EQ(size_t(3), edge1->inputs_.size()); EXPECT_EQ("in1", edge1->inputs_[0]->path()); EXPECT_EQ("in1imp", edge1->inputs_[1]->path()); EXPECT_EQ("dd", edge1->inputs_[2]->path()); EXPECT_EQ(1, edge1->implicit_deps_); EXPECT_EQ(1, edge1->order_only_deps_); EXPECT_FALSE(edge1->GetBindingBool("restat")); EXPECT_EQ(edge1, GetNode("out1imp")->in_edge()); Node* in1imp = GetNode("in1imp"); ASSERT_EQ(size_t(1), in1imp->out_edges().size()); EXPECT_EQ(edge1, in1imp->out_edges()[0]); Edge* edge2 = GetNode("out2")->in_edge(); ASSERT_EQ(size_t(1), edge2->outputs_.size()); EXPECT_EQ("out2", edge2->outputs_[0]->path()); EXPECT_EQ(0, edge2->implicit_outs_); ASSERT_EQ(size_t(3), edge2->inputs_.size()); EXPECT_EQ("in2", edge2->inputs_[0]->path()); EXPECT_EQ("in2imp", edge2->inputs_[1]->path()); EXPECT_EQ("dd", edge2->inputs_[2]->path()); EXPECT_EQ(1, edge2->implicit_deps_); EXPECT_EQ(1, edge2->order_only_deps_); EXPECT_TRUE(edge2->GetBindingBool("restat")); Node* in2imp = GetNode("in2imp"); ASSERT_EQ(size_t(1), in2imp->out_edges().size()); EXPECT_EQ(edge2, in2imp->out_edges()[0]); } TEST_F(GraphTest, DyndepFileMissing) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r || dd\n" " dyndep = dd\n" ); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("loading 'dd': No such file or directory", err); } TEST_F(GraphTest, DyndepFileError) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" ); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("'out' not mentioned in its dyndep file 'dd'", err); } TEST_F(GraphTest, DyndepImplicitInputNewer) { AssertParse(&state_, "rule r\n" " command = unused\n" "build 
out: r || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out: dyndep | in\n" ); fs_.Create("out", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("in")->dirty()); EXPECT_FALSE(GetNode("dd")->dirty()); // "out" is dirty due to dyndep-specified implicit input EXPECT_TRUE(GetNode("out")->dirty()); } TEST_F(GraphTest, DyndepFileReady) { AssertParse(&state_, "rule r\n" " command = unused\n" "build dd: r dd-in\n" "build out: r || dd\n" " dyndep = dd\n" ); fs_.Create("dd-in", ""); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out: dyndep | in\n" ); fs_.Create("out", ""); fs_.Tick(); fs_.Create("in", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("in")->dirty()); EXPECT_FALSE(GetNode("dd")->dirty()); EXPECT_TRUE(GetNode("dd")->in_edge()->outputs_ready()); // "out" is dirty due to dyndep-specified implicit input EXPECT_TRUE(GetNode("out")->dirty()); } TEST_F(GraphTest, DyndepFileNotClean) { AssertParse(&state_, "rule r\n" " command = unused\n" "build dd: r dd-in\n" "build out: r || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "this-should-not-be-loaded"); fs_.Tick(); fs_.Create("dd-in", ""); fs_.Create("out", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("dd")->dirty()); EXPECT_FALSE(GetNode("dd")->in_edge()->outputs_ready()); // "out" is clean but not ready since "dd" is not ready EXPECT_FALSE(GetNode("out")->dirty()); EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready()); } TEST_F(GraphTest, DyndepFileNotReady) { AssertParse(&state_, "rule r\n" " command = unused\n" "build tmp: r\n" "build dd: r dd-in || tmp\n" "build out: r || dd\n" " dyndep = dd\n" ); fs_.Create("dd", "this-should-not-be-loaded"); fs_.Create("dd-in", ""); fs_.Tick(); fs_.Create("out", ""); string err; 
EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_FALSE(GetNode("dd")->dirty()); EXPECT_FALSE(GetNode("dd")->in_edge()->outputs_ready()); EXPECT_FALSE(GetNode("out")->dirty()); EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready()); } TEST_F(GraphTest, DyndepFileSecondNotReady) { AssertParse(&state_, "rule r\n" " command = unused\n" "build dd1: r dd1-in\n" "build dd2-in: r || dd1\n" " dyndep = dd1\n" "build dd2: r dd2-in\n" "build out: r || dd2\n" " dyndep = dd2\n" ); fs_.Create("dd1", ""); fs_.Create("dd2", ""); fs_.Create("dd2-in", ""); fs_.Tick(); fs_.Create("dd1-in", ""); fs_.Create("out", ""); string err; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); ASSERT_EQ("", err); EXPECT_TRUE(GetNode("dd1")->dirty()); EXPECT_FALSE(GetNode("dd1")->in_edge()->outputs_ready()); EXPECT_FALSE(GetNode("dd2")->dirty()); EXPECT_FALSE(GetNode("dd2")->in_edge()->outputs_ready()); EXPECT_FALSE(GetNode("out")->dirty()); EXPECT_FALSE(GetNode("out")->in_edge()->outputs_ready()); } TEST_F(GraphTest, DyndepFileCircular) { AssertParse(&state_, "rule r\n" " command = unused\n" "build out: r in || dd\n" " depfile = out.d\n" " dyndep = dd\n" "build in: r circ\n" ); fs_.Create("out.d", "out: inimp\n"); fs_.Create("dd", "ninja_dyndep_version = 1\n" "build out | circ: dyndep\n" ); fs_.Create("out", ""); Edge* edge = GetNode("out")->in_edge(); string err; EXPECT_FALSE(scan_.RecomputeDirty(GetNode("out"), NULL, &err)); EXPECT_EQ("dependency cycle: circ -> in -> circ", err); // Verify that "out.d" was loaded exactly once despite // circular reference discovered from dyndep file. 
ASSERT_EQ(size_t(3), edge->inputs_.size()); EXPECT_EQ("in", edge->inputs_[0]->path()); EXPECT_EQ("inimp", edge->inputs_[1]->path()); EXPECT_EQ("dd", edge->inputs_[2]->path()); EXPECT_EQ(1, edge->implicit_deps_); EXPECT_EQ(1, edge->order_only_deps_); } TEST_F(GraphTest, Validation) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "build out: cat in |@ validate\n" "build validate: cat in\n")); fs_.Create("in", ""); string err; std::vector validation_nodes; EXPECT_TRUE(scan_.RecomputeDirty(GetNode("out"), &validation_nodes, &err)); ASSERT_EQ("", err); ASSERT_EQ(validation_nodes.size(), size_t(1)); EXPECT_EQ(validation_nodes[0]->path(), "validate"); EXPECT_TRUE(GetNode("out")->dirty()); EXPECT_TRUE(GetNode("validate")->dirty()); } // Check that phony's dependencies' mtimes are propagated. TEST_F(GraphTest, PhonyDepsMtimes) { string err; ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule touch\n" " command = touch $out\n" "build in_ph: phony in1\n" "build out1: touch in_ph\n" )); fs_.Create("in1", ""); fs_.Create("out1", ""); Node* out1 = GetNode("out1"); Node* in1 = GetNode("in1"); EXPECT_TRUE(scan_.RecomputeDirty(out1, NULL, &err)); EXPECT_TRUE(!out1->dirty()); // Get the mtime of out1 ASSERT_TRUE(in1->Stat(&fs_, &err)); ASSERT_TRUE(out1->Stat(&fs_, &err)); TimeStamp out1Mtime1 = out1->mtime(); TimeStamp in1Mtime1 = in1->mtime(); // Touch in1. 
This should cause out1 to be dirty state_.Reset(); fs_.Tick(); fs_.Create("in1", ""); ASSERT_TRUE(in1->Stat(&fs_, &err)); EXPECT_GT(in1->mtime(), in1Mtime1); EXPECT_TRUE(scan_.RecomputeDirty(out1, NULL, &err)); EXPECT_GT(in1->mtime(), in1Mtime1); EXPECT_EQ(out1->mtime(), out1Mtime1); EXPECT_TRUE(out1->dirty()); } // Test that EdgeQueue correctly prioritizes by critical time TEST_F(GraphTest, EdgeQueuePriority) { ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, "rule r\n" " command = unused\n" "build out1: r in1\n" "build out2: r in2\n" "build out3: r in3\n" )); const int n_edges = 3; Edge *(edges)[n_edges] = { GetNode("out1")->in_edge(), GetNode("out2")->in_edge(), GetNode("out3")->in_edge(), }; // Output is largest critical time to smallest for (int i = 0; i < n_edges; ++i) { edges[i]->set_critical_path_weight(i * 10); } EdgePriorityQueue queue; for (int i = 0; i < n_edges; ++i) { queue.push(edges[i]); } EXPECT_EQ(queue.size(), static_cast(n_edges)); for (int i = 0; i < n_edges; ++i) { EXPECT_EQ(queue.top(), edges[n_edges - 1 - i]); queue.pop(); } EXPECT_TRUE(queue.empty()); // When there is ambiguity, the lowest edge id comes first for (int i = 0; i < n_edges; ++i) { edges[i]->set_critical_path_weight(0); } queue.push(edges[1]); queue.push(edges[2]); queue.push(edges[0]); for (int i = 0; i < n_edges; ++i) { EXPECT_EQ(queue.top(), edges[i]); queue.pop(); } EXPECT_TRUE(queue.empty()); } ninja-1.13.2/src/graphviz.cc000066400000000000000000000052021510764045400156320ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "graphviz.h" #include #include #include "dyndep.h" #include "graph.h" using namespace std; void GraphViz::AddTarget(Node* node) { if (visited_nodes_.find(node) != visited_nodes_.end()) return; string pathstr = node->path(); replace(pathstr.begin(), pathstr.end(), '\\', '/'); printf("\"%p\" [label=\"%s\"]\n", node, pathstr.c_str()); visited_nodes_.insert(node); Edge* edge = node->in_edge(); if (!edge) { // Leaf node. // Draw as a rect? return; } if (visited_edges_.find(edge) != visited_edges_.end()) return; visited_edges_.insert(edge); if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) { std::string err; if (!dyndep_loader_.LoadDyndeps(edge->dyndep_, &err)) { Warning("%s\n", err.c_str()); } } if (edge->inputs_.size() == 1 && edge->outputs_.size() == 1) { // Can draw simply. // Note extra space before label text -- this is cosmetic and feels // like a graphviz bug. 
printf("\"%p\" -> \"%p\" [label=\" %s\"]\n", edge->inputs_[0], edge->outputs_[0], edge->rule_->name().c_str()); } else { printf("\"%p\" [label=\"%s\", shape=ellipse]\n", edge, edge->rule_->name().c_str()); for (vector::iterator out = edge->outputs_.begin(); out != edge->outputs_.end(); ++out) { printf("\"%p\" -> \"%p\"\n", edge, *out); } for (vector::iterator in = edge->inputs_.begin(); in != edge->inputs_.end(); ++in) { const char* order_only = ""; if (edge->is_order_only(in - edge->inputs_.begin())) order_only = " style=dotted"; printf("\"%p\" -> \"%p\" [arrowhead=none%s]\n", (*in), edge, order_only); } } for (vector::iterator in = edge->inputs_.begin(); in != edge->inputs_.end(); ++in) { AddTarget(*in); } } void GraphViz::Start() { printf("digraph ninja {\n"); printf("rankdir=\"LR\"\n"); printf("node [fontsize=10, shape=box, height=0.25]\n"); printf("edge [fontsize=10]\n"); } void GraphViz::Finish() { printf("}\n"); } ninja-1.13.2/src/graphviz.h000066400000000000000000000022011510764045400154700ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_GRAPHVIZ_H_ #define NINJA_GRAPHVIZ_H_ #include #include "dyndep.h" #include "graph.h" struct DiskInterface; struct Node; struct Edge; struct State; /// Runs the process of creating GraphViz .dot file output. 
struct GraphViz { GraphViz(State* state, DiskInterface* disk_interface) : dyndep_loader_(state, disk_interface) {} void Start(); void AddTarget(Node* node); void Finish(); DyndepLoader dyndep_loader_; std::set visited_nodes_; EdgeSet visited_edges_; }; #endif // NINJA_GRAPHVIZ_H_ ninja-1.13.2/src/hash_collision_bench.cc000066400000000000000000000035471510764045400201470ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "build_log.h" #include #include #include using namespace std; int random(int low, int high) { return int(low + (rand() / double(RAND_MAX)) * (high - low) + 0.5); } void RandomCommand(char** s) { int len = random(5, 100); *s = new char[len+1]; for (int i = 0; i < len; ++i) (*s)[i] = (char)random(32, 127); (*s)[len] = '\0'; } int main() { const int N = 20 * 1000 * 1000; // Leak these, else 10% of the runtime is spent destroying strings. 
char** commands = new char*[N]; pair* hashes = new pair[N]; srand((int)time(NULL)); for (int i = 0; i < N; ++i) { RandomCommand(&commands[i]); hashes[i] = make_pair(BuildLog::LogEntry::HashCommand(commands[i]), i); } sort(hashes, hashes + N); int collision_count = 0; for (int i = 1; i < N; ++i) { if (hashes[i - 1].first == hashes[i].first) { if (strcmp(commands[hashes[i - 1].second], commands[hashes[i].second]) != 0) { printf("collision!\n string 1: '%s'\n string 2: '%s'\n", commands[hashes[i - 1].second], commands[hashes[i].second]); collision_count++; } } } printf("\n\n%d collisions after %d runs\n", collision_count, N); } ninja-1.13.2/src/hash_map.h000066400000000000000000000025741510764045400154330ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_MAP_H_ #define NINJA_MAP_H_ #include #include #include "string_piece.h" #include "util.h" #include "third_party/emhash/hash_table8.hpp" #include "third_party/rapidhash/rapidhash.h" namespace std { template<> struct hash { typedef StringPiece argument_type; typedef size_t result_type; size_t operator()(StringPiece key) const { return rapidhash(key.str_, key.len_); } }; } /// A template for hash_maps keyed by a StringPiece whose string is /// owned externally (typically by the values). Use like: /// ExternalStringHash::Type foos; to make foos into a hash /// mapping StringPiece => Foo*. 
template struct ExternalStringHashMap { typedef emhash8::HashMap Type; }; #endif // NINJA_MAP_H_ ninja-1.13.2/src/includes_normalize-win32.cc000066400000000000000000000132471510764045400206360ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "includes_normalize.h" #include "string_piece.h" #include "string_piece_util.h" #include "util.h" #include #include #include #include using namespace std; namespace { bool InternalGetFullPathName(const StringPiece& file_name, char* buffer, size_t buffer_length, string *err) { DWORD result_size = GetFullPathNameA(file_name.AsString().c_str(), buffer_length, buffer, NULL); if (result_size == 0) { *err = "GetFullPathNameA(" + file_name.AsString() + "): " + GetLastErrorString(); return false; } else if (result_size > buffer_length) { *err = "path too long"; return false; } return true; } bool IsPathSeparator(char c) { return c == '/' || c == '\\'; } // Return true if paths a and b are on the same windows drive. // Return false if this function cannot check // whether or not on the same windows drive. 
bool SameDriveFast(StringPiece a, StringPiece b) { if (a.size() < 3 || b.size() < 3) { return false; } if (!islatinalpha(a[0]) || !islatinalpha(b[0])) { return false; } if (ToLowerASCII(a[0]) != ToLowerASCII(b[0])) { return false; } if (a[1] != ':' || b[1] != ':') { return false; } return IsPathSeparator(a[2]) && IsPathSeparator(b[2]); } // Return true if paths a and b are on the same Windows drive. bool SameDrive(StringPiece a, StringPiece b, string* err) { if (SameDriveFast(a, b)) { return true; } char a_absolute[_MAX_PATH]; char b_absolute[_MAX_PATH]; if (!InternalGetFullPathName(a, a_absolute, sizeof(a_absolute), err)) { return false; } if (!InternalGetFullPathName(b, b_absolute, sizeof(b_absolute), err)) { return false; } char a_drive[_MAX_DIR]; char b_drive[_MAX_DIR]; _splitpath(a_absolute, a_drive, NULL, NULL, NULL); _splitpath(b_absolute, b_drive, NULL, NULL, NULL); return _stricmp(a_drive, b_drive) == 0; } // Check path |s| is FullPath style returned by GetFullPathName. // This ignores difference of path separator. // This is used not to call very slow GetFullPathName API. bool IsFullPathName(StringPiece s) { if (s.size() < 3 || !islatinalpha(s[0]) || s[1] != ':' || !IsPathSeparator(s[2])) { return false; } // Check "." or ".." is contained in path. for (size_t i = 2; i < s.size(); ++i) { if (!IsPathSeparator(s[i])) { continue; } // Check ".". if (i + 1 < s.size() && s[i+1] == '.' && (i + 2 >= s.size() || IsPathSeparator(s[i+2]))) { return false; } // Check "..". if (i + 2 < s.size() && s[i+1] == '.' && s[i+2] == '.' 
&& (i + 3 >= s.size() || IsPathSeparator(s[i+3]))) { return false; } } return true; } } // anonymous namespace IncludesNormalize::IncludesNormalize(const string& relative_to) { string err; relative_to_ = AbsPath(relative_to, &err); if (!err.empty()) { Fatal("Initializing IncludesNormalize(): %s", err.c_str()); } split_relative_to_ = SplitStringPiece(relative_to_, '/'); } string IncludesNormalize::AbsPath(StringPiece s, string* err) { if (IsFullPathName(s)) { string result = s.AsString(); for (size_t i = 0; i < result.size(); ++i) { if (result[i] == '\\') { result[i] = '/'; } } return result; } char result[_MAX_PATH]; if (!InternalGetFullPathName(s, result, sizeof(result), err)) { return ""; } for (char* c = result; *c; ++c) if (*c == '\\') *c = '/'; return result; } string IncludesNormalize::Relativize( StringPiece path, const vector& start_list, string* err) { string abs_path = AbsPath(path, err); if (!err->empty()) return ""; vector path_list = SplitStringPiece(abs_path, '/'); int i; for (i = 0; i < static_cast(min(start_list.size(), path_list.size())); ++i) { if (!EqualsCaseInsensitiveASCII(start_list[i], path_list[i])) { break; } } vector rel_list; rel_list.reserve(start_list.size() - i + path_list.size() - i); for (int j = 0; j < static_cast(start_list.size() - i); ++j) rel_list.push_back(".."); for (int j = i; j < static_cast(path_list.size()); ++j) rel_list.push_back(path_list[j]); if (rel_list.size() == 0) return "."; return JoinStringPiece(rel_list, '/'); } bool IncludesNormalize::Normalize(const string& input, string* result, string* err) const { char copy[_MAX_PATH + 1]; size_t len = input.size(); if (len > _MAX_PATH) { *err = "path too long"; return false; } strncpy(copy, input.c_str(), input.size() + 1); uint64_t slash_bits; CanonicalizePath(copy, &len, &slash_bits); StringPiece partially_fixed(copy, len); string abs_input = AbsPath(partially_fixed, err); if (!err->empty()) return false; if (!SameDrive(abs_input, relative_to_, err)) { if 
(!err->empty()) return false; *result = partially_fixed.AsString(); return true; } *result = Relativize(abs_input, split_relative_to_, err); if (!err->empty()) return false; return true; } ninja-1.13.2/src/includes_normalize.h000066400000000000000000000032461510764045400175360ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef INCLUDES_NORMALIZE_H_ #define INCLUDES_NORMALIZE_H_ #include #include struct StringPiece; /// Utility functions for normalizing include paths on Windows. /// TODO: this likely duplicates functionality of CanonicalizePath; refactor. struct IncludesNormalize { /// Normalize path relative to |relative_to|. IncludesNormalize(const std::string& relative_to); // Internal utilities made available for testing, maybe useful otherwise. static std::string AbsPath(StringPiece s, std::string* err); static std::string Relativize(StringPiece path, const std::vector& start_list, std::string* err); /// Normalize by fixing slashes style, fixing redundant .. and . and makes the /// path |input| relative to |this->relative_to_| and store to |result|. 
bool Normalize(const std::string& input, std::string* result, std::string* err) const; private: std::string relative_to_; std::vector split_relative_to_; }; #endif // INCLUDES_NORMALIZE_H_ ninja-1.13.2/src/includes_normalize_test.cc000066400000000000000000000141601510764045400207300ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "includes_normalize.h" #include #include #include "string_piece_util.h" #include "test.h" #include "util.h" using namespace std; namespace { string GetCurDir() { char buf[_MAX_PATH]; _getcwd(buf, sizeof(buf)); vector parts = SplitStringPiece(buf, '\\'); return parts[parts.size() - 1].AsString(); } string NormalizeAndCheckNoError(const string& input) { string result, err; IncludesNormalize normalizer("."); EXPECT_TRUE(normalizer.Normalize(input, &result, &err)); EXPECT_EQ("", err); return result; } string NormalizeRelativeAndCheckNoError(const string& input, const string& relative_to) { string result, err; IncludesNormalize normalizer(relative_to); EXPECT_TRUE(normalizer.Normalize(input, &result, &err)); EXPECT_EQ("", err); return result; } } // namespace TEST(IncludesNormalize, Simple) { EXPECT_EQ("b", NormalizeAndCheckNoError("a\\..\\b")); EXPECT_EQ("b", NormalizeAndCheckNoError("a\\../b")); EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\.\\b")); EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\./b")); } TEST(IncludesNormalize, WithRelative) { string err; 
string currentdir = GetCurDir(); EXPECT_EQ("c", NormalizeRelativeAndCheckNoError("a/b/c", "a/b")); EXPECT_EQ("a", NormalizeAndCheckNoError(IncludesNormalize::AbsPath("a", &err))); EXPECT_EQ("", err); EXPECT_EQ(string("../") + currentdir + string("/a"), NormalizeRelativeAndCheckNoError("a", "../b")); EXPECT_EQ(string("../") + currentdir + string("/a/b"), NormalizeRelativeAndCheckNoError("a/b", "../c")); EXPECT_EQ("../../a", NormalizeRelativeAndCheckNoError("a", "b/c")); EXPECT_EQ(".", NormalizeRelativeAndCheckNoError("a", "a")); } TEST(IncludesNormalize, Case) { EXPECT_EQ("b", NormalizeAndCheckNoError("Abc\\..\\b")); EXPECT_EQ("BdEf", NormalizeAndCheckNoError("Abc\\..\\BdEf")); EXPECT_EQ("A/b", NormalizeAndCheckNoError("A\\.\\b")); EXPECT_EQ("a/b", NormalizeAndCheckNoError("a\\./b")); EXPECT_EQ("A/B", NormalizeAndCheckNoError("A\\.\\B")); EXPECT_EQ("A/B", NormalizeAndCheckNoError("A\\./B")); } TEST(IncludesNormalize, DifferentDrive) { EXPECT_EQ("stuff.h", NormalizeRelativeAndCheckNoError("p:\\vs08\\stuff.h", "p:\\vs08")); EXPECT_EQ("stuff.h", NormalizeRelativeAndCheckNoError("P:\\Vs08\\stuff.h", "p:\\vs08")); EXPECT_EQ("p:/vs08/stuff.h", NormalizeRelativeAndCheckNoError("p:\\vs08\\stuff.h", "c:\\vs08")); EXPECT_EQ("P:/vs08/stufF.h", NormalizeRelativeAndCheckNoError( "P:\\vs08\\stufF.h", "D:\\stuff/things")); EXPECT_EQ("P:/vs08/stuff.h", NormalizeRelativeAndCheckNoError( "P:/vs08\\stuff.h", "D:\\stuff/things")); EXPECT_EQ("P:/wee/stuff.h", NormalizeRelativeAndCheckNoError("P:/vs08\\../wee\\stuff.h", "D:\\stuff/things")); } TEST(IncludesNormalize, LongInvalidPath) { const char kLongInputString[] = "C:\\Program Files (x86)\\Microsoft Visual Studio " "12.0\\VC\\INCLUDEwarning #31001: The dll for reading and writing the " "pdb (for example, mspdb110.dll) could not be found on your path. This " "is usually a configuration error. 
Compilation will continue using /Z7 " "instead of /Zi, but expect a similar error when you link your program."; // Too long, won't be canonicalized. Ensure doesn't crash. string result, err; IncludesNormalize normalizer("."); EXPECT_FALSE( normalizer.Normalize(kLongInputString, &result, &err)); EXPECT_EQ("path too long", err); // Construct max size path having cwd prefix. // kExactlyMaxPath = "$cwd\\a\\aaaa...aaaa\0"; char kExactlyMaxPath[_MAX_PATH + 1]; ASSERT_STRNE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL); int cwd_len = strlen(kExactlyMaxPath); ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH); kExactlyMaxPath[cwd_len] = '\\'; kExactlyMaxPath[cwd_len + 1] = 'a'; kExactlyMaxPath[cwd_len + 2] = '\\'; kExactlyMaxPath[cwd_len + 3] = 'a'; for (int i = cwd_len + 4; i < _MAX_PATH; ++i) { if (i > cwd_len + 4 && i < _MAX_PATH - 1 && i % 10 == 0) kExactlyMaxPath[i] = '\\'; else kExactlyMaxPath[i] = 'a'; } kExactlyMaxPath[_MAX_PATH] = '\0'; // This is a relatively safe cast as we can expect that _MAX_PATH will never be negative EXPECT_EQ(strlen(kExactlyMaxPath), static_cast(_MAX_PATH)); string forward_slashes(kExactlyMaxPath); replace(forward_slashes.begin(), forward_slashes.end(), '\\', '/'); // Make sure a path that's exactly _MAX_PATH long is canonicalized. EXPECT_EQ(forward_slashes.substr(cwd_len + 1), NormalizeAndCheckNoError(kExactlyMaxPath)); } TEST(IncludesNormalize, ShortRelativeButTooLongAbsolutePath) { string result, err; IncludesNormalize normalizer("."); // A short path should work EXPECT_TRUE(normalizer.Normalize("a", &result, &err)); EXPECT_EQ("", err); // Construct max size path having cwd prefix. 
// kExactlyMaxPath = "aaaa\\aaaa...aaaa\0"; char kExactlyMaxPath[_MAX_PATH + 1]; for (int i = 0; i < _MAX_PATH; ++i) { if (i < _MAX_PATH - 1 && i % 10 == 4) kExactlyMaxPath[i] = '\\'; else kExactlyMaxPath[i] = 'a'; } kExactlyMaxPath[_MAX_PATH] = '\0'; EXPECT_EQ(strlen(kExactlyMaxPath), static_cast(_MAX_PATH)); // Make sure a path that's exactly _MAX_PATH long fails with a proper error. EXPECT_FALSE(normalizer.Normalize(kExactlyMaxPath, &result, &err)); EXPECT_TRUE(err.find("GetFullPathName") != string::npos); } ninja-1.13.2/src/inline.sh000077500000000000000000000024331510764045400153110ustar00rootroot00000000000000#!/bin/sh # # Copyright 2001 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This quick script converts a text file into an #include-able header. # It expects the name of the variable as its first argument, and reads # stdin and writes stdout. varname="$1" # 'od' and 'sed' may not be available on all platforms, and may not support the # flags used here. We must ensure that the script exits with a non-zero exit # code in those cases. 
byte_vals=$(od -t x1 -A n -v) || exit 1 escaped_byte_vals=$(echo "${byte_vals}" \ | sed -e 's|^[\t ]\{0,\}$||g; s|[\t ]\{1,\}| |g; s| \{1,\}$||g; s| |\\x|g; s|^|"|; s|$|"|') \ || exit 1 # Only write output once we have successfully generated the required data printf "const char %s[] = \n%s;" "${varname}" "${escaped_byte_vals}" ninja-1.13.2/src/jobserver-posix.cc000066400000000000000000000071521510764045400171470ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include #include #include #include #include #include "jobserver.h" #include "util.h" namespace { // Return true if |fd| is a fifo or pipe descriptor. 
bool IsFifoDescriptor(int fd) { struct stat info; int ret = ::fstat(fd, &info); return (ret == 0) && ((info.st_mode & S_IFMT) == S_IFIFO); } // Implementation of Jobserver::Client for Posix systems class PosixJobserverClient : public Jobserver::Client { public: virtual ~PosixJobserverClient() { if (write_fd_ >= 0) ::close(write_fd_); if (read_fd_ >= 0) ::close(read_fd_); } Jobserver::Slot TryAcquire() override { if (has_implicit_slot_) { has_implicit_slot_ = false; return Jobserver::Slot::CreateImplicit(); } uint8_t slot_char = '\0'; ssize_t ret; do { ret = ::read(read_fd_, &slot_char, 1); } while (ret < 0 && errno == EINTR); if (ret == 1) { return Jobserver::Slot::CreateExplicit(slot_char); } return Jobserver::Slot(); } void Release(Jobserver::Slot slot) override { if (!slot.IsValid()) return; if (slot.IsImplicit()) { assert(!has_implicit_slot_ && "Implicit slot cannot be released twice!"); has_implicit_slot_ = true; return; } uint8_t slot_char = slot.GetExplicitValue(); ssize_t ret; do { ret = ::write(write_fd_, &slot_char, 1); } while (ret < 0 && errno == EINTR); (void)ret; // Nothing can be done in case of error here. } // Initialize with FIFO file path. bool InitWithFifo(const std::string& fifo_path, std::string* error) { if (fifo_path.empty()) { *error = "Empty fifo path"; return false; } read_fd_ = ::open(fifo_path.c_str(), O_RDONLY | O_NONBLOCK | O_CLOEXEC); if (read_fd_ < 0) { *error = std::string("Error opening fifo for reading: ") + strerror(errno); return false; } if (!IsFifoDescriptor(read_fd_)) { *error = "Not a fifo path: " + fifo_path; // Let destructor close read_fd_. return false; } write_fd_ = ::open(fifo_path.c_str(), O_WRONLY | O_NONBLOCK | O_CLOEXEC); if (write_fd_ < 0) { *error = std::string("Error opening fifo for writing: ") + strerror(errno); // Let destructor close read_fd_ return false; } return true; } private: // Set to true if the implicit slot has not been acquired yet. bool has_implicit_slot_ = true; // read and write descriptors. 
int read_fd_ = -1; int write_fd_ = -1; }; } // namespace // static std::unique_ptr Jobserver::Client::Create( const Jobserver::Config& config, std::string* error) { bool success = false; auto client = std::unique_ptr(new PosixJobserverClient); if (config.mode == Jobserver::Config::kModePosixFifo) { success = client->InitWithFifo(config.path, error); } else { *error = "Unsupported jobserver mode"; } if (!success) client.reset(); return client; } ninja-1.13.2/src/jobserver-win32.cc000066400000000000000000000060021510764045400167400ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "jobserver.h" #include "util.h" namespace { // Implementation of Jobserver::Client for Win32 systems. // At the moment, only the semaphore scheme is supported, // even when running under Cygwin which could support the // pipe version, in theory. class Win32JobserverClient : public Jobserver::Client { public: virtual ~Win32JobserverClient() { // NOTE: OpenSemaphore() returns NULL on failure. if (IsValid()) { ::CloseHandle(handle_); } } Jobserver::Slot TryAcquire() override { if (IsValid()) { if (has_implicit_slot_) { has_implicit_slot_ = false; return Jobserver::Slot::CreateImplicit(); } DWORD ret = ::WaitForSingleObject(handle_, 0); if (ret == WAIT_OBJECT_0) { // Hard-code value 1 for the explicit slot value. 
return Jobserver::Slot::CreateExplicit(1); } } return Jobserver::Slot(); } void Release(Jobserver::Slot slot) override { if (!slot.IsValid()) return; if (slot.IsImplicit()) { assert(!has_implicit_slot_ && "Implicit slot cannot be released twice!"); has_implicit_slot_ = true; return; } // Nothing can be done in case of error here. (void)::ReleaseSemaphore(handle_, 1, NULL); } bool InitWithSemaphore(const std::string& name, std::string* error) { handle_ = ::OpenSemaphoreA(SYNCHRONIZE | SEMAPHORE_MODIFY_STATE, FALSE, name.c_str()); if (handle_ == NULL) { *error = "Error opening semaphore: " + GetLastErrorString(); return false; } return true; } protected: bool IsValid() const { // NOTE: OpenSemaphore() returns NULL on failure, not INVALID_HANDLE_VALUE. return handle_ != NULL; } // Set to true if the implicit slot has not been acquired yet. bool has_implicit_slot_ = true; // Semaphore handle. NULL means not in use. HANDLE handle_ = NULL; }; } // namespace // static std::unique_ptr Jobserver::Client::Create( const Jobserver::Config& config, std::string* error) { bool success = false; auto client = std::unique_ptr(new Win32JobserverClient()); if (config.mode == Jobserver::Config::kModeWin32Semaphore) { success = client->InitWithSemaphore(config.path, error); } else { *error = "Unsupported jobserver mode"; } if (!success) client.reset(); return client; } ninja-1.13.2/src/jobserver.cc000066400000000000000000000160331510764045400160050ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include "jobserver.h" #include #include #include #include "string_piece.h" namespace { // If |input| starts with |prefix|, return true and sets |*value| to the rest // of the input. Otherwise return false. bool GetPrefixedValue(StringPiece input, StringPiece prefix, StringPiece* value) { assert(prefix.len_ > 0); if (input.len_ < prefix.len_ || memcmp(prefix.str_, input.str_, prefix.len_)) return false; *value = StringPiece(input.str_ + prefix.len_, input.len_ - prefix.len_); return true; } // Try to read a comma-separated pair of file descriptors from |input|. // On success return true and set |config->mode| accordingly. Otherwise return // false if the input doesn't follow the appropriate format. Note that the // values are not saved since pipe mode is not supported. bool GetFileDescriptorPair(StringPiece input, Jobserver::Config* config) { int read_fd = 1, write_fd = -1; std::string pair = input.AsString(); if (sscanf(pair.c_str(), "%d,%d", &read_fd, &write_fd) != 2) return false; // From // https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html Any // negative descriptor means the feature is disabled. if (read_fd < 0 || write_fd < 0) config->mode = Jobserver::Config::kModeNone; else config->mode = Jobserver::Config::kModePipe; return true; } } // namespace // static const int16_t Jobserver::Slot::kImplicitValue; uint8_t Jobserver::Slot::GetExplicitValue() const { assert(IsExplicit()); return static_cast(value_); } bool Jobserver::ParseMakeFlagsValue(const char* makeflags_env, Jobserver::Config* config, std::string* error) { *config = Config(); if (!makeflags_env || !makeflags_env[0]) { /// Return default Config instance with kModeNone if input is null or empty. return true; } // Decompose input into vector of space or tab separated string pieces. 
std::vector args; const char* p = makeflags_env; while (*p) { const char* next_space = strpbrk(p, " \t"); if (!next_space) { args.emplace_back(p); break; } if (next_space > p) args.emplace_back(p, next_space - p); p = next_space + 1; } // clang-format off // // From: // https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html // // """ // Your tool may also examine the first word of the MAKEFLAGS variable and // look for the character n. If this character is present then make was // invoked with the ‘-n’ option and your tool may want to stop without // performing any operations. // """ // // Where according to // https://www.gnu.org/software/make/manual/html_node/Options_002fRecursion.html // MAKEFLAGS begins with all "flag letters" passed to make. // // Experimentation shows that GNU Make 4.3, at least, will set MAKEFLAGS with // an initial space if no letter flag are passed to its invocation (except -j), // i.e.: // // make -ks --> MAKEFLAGS="ks" // make -j --> MAKEFLAGS=" -j" // make -ksj --> MAKEFLAGS="ks -j" // make -ks -j3 --> MAKEFLAGS="ks -j3 --jobserver-auth=3,4" // make -j3 --> MAKEFLAGS=" -j3 --jobserver-auth=3,4" // // However, other jobserver implementation will not, for example the one // at https://github.com/rust-lang/jobserver-rs will set MAKEFLAGS to just // "--jobserver-fds=R,W --jobserver-auth=R,W" instead, without an initial // space. // // Another implementation is from Rust's Cargo itself which will set it to // "-j --jobserver-fds=R,W --jobserver-auth=R,W". // // For the record --jobserver-fds=R,W is an old undocumented and deprecated // version of --jobserver-auth=R,W that was implemented by GNU Make before 4.2 // was released, and some tooling may depend on it. Hence it makes sense to // define both --jobserver-fds and --jobserver-auth at the same time, since // the last recognized one should win in client code. 
// // The initial space will have been stripped by the loop above, but we can // still support the requirement by ignoring the first arg if it begins with a // dash (-). // // clang-format on if (!args.empty() && args[0][0] != '-' && memchr(args[0].str_, 'n', args[0].len_) != nullptr) { return true; } // Loop over all arguments, the last one wins, except in case of errors. for (const auto& arg : args) { StringPiece value; // Handle --jobserver-auth=... here. if (GetPrefixedValue(arg, "--jobserver-auth=", &value)) { if (GetFileDescriptorPair(value, config)) { continue; } StringPiece fifo_path; if (GetPrefixedValue(value, "fifo:", &fifo_path)) { config->mode = Jobserver::Config::kModePosixFifo; config->path = fifo_path.AsString(); } else { config->mode = Jobserver::Config::kModeWin32Semaphore; config->path = value.AsString(); } continue; } // Handle --jobserver-fds which is an old undocumented variant of // --jobserver-auth that only accepts a pair of file descriptor. // This was replaced by --jobserver-auth=R,W in GNU Make 4.2. if (GetPrefixedValue(arg, "--jobserver-fds=", &value)) { if (!GetFileDescriptorPair(value, config)) { *error = "Invalid file descriptor pair [" + value.AsString() + "]"; return false; } config->mode = Jobserver::Config::kModePipe; continue; } // Ignore this argument. This assumes that MAKEFLAGS does not // use spaces to separate the option from its argument, e.g. // `--jobserver-auth `, which has been confirmed with // Make 4.3, even if it receives such a value in its own env. 
} return true; } bool Jobserver::ParseNativeMakeFlagsValue(const char* makeflags_env, Jobserver::Config* config, std::string* error) { if (!ParseMakeFlagsValue(makeflags_env, config, error)) return false; if (config->mode == Jobserver::Config::kModePipe) { *error = "Pipe-based protocol is not supported!"; return false; } #ifdef _WIN32 if (config->mode == Jobserver::Config::kModePosixFifo) { *error = "FIFO mode is not supported on Windows!"; return false; } #else // !_WIN32 if (config->mode == Jobserver::Config::kModeWin32Semaphore) { *error = "Semaphore mode is not supported on Posix!"; return false; } #endif // !_WIN32 return true; } ninja-1.13.2/src/jobserver.h000066400000000000000000000207021510764045400156450ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include #include #include #include /// Jobserver provides types related to managing a pool of "job slots" /// using the GNU Make jobserver ptocol described at: /// /// https://www.gnu.org/software/make/manual/html_node/Job-Slots.html /// struct Jobserver { /// A Jobserver::Slot models a single job slot that can be acquired from. /// or released to a jobserver pool. This class is move-only, and can /// wrap three types of values: /// /// - An "invalid" value (the default), used to indicate errors, e.g. /// that no slot could be acquired from the pool. 
/// /// - The "implicit" value, used to model the job slot that is implicitly /// assigned to a jobserver client by the parent process that spawned /// it. /// /// - The "explicit" values, which correspond to an actual byte read from /// the slot pool's pipe (for Posix), or a semaphore decrement operation /// (for Windows). /// /// Use IsValid(), IsImplicit(), HasValue() to test for categories. /// /// TECHNICAL NOTE: This design complies with the requirements laid out /// on https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html /// which requires clients to write back the exact token values they /// received from a Posix pipe. /// /// Note that *currently* all pool implementations write the same token /// values to the pipe ('+' for GNU Make, and '|' for the Rust jobserver), /// and do not care about the values written back by clients. /// struct Slot { /// Default constructor creates invalid instance. Slot() = default; /// Move operations are allowed. Slot(Slot&& o) noexcept : value_(o.value_) { o.value_ = -1; } Slot& operator=(Slot&& o) noexcept { if (this != &o) { this->value_ = o.value_; o.value_ = -1; } return *this; } /// Copy operations are disallowed. Slot(const Slot&) = delete; Slot& operator=(const Slot&) = delete; /// Return true if this instance is valid, i.e. that it is either /// implicit or explicit job slot. bool IsValid() const { return value_ >= 0; } /// Return true if this instance represents an implicit job slot. bool IsImplicit() const { return value_ == kImplicitValue; } /// Return true if this instance represents an explicit job slot bool IsExplicit() const { return IsValid() && !IsImplicit(); } /// Return value of an explicit slot. It is a runtime error to call /// this from an invalid instance. uint8_t GetExplicitValue() const; /// Create instance for explicit byte value. static Slot CreateExplicit(uint8_t value) { return Slot(static_cast(value)); } /// Create instance for the implicit value. 
static Slot CreateImplicit() { return Slot(kImplicitValue); } private: Slot(int16_t value) : value_(value) {} static constexpr int16_t kImplicitValue = 256; int16_t value_ = -1; }; /// A Jobserver::Config models how to access or implement a GNU jobserver /// implementation. struct Config { /// Different implementation modes for the slot pool. /// /// kModeNone means there is no pool. /// /// kModePipe means that `--jobserver-auth=R,W` is used to /// pass a pair of file descriptors to client processes. This also /// matches `--jobserver-fds=R,W` which is an old undocumented /// variant of the same scheme. This mode is not supported by /// Ninja, but recognized by the parser. /// /// kModePosixFifo means that `--jobserver-auth=fifo:PATH` is used to /// pass the path of a Posix FIFO to client processes. This is not /// supported on Windows. Implemented by GNU Make 4.4 and above /// when `--jobserver-style=fifo` is used. /// /// kModeWin32Semaphore means that `--jobserver-auth=SEMAPHORE_NAME` is /// used to pass the name of a Win32 semaphore to client processes. /// This is not supported on Posix. /// /// kModeDefault is the default mode to enable on the current platform. /// This is an alias for kModeWin32Semaphore on Windows ,and /// kModePosixFifo on Posix. enum Mode { kModeNone = 0, kModePipe, kModePosixFifo, kModeWin32Semaphore, #ifdef _WIN32 kModeDefault = kModeWin32Semaphore, #else // _WIN32 kModeDefault = kModePosixFifo, #endif // _WIN32 }; /// Implementation mode for the pool. Mode mode = kModeNone; /// For kModeFifo, this is the path to the Unix FIFO to use. /// For kModeSemaphore, this is the name of the Win32 semaphore to use. std::string path; /// Return true if this instance matches an active implementation mode. /// This does not try to validate configuration parameters though. bool HasMode() { return mode != kModeNone; } }; /// Parse the value of a MAKEFLAGS environment variable. On success return /// true and set |*config|. 
On failure, return false and set |*error| to /// explain what's wrong. If |makeflags_env| is nullptr or an empty string, /// this returns success and sets |config->mode| to Config::kModeNone. static bool ParseMakeFlagsValue(const char* makeflags_env, Config* config, std::string* error); /// A variant of ParseMakeFlagsValue() that will return an error if the parsed /// result is not compatible with the native system. I.e.: /// /// --jobserver-auth=R,W is not supported on any system (but recognized to /// provide a relevant error message to the user). /// /// --jobserver-auth=NAME onlw works on Windows. /// /// --jobserver-auth=fifo:PATH only works on Posix. /// static bool ParseNativeMakeFlagsValue(const char* makeflags_env, Config* config, std::string* error); /// A Jobserver::Client instance models a client of an external GNU jobserver /// pool, which can be implemented as a Unix FIFO, or a Windows named /// semaphore. Usage is the following: /// /// - Call Jobserver::Client::Create(), passing a Config value as argument, /// (e.g. one initialized with ParseNativeMakeFlagsValue()) to create /// a new instance. /// /// - Call TryAcquire() to try to acquire a job slot from the pool. /// If the result is not an invalid slot, store it until the /// corresponding command completes, then call Release() to send it /// back to the pool. /// /// - It is important that all acquired slots are released to the pool, /// even if Ninja terminates early (e.g. due to a build command failing). /// class Client { public: virtual ~Client() {} /// Try to acquire a slot from the pool. On failure, i.e. if no slot /// can be acquired, this returns an invalid Token instance. /// /// Note that this will always return the implicit slot value the first /// time this is called, without reading anything from the pool, as /// specified by the protocol. This implicit value *must* be released /// just like any other one. 
In general, users of this class should not /// care about this detail, except unit-tests. virtual Slot TryAcquire() { return Slot(); } /// Release a slot to the pool. Does nothing if slot is invalid, /// or if writing to the pool fails (and if this is not the implicit slot). /// If the pool is destroyed before Ninja, then only the implicit slot /// can be acquired in the next calls (if it was released). This simply /// enforces serialization of all commands, instead of blocking. virtual void Release(Slot slot) {} /// Create a new Client instance from a given configuration. On failure, /// this returns null after setting |*error|. Note that it is an error to /// call this function with |config.HasMode() == false|. static std::unique_ptr Create(const Config&, std::string* error); protected: Client() = default; }; }; ninja-1.13.2/src/jobserver_test.cc000066400000000000000000000311261510764045400170440ustar00rootroot00000000000000// Copyright 2024 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "jobserver.h" #include "test.h" #ifndef _WIN32 #include #include #endif namespace { #ifndef _WIN32 struct ScopedTestFd { explicit ScopedTestFd(int fd) : fd_(fd) {} ~ScopedTestFd() { if (IsValid()) ::close(fd_); } bool IsValid() const { return fd_ >= 0; } int fd_ = -1; }; #endif // !_WIN32 } // namespace TEST(Jobserver, SlotTest) { // Default construction. 
Jobserver::Slot slot; EXPECT_FALSE(slot.IsValid()); // Construct implicit slot Jobserver::Slot slot0 = Jobserver::Slot::CreateImplicit(); EXPECT_TRUE(slot0.IsValid()); EXPECT_TRUE(slot0.IsImplicit()); EXPECT_FALSE(slot0.IsExplicit()); // Construct explicit slots auto slot1 = Jobserver::Slot::CreateExplicit(10u); EXPECT_TRUE(slot1.IsValid()); EXPECT_FALSE(slot1.IsImplicit()); EXPECT_TRUE(slot1.IsExplicit()); EXPECT_EQ(10u, slot1.GetExplicitValue()); auto slot2 = Jobserver::Slot::CreateExplicit(42u); EXPECT_TRUE(slot2.IsValid()); EXPECT_FALSE(slot2.IsImplicit()); EXPECT_TRUE(slot2.IsExplicit()); EXPECT_EQ(42u, slot2.GetExplicitValue()); // Move operation. slot2 = std::move(slot1); EXPECT_FALSE(slot1.IsValid()); EXPECT_TRUE(slot2.IsValid()); EXPECT_TRUE(slot2.IsExplicit()); ASSERT_EQ(10u, slot2.GetExplicitValue()); slot1 = std::move(slot0); EXPECT_FALSE(slot0.IsValid()); EXPECT_TRUE(slot1.IsValid()); EXPECT_TRUE(slot1.IsImplicit()); EXPECT_FALSE(slot1.IsExplicit()); } TEST(Jobserver, ParseMakeFlagsValue) { Jobserver::Config config; std::string error; // Passing nullptr does not crash. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(nullptr, &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); // Passing an empty string does not crash. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); // Passing a string that only contains whitespace does not crash. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(" \t", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); // Passing an `n` in the first word reports no mode. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("kns --jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); // Passing "--jobserver-auth=fifo:" works. 
config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode); EXPECT_EQ("foo", config.path); // Passing an initial " -j" or " -j" works. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(" -j --jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode); EXPECT_EQ("foo", config.path); // Passing an initial " -j" works. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue(" -j10 --jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode); EXPECT_EQ("foo", config.path); // Passing an `n` in the first word _after_ a dash works though, i.e. // It is not interpreted as GNU Make dry-run flag. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue( "-one-flag --jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode); config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=semaphore_name", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeWin32Semaphore, config.mode); EXPECT_EQ("semaphore_name", config.path); config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=10,42", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePipe, config.mode); config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=-1,42", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue("--jobserver-auth=10,-42", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeNone, config.mode); config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseMakeFlagsValue( "--jobserver-auth=10,42 --jobserver-fds=12,44 " "--jobserver-auth=fifo:/tmp/fifo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, 
config.mode); EXPECT_EQ("/tmp/fifo", config.path); config = {}; error.clear(); ASSERT_FALSE( Jobserver::ParseMakeFlagsValue("--jobserver-fds=10,", &config, &error)); EXPECT_EQ("Invalid file descriptor pair [10,]", error); } TEST(Jobserver, ParseNativeMakeFlagsValue) { Jobserver::Config config; std::string error; // --jobserver-auth=R,W is not supported. config = {}; error.clear(); EXPECT_FALSE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=3,4", &config, &error)); EXPECT_EQ(error, "Pipe-based protocol is not supported!"); #ifdef _WIN32 // --jobserver-auth=NAME works on Windows. config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseNativeMakeFlagsValue( "--jobserver-auth=semaphore_name", &config, &error)); EXPECT_EQ(Jobserver::Config::kModeWin32Semaphore, config.mode); EXPECT_EQ("semaphore_name", config.path); // --jobserver-auth=fifo:PATH does not work on Windows. config = {}; error.clear(); ASSERT_FALSE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(error, "FIFO mode is not supported on Windows!"); #else // !_WIN32 // --jobserver-auth=NAME does not work on Posix config = {}; error.clear(); ASSERT_FALSE(Jobserver::ParseNativeMakeFlagsValue( "--jobserver-auth=semaphore_name", &config, &error)); EXPECT_EQ(error, "Semaphore mode is not supported on Posix!"); // --jobserver-auth=fifo:PATH works on Posix config = {}; error.clear(); ASSERT_TRUE(Jobserver::ParseNativeMakeFlagsValue("--jobserver-auth=fifo:foo", &config, &error)); EXPECT_EQ(Jobserver::Config::kModePosixFifo, config.mode); EXPECT_EQ("foo", config.path); #endif // !_WIN32 } TEST(Jobserver, NullJobserver) { Jobserver::Config config; ASSERT_EQ(Jobserver::Config::kModeNone, config.mode); std::string error; std::unique_ptr client = Jobserver::Client::Create(config, &error); EXPECT_FALSE(client.get()); EXPECT_EQ("Unsupported jobserver mode", error); } #ifdef _WIN32 #include // Scoped HANDLE class for the semaphore. 
struct ScopedSemaphoreHandle { ScopedSemaphoreHandle(HANDLE handle) : handle_(handle) {} ~ScopedSemaphoreHandle() { if (handle_) ::CloseHandle(handle_); } HANDLE get() const { return handle_; } private: HANDLE handle_ = NULL; }; TEST(Jobserver, Win32SemaphoreClient) { // Create semaphore with initial token count. const size_t kExplicitCount = 10; const char kSemaphoreName[] = "ninja_test_jobserver_semaphore"; ScopedSemaphoreHandle handle( ::CreateSemaphoreA(NULL, static_cast(kExplicitCount), static_cast(kExplicitCount), kSemaphoreName)); ASSERT_TRUE(handle.get()) << GetLastErrorString(); // Create new client instance. Jobserver::Config config; config.mode = Jobserver::Config::kModeWin32Semaphore; config.path = kSemaphoreName; std::string error; std::unique_ptr client = Jobserver::Client::Create(config, &error); EXPECT_TRUE(client.get()) << error; EXPECT_TRUE(error.empty()) << error; Jobserver::Slot slot; std::vector slots; // Read the implicit slot. slot = client->TryAcquire(); EXPECT_TRUE(slot.IsValid()); EXPECT_TRUE(slot.IsImplicit()); slots.push_back(std::move(slot)); // Read the explicit slots. for (size_t n = 0; n < kExplicitCount; ++n) { slot = client->TryAcquire(); EXPECT_TRUE(slot.IsValid()); EXPECT_TRUE(slot.IsExplicit()); slots.push_back(std::move(slot)); } // Pool should be empty now. slot = client->TryAcquire(); EXPECT_FALSE(slot.IsValid()); // Release the slots again. while (!slots.empty()) { client->Release(std::move(slots.back())); slots.pop_back(); } slot = client->TryAcquire(); EXPECT_TRUE(slot.IsValid()); EXPECT_TRUE(slot.IsImplicit()); slots.push_back(std::move(slot)); for (size_t n = 0; n < kExplicitCount; ++n) { slot = client->TryAcquire(); EXPECT_TRUE(slot.IsValid()); EXPECT_TRUE(slot.IsExplicit()) << n; slots.push_back(std::move(slot)); } // And the pool should be empty again. 
slot = client->TryAcquire(); EXPECT_FALSE(slot.IsValid()); } #else // !_WIN32 TEST(Jobserver, PosixFifoClient) { ScopedTempDir temp_dir; temp_dir.CreateAndEnter("ninja_test_jobserver_fifo"); // Create the Fifo, then write kSlotCount slots into it. std::string fifo_path = temp_dir.temp_dir_name_ + "fifo"; int ret = mknod(fifo_path.c_str(), S_IFIFO | 0666, 0); ASSERT_EQ(0, ret) << "Could not create FIFO at: " << fifo_path; const size_t kSlotCount = 5; ScopedTestFd write_fd(::open(fifo_path.c_str(), O_RDWR)); ASSERT_TRUE(write_fd.IsValid()) << "Cannot open FIFO at: " << strerror(errno); for (size_t n = 0; n < kSlotCount; ++n) { uint8_t slot_byte = static_cast('0' + n); ssize_t ret = ::write(write_fd.fd_, &slot_byte, 1); (void)ret; // make compiler happy } // Keep the file descriptor opened to ensure the fifo's content // persists in kernel memory. // Create new client instance. Jobserver::Config config; config.mode = Jobserver::Config::kModePosixFifo; config.path = fifo_path; std::string error; std::unique_ptr client = Jobserver::Client::Create(config, &error); EXPECT_TRUE(client.get()); EXPECT_TRUE(error.empty()) << error; // Read slots from the pool, and store them std::vector slots; // First slot is always implicit. slots.push_back(client->TryAcquire()); ASSERT_TRUE(slots.back().IsValid()); EXPECT_TRUE(slots.back().IsImplicit()); // Then read kSlotCount slots from the pipe and verify their value. for (size_t n = 0; n < kSlotCount; ++n) { Jobserver::Slot slot = client->TryAcquire(); ASSERT_TRUE(slot.IsValid()) << "Slot #" << n + 1; EXPECT_EQ(static_cast('0' + n), slot.GetExplicitValue()); slots.push_back(std::move(slot)); } // Pool should be empty now, so next TryAcquire() will fail. Jobserver::Slot slot = client->TryAcquire(); EXPECT_FALSE(slot.IsValid()); } TEST(Jobserver, PosixFifoClientWithWrongPath) { ScopedTempDir temp_dir; temp_dir.CreateAndEnter("ninja_test_jobserver_fifo"); // Create a regular file. 
std::string file_path = temp_dir.temp_dir_name_ + "not_a_fifo"; int fd = ::open(file_path.c_str(), O_CREAT | O_RDWR, 0660); ASSERT_GE(fd, 0) << "Could not create file: " << strerror(errno); ::close(fd); // Create new client instance, passing the file path for the fifo. Jobserver::Config config; config.mode = Jobserver::Config::kModePosixFifo; config.path = file_path; std::string error; std::unique_ptr client = Jobserver::Client::Create(config, &error); EXPECT_FALSE(client.get()); EXPECT_FALSE(error.empty()); EXPECT_EQ("Not a fifo path: " + file_path, error); // Do the same with an empty file path. error.clear(); config.path.clear(); client = Jobserver::Client::Create(config, &error); EXPECT_FALSE(client.get()); EXPECT_FALSE(error.empty()); EXPECT_EQ("Empty fifo path", error); } #endif // !_WIN32 ninja-1.13.2/src/json.cc000066400000000000000000000030021510764045400147450ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "json.h" #include #include std::string EncodeJSONString(const std::string& in) { static const char* hex_digits = "0123456789abcdef"; std::string out; out.reserve(in.length() * 1.2); for (std::string::const_iterator it = in.begin(); it != in.end(); ++it) { char c = *it; if (c == '\b') out += "\\b"; else if (c == '\f') out += "\\f"; else if (c == '\n') out += "\\n"; else if (c == '\r') out += "\\r"; else if (c == '\t') out += "\\t"; else if (0x0 <= c && c < 0x20) { out += "\\u00"; out += hex_digits[c >> 4]; out += hex_digits[c & 0xf]; } else if (c == '\\') out += "\\\\"; else if (c == '\"') out += "\\\""; else out += c; } return out; } void PrintJSONString(const std::string& in) { std::string out = EncodeJSONString(in); fwrite(out.c_str(), 1, out.length(), stdout); } ninja-1.13.2/src/json.h000066400000000000000000000016141510764045400146160ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_JSON_H_ #define NINJA_JSON_H_ #include // Encode a string in JSON format without enclosing quotes std::string EncodeJSONString(const std::string& in); // Print a string in JSON format to stdout without enclosing quotes void PrintJSONString(const std::string& in); #endif ninja-1.13.2/src/json_test.cc000066400000000000000000000023361510764045400160150ustar00rootroot00000000000000// Copyright 2021 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "json.h" #include "test.h" TEST(JSONTest, RegularAscii) { EXPECT_EQ(EncodeJSONString("foo bar"), "foo bar"); } TEST(JSONTest, EscapedChars) { EXPECT_EQ(EncodeJSONString("\"\\\b\f\n\r\t"), "\\\"" "\\\\" "\\b\\f\\n\\r\\t"); } // codepoints between 0 and 0x1f should be escaped TEST(JSONTest, ControlChars) { EXPECT_EQ(EncodeJSONString("\x01\x1f"), "\\u0001\\u001f"); } // Leave them alone as JSON accepts unicode literals // out of control character range TEST(JSONTest, UTF8) { const char* utf8str = "\xe4\xbd\xa0\xe5\xa5\xbd"; EXPECT_EQ(EncodeJSONString(utf8str), utf8str); } ninja-1.13.2/src/lexer.cc000066400000000000000000000420111510764045400151160ustar00rootroot00000000000000/* Generated by re2c */ // Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "lexer.h" #include #include "eval_env.h" #include "util.h" using namespace std; bool Lexer::Error(const string& message, string* err) { // Compute line/column. int line = 1; const char* line_start = input_.str_; for (const char* p = input_.str_; p < last_token_; ++p) { if (*p == '\n') { ++line; line_start = p + 1; } } int col = last_token_ ? (int)(last_token_ - line_start) : 0; char buf[1024]; snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line); *err = buf; *err += message + "\n"; // Add some context to the message. const int kTruncateColumn = 72; if (col > 0 && col < kTruncateColumn) { int len; bool truncated = true; for (len = 0; len < kTruncateColumn; ++len) { if (line_start[len] == 0 || line_start[len] == '\n') { truncated = false; break; } } *err += string(line_start, len); if (truncated) *err += "..."; *err += "\n"; *err += string(col, ' '); *err += "^ near here"; } return false; } Lexer::Lexer(const char* input) { Start("input", input); } void Lexer::Start(StringPiece filename, StringPiece input) { filename_ = filename; input_ = input; ofs_ = input_.str_; last_token_ = NULL; } const char* Lexer::TokenName(Token t) { switch (t) { case ERROR: return "lexing error"; case BUILD: return "'build'"; case COLON: return "':'"; case DEFAULT: return "'default'"; case EQUALS: return "'='"; case IDENT: return "identifier"; case INCLUDE: return "'include'"; case INDENT: return "indent"; case NEWLINE: return "newline"; case PIPE2: return "'||'"; case PIPE: return "'|'"; case PIPEAT: return "'|@'"; case POOL: return "'pool'"; case RULE: return "'rule'"; case SUBNINJA: return "'subninja'"; case TEOF: return "eof"; } return NULL; // not reached } const char* Lexer::TokenErrorHint(Token expected) { switch (expected) { case COLON: return " ($ also escapes ':')"; default: return ""; } } string Lexer::DescribeLastError() { if (last_token_) { switch (last_token_[0]) { case '\t': return "tabs are not allowed, use spaces"; } } return "lexing error"; } 
void Lexer::UnreadToken() { ofs_ = last_token_; } Lexer::Token Lexer::ReadToken() { const char* p = ofs_; const char* q; const char* start; Lexer::Token token; for (;;) { start = p; { unsigned char yych; unsigned int yyaccept = 0; static const unsigned char yybm[] = { 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 160, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 192, 192, 128, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 128, 128, 128, 128, 128, 128, 128, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 128, 128, 128, 128, 192, 128, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; yych = *p; if (yybm[0+yych] & 32) { goto yy6; } if (yych <= '^') { if (yych <= ',') { if (yych <= '\f') { if (yych <= 0x00) goto yy1; if (yych == '\n') goto yy4; goto yy2; } else { if (yych <= '\r') goto yy5; if (yych == '#') goto yy8; goto yy2; } } else { if (yych <= ':') { if (yych == '/') goto yy2; if (yych <= '9') goto yy9; goto yy11; } else { if (yych <= '=') { if (yych <= '<') goto yy2; goto yy12; } else { if (yych <= '@') goto yy2; if (yych 
<= 'Z') goto yy9; goto yy2; } } } } else { if (yych <= 'i') { if (yych <= 'b') { if (yych == '`') goto yy2; if (yych <= 'a') goto yy9; goto yy13; } else { if (yych == 'd') goto yy14; if (yych <= 'h') goto yy9; goto yy15; } } else { if (yych <= 'r') { if (yych == 'p') goto yy16; if (yych <= 'q') goto yy9; goto yy17; } else { if (yych <= 'z') { if (yych <= 's') goto yy18; goto yy9; } else { if (yych == '|') goto yy19; goto yy2; } } } } yy1: ++p; { token = TEOF; break; } yy2: ++p; yy3: { token = ERROR; break; } yy4: ++p; { token = NEWLINE; break; } yy5: yych = *++p; if (yych == '\n') goto yy20; goto yy3; yy6: yyaccept = 0; yych = *(q = ++p); if (yybm[0+yych] & 32) { goto yy6; } if (yych <= '\f') { if (yych == '\n') goto yy4; } else { if (yych <= '\r') goto yy21; if (yych == '#') goto yy23; } yy7: { token = INDENT; break; } yy8: yyaccept = 1; yych = *(q = ++p); if (yych <= 0x00) goto yy3; goto yy24; yy9: yych = *++p; yy10: if (yybm[0+yych] & 64) { goto yy9; } { token = IDENT; break; } yy11: ++p; { token = COLON; break; } yy12: ++p; { token = EQUALS; break; } yy13: yych = *++p; if (yych == 'u') goto yy25; goto yy10; yy14: yych = *++p; if (yych == 'e') goto yy26; goto yy10; yy15: yych = *++p; if (yych == 'n') goto yy27; goto yy10; yy16: yych = *++p; if (yych == 'o') goto yy28; goto yy10; yy17: yych = *++p; if (yych == 'u') goto yy29; goto yy10; yy18: yych = *++p; if (yych == 'u') goto yy30; goto yy10; yy19: yych = *++p; if (yych == '@') goto yy31; if (yych == '|') goto yy32; { token = PIPE; break; } yy20: ++p; { token = NEWLINE; break; } yy21: yych = *++p; if (yych == '\n') goto yy20; yy22: p = q; if (yyaccept == 0) { goto yy7; } else { goto yy3; } yy23: yych = *++p; yy24: if (yybm[0+yych] & 128) { goto yy23; } if (yych <= 0x00) goto yy22; ++p; { continue; } yy25: yych = *++p; if (yych == 'i') goto yy33; goto yy10; yy26: yych = *++p; if (yych == 'f') goto yy34; goto yy10; yy27: yych = *++p; if (yych == 'c') goto yy35; goto yy10; yy28: yych = *++p; if (yych == 'o') goto 
yy36; goto yy10; yy29: yych = *++p; if (yych == 'l') goto yy37; goto yy10; yy30: yych = *++p; if (yych == 'b') goto yy38; goto yy10; yy31: ++p; { token = PIPEAT; break; } yy32: ++p; { token = PIPE2; break; } yy33: yych = *++p; if (yych == 'l') goto yy39; goto yy10; yy34: yych = *++p; if (yych == 'a') goto yy40; goto yy10; yy35: yych = *++p; if (yych == 'l') goto yy41; goto yy10; yy36: yych = *++p; if (yych == 'l') goto yy42; goto yy10; yy37: yych = *++p; if (yych == 'e') goto yy43; goto yy10; yy38: yych = *++p; if (yych == 'n') goto yy44; goto yy10; yy39: yych = *++p; if (yych == 'd') goto yy45; goto yy10; yy40: yych = *++p; if (yych == 'u') goto yy46; goto yy10; yy41: yych = *++p; if (yych == 'u') goto yy47; goto yy10; yy42: yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = POOL; break; } yy43: yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = RULE; break; } yy44: yych = *++p; if (yych == 'i') goto yy48; goto yy10; yy45: yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = BUILD; break; } yy46: yych = *++p; if (yych == 'l') goto yy49; goto yy10; yy47: yych = *++p; if (yych == 'd') goto yy50; goto yy10; yy48: yych = *++p; if (yych == 'n') goto yy51; goto yy10; yy49: yych = *++p; if (yych == 't') goto yy52; goto yy10; yy50: yych = *++p; if (yych == 'e') goto yy53; goto yy10; yy51: yych = *++p; if (yych == 'j') goto yy54; goto yy10; yy52: yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = DEFAULT; break; } yy53: yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = INCLUDE; break; } yy54: yych = *++p; if (yych != 'a') goto yy10; yych = *++p; if (yybm[0+yych] & 64) { goto yy9; } { token = SUBNINJA; break; } } } last_token_ = start; ofs_ = p; if (token != NEWLINE && token != TEOF) EatWhitespace(); return token; } bool Lexer::PeekToken(Token token) { Token t = ReadToken(); if (t == token) return true; UnreadToken(); return false; } void Lexer::EatWhitespace() { const char* p = ofs_; const char* q; for (;;) { ofs_ = p; { unsigned 
char yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; yych = *p; if (yybm[0+yych] & 128) { goto yy59; } if (yych <= 0x00) goto yy56; if (yych == '$') goto yy60; goto yy57; yy56: ++p; { break; } yy57: ++p; yy58: { break; } yy59: yych = *++p; if (yybm[0+yych] & 128) { goto yy59; } { continue; } yy60: yych = *(q = ++p); if (yych == '\n') goto yy61; if (yych == '\r') goto yy62; goto yy58; yy61: ++p; { continue; } yy62: yych = *++p; if (yych == '\n') goto yy63; p = q; goto yy58; yy63: ++p; { continue; } } } } bool Lexer::ReadIdent(string* out) { const char* p = ofs_; const char* start; for (;;) { start = p; { unsigned char yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; yych = *p; if (yybm[0+yych] & 128) { goto yy65; } ++p; { last_token_ = start; return false; } yy65: yych = *++p; if (yybm[0+yych] & 128) { goto yy65; } { out->assign(start, p - start); break; } } } last_token_ = start; ofs_ = p; EatWhitespace(); return true; } bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) { const char* p = ofs_; const char* q; const char* start; for (;;) { start = p; { unsigned char yych; static const unsigned char yybm[] = { 0, 16, 16, 16, 16, 16, 16, 16, 16, 16, 0, 16, 16, 0, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 32, 16, 16, 16, 0, 16, 16, 16, 16, 16, 16, 16, 16, 208, 144, 16, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 0, 16, 16, 16, 16, 16, 16, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 16, 16, 16, 16, 208, 16, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 16, 0, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, }; yych = *p; if (yybm[0+yych] & 16) { goto yy68; } if (yych <= '\r') { if (yych <= 0x00) goto yy67; if 
(yych <= '\n') goto yy69; goto yy70; } else { if (yych <= ' ') goto yy69; if (yych <= '$') goto yy71; goto yy69; } yy67: ++p; { last_token_ = start; return Error("unexpected EOF", err); } yy68: yych = *++p; if (yybm[0+yych] & 16) { goto yy68; } { eval->AddText(StringPiece(start, p - start)); continue; } yy69: ++p; { if (path) { p = start; break; } else { if (*start == '\n') break; eval->AddText(StringPiece(start, 1)); continue; } } yy70: yych = *++p; if (yych == '\n') goto yy72; { last_token_ = start; return Error(DescribeLastError(), err); } yy71: yych = *++p; if (yybm[0+yych] & 64) { goto yy79; } if (yych <= ' ') { if (yych <= '\f') { if (yych == '\n') goto yy75; goto yy73; } else { if (yych <= '\r') goto yy76; if (yych <= 0x1F) goto yy73; goto yy77; } } else { if (yych <= '/') { if (yych == '$') goto yy78; goto yy73; } else { if (yych <= ':') goto yy80; if (yych <= '`') goto yy73; if (yych <= '{') goto yy81; goto yy73; } } yy72: ++p; { if (path) p = start; break; } yy73: ++p; yy74: { last_token_ = start; return Error("bad $-escape (literal $ must be written as $$)", err); } yy75: yych = *++p; if (yybm[0+yych] & 32) { goto yy75; } { continue; } yy76: yych = *++p; if (yych == '\n') goto yy82; goto yy74; yy77: ++p; { eval->AddText(StringPiece(" ", 1)); continue; } yy78: ++p; { eval->AddText(StringPiece("$", 1)); continue; } yy79: yych = *++p; if (yybm[0+yych] & 64) { goto yy79; } { eval->AddSpecial(StringPiece(start + 1, p - start - 1)); continue; } yy80: ++p; { eval->AddText(StringPiece(":", 1)); continue; } yy81: yych = *(q = ++p); if (yybm[0+yych] & 128) { goto yy83; } goto yy74; yy82: yych = *++p; if (yych == ' ') goto yy82; { continue; } yy83: yych = *++p; if (yybm[0+yych] & 128) { goto yy83; } if (yych == '}') goto yy84; p = q; goto yy74; yy84: ++p; { eval->AddSpecial(StringPiece(start + 2, p - start - 3)); continue; } } } last_token_ = start; ofs_ = p; if (path) EatWhitespace(); // Non-path strings end in newlines, so there's no whitespace to eat. 
return true; } ninja-1.13.2/src/lexer.h000066400000000000000000000053511510764045400147660ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_LEXER_H_ #define NINJA_LEXER_H_ #include "string_piece.h" // Windows may #define ERROR. #ifdef ERROR #undef ERROR #endif struct EvalString; struct Lexer { Lexer() {} /// Helper ctor useful for tests. explicit Lexer(const char* input); enum Token { ERROR, BUILD, COLON, DEFAULT, EQUALS, IDENT, INCLUDE, INDENT, NEWLINE, PIPE, PIPE2, PIPEAT, POOL, RULE, SUBNINJA, TEOF, }; /// Return a human-readable form of a token, used in error messages. static const char* TokenName(Token t); /// Return a human-readable token hint, used in error messages. static const char* TokenErrorHint(Token expected); /// If the last token read was an ERROR token, provide more info /// or the empty string. std::string DescribeLastError(); /// Start parsing some input. void Start(StringPiece filename, StringPiece input); /// Read a Token from the Token enum. Token ReadToken(); /// Rewind to the last read Token. void UnreadToken(); /// If the next token is \a token, read it and return true. bool PeekToken(Token token); /// Read a simple identifier (a rule or variable name). /// Returns false if a name can't be read. bool ReadIdent(std::string* out); /// Read a path (complete with $escapes). 
/// Returns false only on error, returned path may be empty if a delimiter /// (space, newline) is hit. bool ReadPath(EvalString* path, std::string* err) { return ReadEvalString(path, true, err); } /// Read the value side of a var = value line (complete with $escapes). /// Returns false only on error. bool ReadVarValue(EvalString* value, std::string* err) { return ReadEvalString(value, false, err); } /// Construct an error message with context. bool Error(const std::string& message, std::string* err); private: /// Skip past whitespace (called after each read token/ident/etc.). void EatWhitespace(); /// Read a $-escaped string. bool ReadEvalString(EvalString* eval, bool path, std::string* err); StringPiece filename_; StringPiece input_; const char* ofs_; const char* last_token_; }; #endif // NINJA_LEXER_H_ ninja-1.13.2/src/lexer.in.cc000066400000000000000000000145031510764045400155300ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lexer.h" #include #include "eval_env.h" #include "util.h" using namespace std; bool Lexer::Error(const string& message, string* err) { // Compute line/column. int line = 1; const char* line_start = input_.str_; for (const char* p = input_.str_; p < last_token_; ++p) { if (*p == '\n') { ++line; line_start = p + 1; } } int col = last_token_ ? 
(int)(last_token_ - line_start) : 0; char buf[1024]; snprintf(buf, sizeof(buf), "%s:%d: ", filename_.AsString().c_str(), line); *err = buf; *err += message + "\n"; // Add some context to the message. const int kTruncateColumn = 72; if (col > 0 && col < kTruncateColumn) { int len; bool truncated = true; for (len = 0; len < kTruncateColumn; ++len) { if (line_start[len] == 0 || line_start[len] == '\n') { truncated = false; break; } } *err += string(line_start, len); if (truncated) *err += "..."; *err += "\n"; *err += string(col, ' '); *err += "^ near here"; } return false; } Lexer::Lexer(const char* input) { Start("input", input); } void Lexer::Start(StringPiece filename, StringPiece input) { filename_ = filename; input_ = input; ofs_ = input_.str_; last_token_ = NULL; } const char* Lexer::TokenName(Token t) { switch (t) { case ERROR: return "lexing error"; case BUILD: return "'build'"; case COLON: return "':'"; case DEFAULT: return "'default'"; case EQUALS: return "'='"; case IDENT: return "identifier"; case INCLUDE: return "'include'"; case INDENT: return "indent"; case NEWLINE: return "newline"; case PIPE2: return "'||'"; case PIPE: return "'|'"; case PIPEAT: return "'|@'"; case POOL: return "'pool'"; case RULE: return "'rule'"; case SUBNINJA: return "'subninja'"; case TEOF: return "eof"; } return NULL; // not reached } const char* Lexer::TokenErrorHint(Token expected) { switch (expected) { case COLON: return " ($ also escapes ':')"; default: return ""; } } string Lexer::DescribeLastError() { if (last_token_) { switch (last_token_[0]) { case '\t': return "tabs are not allowed, use spaces"; } } return "lexing error"; } void Lexer::UnreadToken() { ofs_ = last_token_; } Lexer::Token Lexer::ReadToken() { const char* p = ofs_; const char* q; const char* start; Lexer::Token token; for (;;) { start = p; /*!re2c re2c:define:YYCTYPE = "unsigned char"; re2c:define:YYCURSOR = p; re2c:define:YYMARKER = q; re2c:yyfill:enable = 0; nul = "\000"; simple_varname = [a-zA-Z0-9_-]+; 
varname = [a-zA-Z0-9_.-]+; [ ]*"#"[^\000\n]*"\n" { continue; } [ ]*"\r\n" { token = NEWLINE; break; } [ ]*"\n" { token = NEWLINE; break; } [ ]+ { token = INDENT; break; } "build" { token = BUILD; break; } "pool" { token = POOL; break; } "rule" { token = RULE; break; } "default" { token = DEFAULT; break; } "=" { token = EQUALS; break; } ":" { token = COLON; break; } "|@" { token = PIPEAT; break; } "||" { token = PIPE2; break; } "|" { token = PIPE; break; } "include" { token = INCLUDE; break; } "subninja" { token = SUBNINJA; break; } varname { token = IDENT; break; } nul { token = TEOF; break; } [^] { token = ERROR; break; } */ } last_token_ = start; ofs_ = p; if (token != NEWLINE && token != TEOF) EatWhitespace(); return token; } bool Lexer::PeekToken(Token token) { Token t = ReadToken(); if (t == token) return true; UnreadToken(); return false; } void Lexer::EatWhitespace() { const char* p = ofs_; const char* q; for (;;) { ofs_ = p; /*!re2c [ ]+ { continue; } "$\r\n" { continue; } "$\n" { continue; } nul { break; } [^] { break; } */ } } bool Lexer::ReadIdent(string* out) { const char* p = ofs_; const char* start; for (;;) { start = p; /*!re2c varname { out->assign(start, p - start); break; } [^] { last_token_ = start; return false; } */ } last_token_ = start; ofs_ = p; EatWhitespace(); return true; } bool Lexer::ReadEvalString(EvalString* eval, bool path, string* err) { const char* p = ofs_; const char* q; const char* start; for (;;) { start = p; /*!re2c [^$ :\r\n|\000]+ { eval->AddText(StringPiece(start, p - start)); continue; } "\r\n" { if (path) p = start; break; } [ :|\n] { if (path) { p = start; break; } else { if (*start == '\n') break; eval->AddText(StringPiece(start, 1)); continue; } } "$$" { eval->AddText(StringPiece("$", 1)); continue; } "$ " { eval->AddText(StringPiece(" ", 1)); continue; } "$\r\n"[ ]* { continue; } "$\n"[ ]* { continue; } "${"varname"}" { eval->AddSpecial(StringPiece(start + 2, p - start - 3)); continue; } "$"simple_varname { 
eval->AddSpecial(StringPiece(start + 1, p - start - 1)); continue; } "$:" { eval->AddText(StringPiece(":", 1)); continue; } "$". { last_token_ = start; return Error("bad $-escape (literal $ must be written as $$)", err); } nul { last_token_ = start; return Error("unexpected EOF", err); } [^] { last_token_ = start; return Error(DescribeLastError(), err); } */ } last_token_ = start; ofs_ = p; if (path) EatWhitespace(); // Non-path strings end in newlines, so there's no whitespace to eat. return true; } ninja-1.13.2/src/lexer_test.cc000066400000000000000000000054301510764045400161610ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "lexer.h" #include "eval_env.h" #include "test.h" using namespace std; TEST(Lexer, ReadVarValue) { Lexer lexer("plain text $var $VaR ${x}\n"); EvalString eval; string err; EXPECT_TRUE(lexer.ReadVarValue(&eval, &err)); EXPECT_EQ("", err); EXPECT_EQ("[plain text ][$var][ ][$VaR][ ][$x]", eval.Serialize()); } TEST(Lexer, ReadEvalStringEscapes) { Lexer lexer("$ $$ab c$: $\ncde\n"); EvalString eval; string err; EXPECT_TRUE(lexer.ReadVarValue(&eval, &err)); EXPECT_EQ("", err); EXPECT_EQ("[ $ab c: cde]", eval.Serialize()); } TEST(Lexer, ReadIdent) { Lexer lexer("foo baR baz_123 foo-bar"); string ident; EXPECT_TRUE(lexer.ReadIdent(&ident)); EXPECT_EQ("foo", ident); EXPECT_TRUE(lexer.ReadIdent(&ident)); EXPECT_EQ("baR", ident); EXPECT_TRUE(lexer.ReadIdent(&ident)); EXPECT_EQ("baz_123", ident); EXPECT_TRUE(lexer.ReadIdent(&ident)); EXPECT_EQ("foo-bar", ident); } TEST(Lexer, ReadIdentCurlies) { // Verify that ReadIdent includes dots in the name, // but in an expansion $bar.dots stops at the dot. Lexer lexer("foo.dots $bar.dots ${bar.dots}\n"); string ident; EXPECT_TRUE(lexer.ReadIdent(&ident)); EXPECT_EQ("foo.dots", ident); EvalString eval; string err; EXPECT_TRUE(lexer.ReadVarValue(&eval, &err)); EXPECT_EQ("", err); EXPECT_EQ("[$bar][.dots ][$bar.dots]", eval.Serialize()); } TEST(Lexer, Error) { Lexer lexer("foo$\nbad $"); EvalString eval; string err; ASSERT_FALSE(lexer.ReadVarValue(&eval, &err)); EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n" "bad $\n" " ^ near here" , err); } TEST(Lexer, CommentEOF) { // Verify we don't run off the end of the string when the EOF is // mid-comment. Lexer lexer("# foo"); Lexer::Token token = lexer.ReadToken(); EXPECT_EQ(Lexer::ERROR, token); } TEST(Lexer, Tabs) { // Verify we print a useful error on a disallowed character. 
Lexer lexer(" \tfoobar"); Lexer::Token token = lexer.ReadToken(); EXPECT_EQ(Lexer::INDENT, token); token = lexer.ReadToken(); EXPECT_EQ(Lexer::ERROR, token); EXPECT_EQ("tabs are not allowed, use spaces", lexer.DescribeLastError()); } ninja-1.13.2/src/line_printer.cc000066400000000000000000000121411510764045400164720ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "line_printer.h" #include #include #ifdef _WIN32 #include #ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING #define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x4 #endif #else #include #include #include #include #endif #include "elide_middle.h" #include "util.h" using namespace std; LinePrinter::LinePrinter() : have_blank_line_(true), console_locked_(false) { const char* term = getenv("TERM"); #ifndef _WIN32 smart_terminal_ = isatty(1) && term && string(term) != "dumb"; #else if (term && string(term) == "dumb") { smart_terminal_ = false; } else { console_ = GetStdHandle(STD_OUTPUT_HANDLE); CONSOLE_SCREEN_BUFFER_INFO csbi; smart_terminal_ = GetConsoleScreenBufferInfo(console_, &csbi); } #endif supports_color_ = smart_terminal_; #ifdef _WIN32 // Try enabling ANSI escape sequence support on Windows 10 terminals. 
if (supports_color_) { DWORD mode; if (GetConsoleMode(console_, &mode)) { if (!SetConsoleMode(console_, mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)) { supports_color_ = false; } } } #endif if (!supports_color_) { const char* clicolor_force = getenv("CLICOLOR_FORCE"); supports_color_ = clicolor_force && std::string(clicolor_force) != "0"; } } void LinePrinter::Print(string to_print, LineType type) { if (console_locked_) { line_buffer_ = to_print; line_type_ = type; return; } if (smart_terminal_) { printf("\r"); // Print over previous line, if any. // On Windows, calling a C library function writing to stdout also handles // pausing the executable when the "Pause" key or Ctrl-S is pressed. } if (smart_terminal_ && type == ELIDE) { #ifdef _WIN32 CONSOLE_SCREEN_BUFFER_INFO csbi; GetConsoleScreenBufferInfo(console_, &csbi); ElideMiddleInPlace(to_print, static_cast(csbi.dwSize.X)); if (supports_color_) { // this means ENABLE_VIRTUAL_TERMINAL_PROCESSING // succeeded printf("%s\x1B[K", to_print.c_str()); // Clear to end of line. fflush(stdout); } else { // We don't want to have the cursor spamming back and forth, so instead of // printf use WriteConsoleOutput which updates the contents of the buffer, // but doesn't move the cursor position. COORD buf_size = { csbi.dwSize.X, 1 }; COORD zero_zero = { 0, 0 }; SMALL_RECT target = { csbi.dwCursorPosition.X, csbi.dwCursorPosition.Y, static_cast(csbi.dwCursorPosition.X + csbi.dwSize.X - 1), csbi.dwCursorPosition.Y }; vector char_data(csbi.dwSize.X); for (size_t i = 0; i < static_cast(csbi.dwSize.X); ++i) { char_data[i].Char.AsciiChar = i < to_print.size() ? to_print[i] : ' '; char_data[i].Attributes = csbi.wAttributes; } WriteConsoleOutput(console_, &char_data[0], buf_size, zero_zero, &target); } #else // Limit output to width of the terminal if provided so we don't cause // line-wrapping. 
winsize size; if ((ioctl(STDOUT_FILENO, TIOCGWINSZ, &size) == 0) && size.ws_col) { ElideMiddleInPlace(to_print, size.ws_col); } printf("%s", to_print.c_str()); printf("\x1B[K"); // Clear to end of line. fflush(stdout); #endif have_blank_line_ = false; } else { printf("%s\n", to_print.c_str()); fflush(stdout); } } void LinePrinter::PrintOrBuffer(const char* data, size_t size) { if (console_locked_) { output_buffer_.append(data, size); } else { // Avoid printf and C strings, since the actual output might contain null // bytes like UTF-16 does (yuck). fwrite(data, 1, size, stdout); } } void LinePrinter::PrintOnNewLine(const string& to_print) { if (console_locked_ && !line_buffer_.empty()) { output_buffer_.append(line_buffer_); output_buffer_.append(1, '\n'); line_buffer_.clear(); } if (!have_blank_line_) { PrintOrBuffer("\n", 1); } if (!to_print.empty()) { PrintOrBuffer(&to_print[0], to_print.size()); } have_blank_line_ = to_print.empty() || *to_print.rbegin() == '\n'; } void LinePrinter::SetConsoleLocked(bool locked) { if (locked == console_locked_) return; if (locked) PrintOnNewLine(""); console_locked_ = locked; if (!locked) { PrintOnNewLine(output_buffer_); if (!line_buffer_.empty()) { Print(line_buffer_, line_type_); } output_buffer_.clear(); line_buffer_.clear(); } } ninja-1.13.2/src/line_printer.h000066400000000000000000000044001510764045400163330ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_LINE_PRINTER_H_ #define NINJA_LINE_PRINTER_H_ #include #include /// Prints lines of text, possibly overprinting previously printed lines /// if the terminal supports it. struct LinePrinter { LinePrinter(); bool is_smart_terminal() const { return smart_terminal_; } void set_smart_terminal(bool smart) { smart_terminal_ = smart; } bool supports_color() const { return supports_color_; } enum LineType { FULL, ELIDE }; /// Overprints the current line. If type is ELIDE, elides to_print to fit on /// one line. void Print(std::string to_print, LineType type); /// Prints a string on a new line, not overprinting previous output. void PrintOnNewLine(const std::string& to_print); /// Lock or unlock the console. Any output sent to the LinePrinter while the /// console is locked will not be printed until it is unlocked. void SetConsoleLocked(bool locked); private: /// Whether we can do fancy terminal control codes. bool smart_terminal_; /// Whether we can use ISO 6429 (ANSI) color sequences. bool supports_color_; /// Whether the caret is at the beginning of a blank line. bool have_blank_line_; /// Whether console is locked. bool console_locked_; /// Buffered current line while console is locked. std::string line_buffer_; /// Buffered line type while console is locked. LineType line_type_; /// Buffered console output while console is locked. std::string output_buffer_; #ifdef _WIN32 void* console_; #endif /// Print the given data to the console, or buffer it if it is locked. void PrintOrBuffer(const char *data, size_t size); }; #endif // NINJA_LINE_PRINTER_H_ ninja-1.13.2/src/load_status.h000066400000000000000000000014031510764045400161630ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_LOAD_STATUS_H_ #define NINJA_LOAD_STATUS_H_ enum LoadStatus { LOAD_ERROR, LOAD_SUCCESS, LOAD_NOT_FOUND, }; #endif // NINJA_LOAD_STATUS_H_ ninja-1.13.2/src/manifest_parser.cc000066400000000000000000000277401510764045400171750ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "manifest_parser.h" #include #include #include #include #include #include "graph.h" #include "state.h" #include "util.h" #include "version.h" using namespace std; ManifestParser::ManifestParser(State* state, FileReader* file_reader, ManifestParserOptions options) : Parser(state, file_reader), options_(options), quiet_(false) { env_ = &state->bindings_; } bool ManifestParser::Parse(const string& filename, const string& input, string* err) { lexer_.Start(filename, input); for (;;) { Lexer::Token token = lexer_.ReadToken(); switch (token) { case Lexer::POOL: if (!ParsePool(err)) return false; break; case Lexer::BUILD: if (!ParseEdge(err)) return false; break; case Lexer::RULE: if (!ParseRule(err)) return false; break; case Lexer::DEFAULT: if (!ParseDefault(err)) return false; break; case Lexer::IDENT: { lexer_.UnreadToken(); string name; EvalString let_value; if (!ParseLet(&name, &let_value, err)) return false; string value = let_value.Evaluate(env_); // Check ninja_required_version immediately so we can exit // before encountering any syntactic surprises. 
if (name == "ninja_required_version") CheckNinjaVersion(value); env_->AddBinding(name, value); break; } case Lexer::INCLUDE: if (!ParseFileInclude(false, err)) return false; break; case Lexer::SUBNINJA: if (!ParseFileInclude(true, err)) return false; break; case Lexer::ERROR: { return lexer_.Error(lexer_.DescribeLastError(), err); } case Lexer::TEOF: return true; case Lexer::NEWLINE: break; default: return lexer_.Error(string("unexpected ") + Lexer::TokenName(token), err); } } return false; // not reached } bool ManifestParser::ParsePool(string* err) { string name; if (!lexer_.ReadIdent(&name)) return lexer_.Error("expected pool name", err); if (!ExpectToken(Lexer::NEWLINE, err)) return false; if (state_->LookupPool(name) != NULL) return lexer_.Error("duplicate pool '" + name + "'", err); int depth = -1; while (lexer_.PeekToken(Lexer::INDENT)) { string key; EvalString value; if (!ParseLet(&key, &value, err)) return false; if (key == "depth") { string depth_string = value.Evaluate(env_); depth = atoi(depth_string.c_str()); if (depth < 0) return lexer_.Error("invalid pool depth", err); } else { return lexer_.Error("unexpected variable '" + key + "'", err); } } if (depth < 0) return lexer_.Error("expected 'depth =' line", err); state_->AddPool(new Pool(name, depth)); return true; } bool ManifestParser::ParseRule(string* err) { string name; if (!lexer_.ReadIdent(&name)) return lexer_.Error("expected rule name", err); if (!ExpectToken(Lexer::NEWLINE, err)) return false; if (env_->LookupRuleCurrentScope(name) != NULL) return lexer_.Error("duplicate rule '" + name + "'", err); auto rule = std::unique_ptr(new Rule(name)); while (lexer_.PeekToken(Lexer::INDENT)) { string key; EvalString value; if (!ParseLet(&key, &value, err)) return false; if (Rule::IsReservedBinding(key)) { rule->AddBinding(key, value); } else { // Die on other keyvals for now; revisit if we want to add a // scope here. 
return lexer_.Error("unexpected variable '" + key + "'", err); } } if (rule->bindings_["rspfile"].empty() != rule->bindings_["rspfile_content"].empty()) { return lexer_.Error("rspfile and rspfile_content need to be " "both specified", err); } if (rule->bindings_["command"].empty()) return lexer_.Error("expected 'command =' line", err); env_->AddRule(std::move(rule)); return true; } bool ManifestParser::ParseLet(string* key, EvalString* value, string* err) { if (!lexer_.ReadIdent(key)) return lexer_.Error("expected variable name", err); if (!ExpectToken(Lexer::EQUALS, err)) return false; if (!lexer_.ReadVarValue(value, err)) return false; return true; } bool ManifestParser::ParseDefault(string* err) { EvalString eval; if (!lexer_.ReadPath(&eval, err)) return false; if (eval.empty()) return lexer_.Error("expected target name", err); do { string path = eval.Evaluate(env_); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; // Unused because this only does lookup. CanonicalizePath(&path, &slash_bits); std::string default_err; if (!state_->AddDefault(path, &default_err)) return lexer_.Error(default_err, err); eval.Clear(); if (!lexer_.ReadPath(&eval, err)) return false; } while (!eval.empty()); return ExpectToken(Lexer::NEWLINE, err); } bool ManifestParser::ParseEdge(string* err) { ins_.clear(); outs_.clear(); validations_.clear(); { EvalString out; if (!lexer_.ReadPath(&out, err)) return false; while (!out.empty()) { outs_.push_back(std::move(out)); out.Clear(); if (!lexer_.ReadPath(&out, err)) return false; } } // Add all implicit outs, counting how many as we go. 
int implicit_outs = 0; if (lexer_.PeekToken(Lexer::PIPE)) { for (;;) { EvalString out; if (!lexer_.ReadPath(&out, err)) return false; if (out.empty()) break; outs_.push_back(std::move(out)); ++implicit_outs; } } if (outs_.empty()) return lexer_.Error("expected path", err); if (!ExpectToken(Lexer::COLON, err)) return false; string rule_name; if (!lexer_.ReadIdent(&rule_name)) return lexer_.Error("expected build command name", err); const Rule* rule = env_->LookupRule(rule_name); if (!rule) return lexer_.Error("unknown build rule '" + rule_name + "'", err); for (;;) { // XXX should we require one path here? EvalString in; if (!lexer_.ReadPath(&in, err)) return false; if (in.empty()) break; ins_.push_back(std::move(in)); } // Add all implicit deps, counting how many as we go. int implicit = 0; if (lexer_.PeekToken(Lexer::PIPE)) { for (;;) { EvalString in; if (!lexer_.ReadPath(&in, err)) return false; if (in.empty()) break; ins_.push_back(std::move(in)); ++implicit; } } // Add all order-only deps, counting how many as we go. int order_only = 0; if (lexer_.PeekToken(Lexer::PIPE2)) { for (;;) { EvalString in; if (!lexer_.ReadPath(&in, err)) return false; if (in.empty()) break; ins_.push_back(std::move(in)); ++order_only; } } // Add all validations, counting how many as we go. if (lexer_.PeekToken(Lexer::PIPEAT)) { for (;;) { EvalString validation; if (!lexer_.ReadPath(&validation, err)) return false; if (validation.empty()) break; validations_.push_back(std::move(validation)); } } if (!ExpectToken(Lexer::NEWLINE, err)) return false; // Bindings on edges are rare, so allocate per-edge envs only when needed. bool has_indent_token = lexer_.PeekToken(Lexer::INDENT); BindingEnv* env = has_indent_token ? 
new BindingEnv(env_) : env_; while (has_indent_token) { string key; EvalString val; if (!ParseLet(&key, &val, err)) return false; env->AddBinding(key, val.Evaluate(env_)); has_indent_token = lexer_.PeekToken(Lexer::INDENT); } Edge* edge = state_->AddEdge(rule); edge->env_ = env; string pool_name = edge->GetBinding("pool"); if (!pool_name.empty()) { Pool* pool = state_->LookupPool(pool_name); if (pool == NULL) return lexer_.Error("unknown pool name '" + pool_name + "'", err); edge->pool_ = pool; } edge->outputs_.reserve(outs_.size()); for (size_t i = 0, e = outs_.size(); i != e; ++i) { string path = outs_[i].Evaluate(env); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); if (!state_->AddOut(edge, path, slash_bits, err)) { lexer_.Error(std::string(*err), err); return false; } } if (edge->outputs_.empty()) { // All outputs of the edge are already created by other edges. Don't add // this edge. Do this check before input nodes are connected to the edge. 
state_->edges_.pop_back(); delete edge; return true; } edge->implicit_outs_ = implicit_outs; edge->inputs_.reserve(ins_.size()); for (vector::iterator i = ins_.begin(); i != ins_.end(); ++i) { string path = i->Evaluate(env); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); state_->AddIn(edge, path, slash_bits); } edge->implicit_deps_ = implicit; edge->order_only_deps_ = order_only; edge->validations_.reserve(validations_.size()); for (std::vector::iterator v = validations_.begin(); v != validations_.end(); ++v) { string path = v->Evaluate(env); if (path.empty()) return lexer_.Error("empty path", err); uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); state_->AddValidation(edge, path, slash_bits); } if (options_.phony_cycle_action_ == kPhonyCycleActionWarn && edge->maybe_phonycycle_diagnostic()) { // CMake 2.8.12.x and 3.0.x incorrectly write phony build statements // that reference themselves. Ninja used to tolerate these in the // build graph but that has since been fixed. Filter them out to // support users of those old CMake versions. Node* out = edge->outputs_[0]; vector::iterator new_end = remove(edge->inputs_.begin(), edge->inputs_.end(), out); if (new_end != edge->inputs_.end()) { edge->inputs_.erase(new_end, edge->inputs_.end()); if (!quiet_) { Warning("phony target '%s' names itself as an input; " "ignoring [-w phonycycle=warn]", out->path().c_str()); } } } // Lookup, validate, and save any dyndep binding. It will be used later // to load generated dependency information dynamically, but it must // be one of our manifest-specified inputs. 
string dyndep = edge->GetUnescapedDyndep(); if (!dyndep.empty()) { uint64_t slash_bits; CanonicalizePath(&dyndep, &slash_bits); edge->dyndep_ = state_->GetNode(dyndep, slash_bits); edge->dyndep_->set_dyndep_pending(true); vector::iterator dgi = std::find(edge->inputs_.begin(), edge->inputs_.end(), edge->dyndep_); if (dgi == edge->inputs_.end()) { return lexer_.Error("dyndep '" + dyndep + "' is not an input", err); } assert(!edge->dyndep_->generated_by_dep_loader()); } return true; } bool ManifestParser::ParseFileInclude(bool new_scope, string* err) { EvalString eval; if (!lexer_.ReadPath(&eval, err)) return false; string path = eval.Evaluate(env_); if (subparser_ == nullptr) { subparser_.reset(new ManifestParser(state_, file_reader_, options_)); } if (new_scope) { subparser_->env_ = new BindingEnv(env_); } else { subparser_->env_ = env_; } if (!subparser_->Load(path, err, &lexer_)) return false; if (!ExpectToken(Lexer::NEWLINE, err)) return false; return true; } ninja-1.13.2/src/manifest_parser.h000066400000000000000000000044601510764045400170310ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef NINJA_MANIFEST_PARSER_H_ #define NINJA_MANIFEST_PARSER_H_ #include "parser.h" #include #include struct BindingEnv; struct EvalString; enum DupeEdgeAction { kDupeEdgeActionWarn, kDupeEdgeActionError, }; enum PhonyCycleAction { kPhonyCycleActionWarn, kPhonyCycleActionError, }; struct ManifestParserOptions { PhonyCycleAction phony_cycle_action_ = kPhonyCycleActionWarn; }; /// Parses .ninja files. struct ManifestParser : public Parser { ManifestParser(State* state, FileReader* file_reader, ManifestParserOptions options = ManifestParserOptions()); /// Parse a text string of input. Used by tests. bool ParseTest(const std::string& input, std::string* err) { quiet_ = true; return Parse("input", input, err); } private: /// Parse a file, given its contents as a string. bool Parse(const std::string& filename, const std::string& input, std::string* err); /// Parse various statement types. bool ParsePool(std::string* err); bool ParseRule(std::string* err); bool ParseLet(std::string* key, EvalString* val, std::string* err); bool ParseEdge(std::string* err); bool ParseDefault(std::string* err); /// Parse either a 'subninja' or 'include' line. bool ParseFileInclude(bool new_scope, std::string* err); BindingEnv* env_; ManifestParserOptions options_; bool quiet_; // ins_/out_/validations_ are reused across invocations to ParseEdge(), // to save on the otherwise constant memory reallocation. // subparser_ is reused solely to get better reuse out ins_/outs_/validation_. std::unique_ptr subparser_; std::vector ins_, outs_, validations_; }; #endif // NINJA_MANIFEST_PARSER_H_ ninja-1.13.2/src/manifest_parser_perftest.cc000066400000000000000000000071461510764045400211070ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Tests manifest parser performance. Expects to be run in ninja's root // directory. #include #include #include #include #include #ifdef _WIN32 #include "getopt.h" #include #elif defined(_AIX) #include "getopt.h" #include #else #include #include #endif #include "disk_interface.h" #include "graph.h" #include "manifest_parser.h" #include "metrics.h" #include "state.h" #include "util.h" using namespace std; bool WriteFakeManifests(const string& dir, string* err) { RealDiskInterface disk_interface; TimeStamp mtime = disk_interface.Stat(dir + "/build.ninja", err); if (mtime != 0) // 0 means that the file doesn't exist yet. return mtime != -1; string command = "python misc/write_fake_manifests.py " + dir; printf("Creating manifest data..."); fflush(stdout); int exit_code = system(command.c_str()); printf("done.\n"); if (exit_code != 0) *err = "Failed to run " + command; return exit_code == 0; } int LoadManifests(bool measure_command_evaluation) { string err; RealDiskInterface disk_interface; State state; ManifestParser parser(&state, &disk_interface); if (!parser.Load("build.ninja", &err)) { fprintf(stderr, "Failed to read test data: %s\n", err.c_str()); exit(1); } // Doing an empty build involves reading the manifest and evaluating all // commands required for the requested targets. So include command // evaluation in the perftest by default. 
int optimization_guard = 0; if (measure_command_evaluation) for (size_t i = 0; i < state.edges_.size(); ++i) optimization_guard += state.edges_[i]->EvaluateCommand().size(); return optimization_guard; } int main(int argc, char* argv[]) { bool measure_command_evaluation = true; int opt; while ((opt = getopt(argc, argv, const_cast("fh"))) != -1) { switch (opt) { case 'f': measure_command_evaluation = false; break; case 'h': default: printf("usage: manifest_parser_perftest\n" "\n" "options:\n" " -f only measure manifest load time, not command evaluation time\n" ); return 1; } } const char kManifestDir[] = "build/manifest_perftest"; string err; if (!WriteFakeManifests(kManifestDir, &err)) { fprintf(stderr, "Failed to write test data: %s\n", err.c_str()); return 1; } if (chdir(kManifestDir) < 0) Fatal("chdir: %s", strerror(errno)); const int kNumRepetitions = 5; vector times; for (int i = 0; i < kNumRepetitions; ++i) { int64_t start = GetTimeMillis(); int optimization_guard = LoadManifests(measure_command_evaluation); int delta = (int)(GetTimeMillis() - start); printf("%dms (hash: %x)\n", delta, optimization_guard); times.push_back(delta); } int min = *min_element(times.begin(), times.end()); int max = *max_element(times.begin(), times.end()); float total = accumulate(times.begin(), times.end(), 0.0f); printf("min %dms max %dms avg %.1fms\n", min, max, total / times.size()); } ninja-1.13.2/src/manifest_parser_test.cc000066400000000000000000000776431510764045400202430ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "manifest_parser.h" #include #include #include "graph.h" #include "state.h" #include "test.h" using namespace std; struct ParserTest : public testing::Test { void AssertParse(const char* input) { ManifestParser parser(&state, &fs_); string err; EXPECT_TRUE(parser.ParseTest(input, &err)); ASSERT_EQ("", err); VerifyGraph(state); } State state; VirtualFileSystem fs_; }; TEST_F(ParserTest, Empty) { ASSERT_NO_FATAL_FAILURE(AssertParse("")); } TEST_F(ParserTest, Rules) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "\n" "rule date\n" " command = date > $out\n" "\n" "build result: cat in_1.cc in-2.O\n")); ASSERT_EQ(3u, state.bindings_.GetRules().size()); const auto& rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat", rule->name()); EXPECT_EQ("[cat ][$in][ > ][$out]", rule->GetBinding("command")->Serialize()); } TEST_F(ParserTest, RuleAttributes) { // Check that all of the allowed rule attributes are parsed ok. 
ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = a\n" " depfile = a\n" " deps = a\n" " description = a\n" " generator = a\n" " restat = a\n" " rspfile = a\n" " rspfile_content = a\n" )); } TEST_F(ParserTest, IgnoreIndentedComments) { ASSERT_NO_FATAL_FAILURE(AssertParse( " #indented comment\n" "rule cat\n" " command = cat $in > $out\n" " #generator = 1\n" " restat = 1 # comment\n" " #comment\n" "build result: cat in_1.cc in-2.O\n" " #comment\n")); ASSERT_EQ(2u, state.bindings_.GetRules().size()); const auto& rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat", rule->name()); Edge* edge = state.GetNode("result", 0)->in_edge(); EXPECT_TRUE(edge->GetBindingBool("restat")); EXPECT_FALSE(edge->GetBindingBool("generator")); } TEST_F(ParserTest, IgnoreIndentedBlankLines) { // the indented blanks used to cause parse errors ASSERT_NO_FATAL_FAILURE(AssertParse( " \n" "rule cat\n" " command = cat $in > $out\n" " \n" "build result: cat in_1.cc in-2.O\n" " \n" "variable=1\n")); // the variable must be in the top level environment EXPECT_EQ("1", state.bindings_.LookupVariable("variable")); } TEST_F(ParserTest, ResponseFiles) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat_rsp\n" " command = cat $rspfile > $out\n" " rspfile = $rspfile\n" " rspfile_content = $in\n" "\n" "build out: cat_rsp in\n" " rspfile=out.rsp\n")); ASSERT_EQ(2u, state.bindings_.GetRules().size()); const auto& rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat_rsp", rule->name()); EXPECT_EQ("[cat ][$rspfile][ > ][$out]", rule->GetBinding("command")->Serialize()); EXPECT_EQ("[$rspfile]", rule->GetBinding("rspfile")->Serialize()); EXPECT_EQ("[$in]", rule->GetBinding("rspfile_content")->Serialize()); } TEST_F(ParserTest, InNewline) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat_rsp\n" " command = cat $in_newline > $out\n" "\n" "build out: cat_rsp in in2\n" " rspfile=out.rsp\n")); ASSERT_EQ(2u, state.bindings_.GetRules().size()); const auto& rule = 
state.bindings_.GetRules().begin()->second; EXPECT_EQ("cat_rsp", rule->name()); EXPECT_EQ("[cat ][$in_newline][ > ][$out]", rule->GetBinding("command")->Serialize()); Edge* edge = state.edges_[0]; EXPECT_EQ("cat in\nin2 > out", edge->EvaluateCommand()); } TEST_F(ParserTest, Variables) { ASSERT_NO_FATAL_FAILURE(AssertParse( "l = one-letter-test\n" "rule link\n" " command = ld $l $extra $with_under -o $out $in\n" "\n" "extra = -pthread\n" "with_under = -under\n" "build a: link b c\n" "nested1 = 1\n" "nested2 = $nested1/2\n" "build supernested: link x\n" " extra = $nested2/3\n")); ASSERT_EQ(2u, state.edges_.size()); Edge* edge = state.edges_[0]; EXPECT_EQ("ld one-letter-test -pthread -under -o a b c", edge->EvaluateCommand()); EXPECT_EQ("1/2", state.bindings_.LookupVariable("nested2")); edge = state.edges_[1]; EXPECT_EQ("ld one-letter-test 1/2/3 -under -o supernested x", edge->EvaluateCommand()); } TEST_F(ParserTest, VariableScope) { ASSERT_NO_FATAL_FAILURE(AssertParse( "foo = bar\n" "rule cmd\n" " command = cmd $foo $in $out\n" "\n" "build inner: cmd a\n" " foo = baz\n" "build outer: cmd b\n" "\n" // Extra newline after build line tickles a regression. 
)); ASSERT_EQ(2u, state.edges_.size()); EXPECT_EQ("cmd baz a inner", state.edges_[0]->EvaluateCommand()); EXPECT_EQ("cmd bar b outer", state.edges_[1]->EvaluateCommand()); } TEST_F(ParserTest, Continuation) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule link\n" " command = foo bar $\n" " baz\n" "\n" "build a: link c $\n" " d e f\n")); ASSERT_EQ(2u, state.bindings_.GetRules().size()); const auto& rule = state.bindings_.GetRules().begin()->second; EXPECT_EQ("link", rule->name()); EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize()); } TEST_F(ParserTest, Backslash) { ASSERT_NO_FATAL_FAILURE(AssertParse( "foo = bar\\baz\n" "foo2 = bar\\ baz\n" )); EXPECT_EQ("bar\\baz", state.bindings_.LookupVariable("foo")); EXPECT_EQ("bar\\ baz", state.bindings_.LookupVariable("foo2")); } TEST_F(ParserTest, Comment) { ASSERT_NO_FATAL_FAILURE(AssertParse( "# this is a comment\n" "foo = not # a comment\n")); EXPECT_EQ("not # a comment", state.bindings_.LookupVariable("foo")); } TEST_F(ParserTest, Dollars) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule foo\n" " command = ${out}bar$$baz$$$\n" "blah\n" "x = $$dollar\n" "build $x: foo y\n" )); EXPECT_EQ("$dollar", state.bindings_.LookupVariable("x")); #ifdef _WIN32 EXPECT_EQ("$dollarbar$baz$blah", state.edges_[0]->EvaluateCommand()); #else EXPECT_EQ("'$dollar'bar$baz$blah", state.edges_[0]->EvaluateCommand()); #endif } TEST_F(ParserTest, EscapeSpaces) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule spaces\n" " command = something\n" "build foo$ bar: spaces $$one two$$$ three\n" )); EXPECT_TRUE(state.LookupNode("foo bar")); EXPECT_EQ(state.edges_[0]->outputs_[0]->path(), "foo bar"); EXPECT_EQ(state.edges_[0]->inputs_[0]->path(), "$one"); EXPECT_EQ(state.edges_[0]->inputs_[1]->path(), "two$ three"); EXPECT_EQ(state.edges_[0]->EvaluateCommand(), "something"); } TEST_F(ParserTest, CanonicalizeFile) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build out: cat in/1 in//2\n" "build in/1: cat\n" "build 
in/2: cat\n")); EXPECT_TRUE(state.LookupNode("in/1")); EXPECT_TRUE(state.LookupNode("in/2")); EXPECT_FALSE(state.LookupNode("in//1")); EXPECT_FALSE(state.LookupNode("in//2")); } #ifdef _WIN32 TEST_F(ParserTest, CanonicalizeFileBackslashes) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build out: cat in\\1 in\\\\2\n" "build in\\1: cat\n" "build in\\2: cat\n")); Node* node = state.LookupNode("in/1");; EXPECT_TRUE(node); EXPECT_EQ(1, node->slash_bits()); node = state.LookupNode("in/2"); EXPECT_TRUE(node); EXPECT_EQ(1, node->slash_bits()); EXPECT_FALSE(state.LookupNode("in//1")); EXPECT_FALSE(state.LookupNode("in//2")); } #endif TEST_F(ParserTest, PathVariables) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "dir = out\n" "build $dir/exe: cat src\n")); EXPECT_FALSE(state.LookupNode("$dir/exe")); EXPECT_TRUE(state.LookupNode("out/exe")); } TEST_F(ParserTest, CanonicalizePaths) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build ./out.o: cat ./bar/baz/../foo.cc\n")); EXPECT_FALSE(state.LookupNode("./out.o")); EXPECT_TRUE(state.LookupNode("out.o")); EXPECT_FALSE(state.LookupNode("./bar/baz/../foo.cc")); EXPECT_TRUE(state.LookupNode("bar/foo.cc")); } #ifdef _WIN32 TEST_F(ParserTest, CanonicalizePathsBackslashes) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build ./out.o: cat ./bar/baz/../foo.cc\n" "build .\\out2.o: cat .\\bar/baz\\..\\foo.cc\n" "build .\\out3.o: cat .\\bar\\baz\\..\\foo3.cc\n" )); EXPECT_FALSE(state.LookupNode("./out.o")); EXPECT_FALSE(state.LookupNode(".\\out2.o")); EXPECT_FALSE(state.LookupNode(".\\out3.o")); EXPECT_TRUE(state.LookupNode("out.o")); EXPECT_TRUE(state.LookupNode("out2.o")); EXPECT_TRUE(state.LookupNode("out3.o")); EXPECT_FALSE(state.LookupNode("./bar/baz/../foo.cc")); EXPECT_FALSE(state.LookupNode(".\\bar/baz\\..\\foo.cc")); EXPECT_FALSE(state.LookupNode(".\\bar/baz\\..\\foo3.cc")); Node* node = 
state.LookupNode("bar/foo.cc"); EXPECT_TRUE(node); EXPECT_EQ(0, node->slash_bits()); node = state.LookupNode("bar/foo3.cc"); EXPECT_TRUE(node); EXPECT_EQ(1, node->slash_bits()); } #endif TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) { const char kInput[] = "rule cat\n" " command = cat $in > $out\n" "build out1 out2: cat in1\n" "build out1: cat in2\n" "build final: cat out1\n"; ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:5: multiple rules generate out1\n", err); } TEST_F(ParserTest, DuplicateEdgeInIncludedFile) { fs_.Create("sub.ninja", "rule cat\n" " command = cat $in > $out\n" "build out1 out2: cat in1\n" "build out1: cat in2\n" "build final: cat out1\n"); const char kInput[] = "subninja sub.ninja\n"; ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("sub.ninja:5: multiple rules generate out1\n", err); } TEST_F(ParserTest, PhonySelfReferenceIgnored) { ASSERT_NO_FATAL_FAILURE(AssertParse( "build a: phony a\n" )); Node* node = state.LookupNode("a"); Edge* edge = node->in_edge(); ASSERT_TRUE(edge->inputs_.empty()); } TEST_F(ParserTest, PhonySelfReferenceKept) { const char kInput[] = "build a: phony a\n"; ManifestParserOptions parser_opts; parser_opts.phony_cycle_action_ = kPhonyCycleActionError; ManifestParser parser(&state, &fs_, parser_opts); string err; EXPECT_TRUE(parser.ParseTest(kInput, &err)); EXPECT_EQ("", err); Node* node = state.LookupNode("a"); Edge* edge = node->in_edge(); ASSERT_EQ(edge->inputs_.size(), size_t(1)); ASSERT_EQ(edge->inputs_[0], node); } TEST_F(ParserTest, ReservedWords) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule build\n" " command = rule run $out\n" "build subninja: build include default foo.cc\n" "default subninja\n")); } TEST_F(ParserTest, Errors) { { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest(string("subn", 4), &err)); EXPECT_EQ("input:1: 
expected '=', got eof\n" "subn\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("foobar", &err)); EXPECT_EQ("input:1: expected '=', got eof\n" "foobar\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x 3", &err)); EXPECT_EQ("input:1: expected '=', got identifier\n" "x 3\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x = 3", &err)); EXPECT_EQ("input:1: unexpected EOF\n" "x = 3\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x = 3\ny 2", &err)); EXPECT_EQ("input:2: expected '=', got identifier\n" "y 2\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x = $", &err)); EXPECT_EQ("input:1: bad $-escape (literal $ must be written as $$)\n" "x = $\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x = $\n $[\n", &err)); EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n" " $[\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("x = a$\n b$\n $\n", &err)); EXPECT_EQ("input:4: unexpected EOF\n" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("build\n", &err)); EXPECT_EQ("input:1: expected path\n" "build\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("build x: y z\n", &err)); EXPECT_EQ("input:1: unknown build rule 'y'\n" "build x: y z\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, 
NULL); string err; EXPECT_FALSE(parser.ParseTest("build x:: y z\n", &err)); EXPECT_EQ("input:1: expected build command name\n" "build x:: y z\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n command = cat ok\n" "build x: cat $\n :\n", &err)); EXPECT_EQ("input:4: expected newline, got ':'\n" " :\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n", &err)); EXPECT_EQ("input:2: expected 'command =' line\n", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n" " command = echo\n" "rule cat\n" " command = echo\n", &err)); EXPECT_EQ("input:3: duplicate rule 'cat'\n" "rule cat\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n" " command = echo\n" " rspfile = cat.rsp\n", &err)); EXPECT_EQ( "input:4: rspfile and rspfile_content need to be both specified\n", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n" " command = ${fafsd\n" "foo = bar\n", &err)); EXPECT_EQ("input:2: bad $-escape (literal $ must be written as $$)\n" " command = ${fafsd\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n" " command = cat\n" "build $.: cat foo\n", &err)); EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n" "build $.: cat foo\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cat\n" " command = cat\n" "build $: cat foo\n", &err)); EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n" "build $: cat foo\n" " ^ near here" , err); } { State 
local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule %foo\n", &err)); EXPECT_EQ("input:1: expected rule name\n" "rule %foo\n" " ^ near here", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cc\n" " command = foo\n" " othervar = bar\n", &err)); EXPECT_EQ("input:3: unexpected variable 'othervar'\n" " othervar = bar\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n" "build $.: cc bar.cc\n", &err)); EXPECT_EQ("input:3: bad $-escape (literal $ must be written as $$)\n" "build $.: cc bar.cc\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n && bar", &err)); EXPECT_EQ("input:3: expected variable name\n" " && bar\n" " ^ near here", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule cc\n command = foo\n" "build $: cc bar.cc\n", &err)); EXPECT_EQ("input:3: expected ':', got newline ($ also escapes ':')\n" "build $: cc bar.cc\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("default\n", &err)); EXPECT_EQ("input:1: expected target name\n" "default\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("default nonexistent\n", &err)); EXPECT_EQ("input:1: unknown target 'nonexistent'\n" "default nonexistent\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule r\n command = r\n" "build b: r\n" "default b:\n", &err)); EXPECT_EQ("input:4: expected newline, got ':'\n" "default b:\n" " ^ near here" , err); } { State local_state; 
ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("default $a\n", &err)); EXPECT_EQ("input:1: empty path\n" "default $a\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("rule r\n" " command = r\n" "build $a: r $c\n", &err)); // XXX the line number is wrong; we should evaluate paths in ParseEdge // as we see them, not after we've read them all! EXPECT_EQ("input:4: empty path\n", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; // the indented blank line must terminate the rule // this also verifies that "unexpected (token)" errors are correct EXPECT_FALSE(parser.ParseTest("rule r\n" " command = r\n" " \n" " generator = 1\n", &err)); EXPECT_EQ("input:4: unexpected indent\n", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("pool\n", &err)); EXPECT_EQ("input:1: expected pool name\n" "pool\n" " ^ near here", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("pool foo\n", &err)); EXPECT_EQ("input:2: expected 'depth =' line\n", err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("pool foo\n" " depth = 4\n" "pool foo\n", &err)); EXPECT_EQ("input:3: duplicate pool 'foo'\n" "pool foo\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("pool foo\n" " depth = -1\n", &err)); EXPECT_EQ("input:2: invalid pool depth\n" " depth = -1\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_FALSE(parser.ParseTest("pool foo\n" " bar = 1\n", &err)); EXPECT_EQ("input:2: unexpected variable 'bar'\n" " bar = 1\n" " ^ near here" , err); } { State local_state; ManifestParser parser(&local_state, NULL); string err; // 
Pool names are dereferenced at edge parsing time. EXPECT_FALSE(parser.ParseTest("rule run\n" " command = echo\n" " pool = unnamed_pool\n" "build out: run in\n", &err)); EXPECT_EQ("input:5: unknown pool name 'unnamed_pool'\n", err); } } TEST_F(ParserTest, MissingInput) { State local_state; ManifestParser parser(&local_state, &fs_); string err; EXPECT_FALSE(parser.Load("build.ninja", &err)); EXPECT_EQ("loading 'build.ninja': No such file or directory", err); } TEST_F(ParserTest, MultipleOutputs) { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_TRUE(parser.ParseTest("rule cc\n command = foo\n depfile = bar\n" "build a.o b.o: cc c.cc\n", &err)); EXPECT_EQ("", err); } TEST_F(ParserTest, MultipleOutputsWithDeps) { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_TRUE(parser.ParseTest("rule cc\n command = foo\n deps = gcc\n" "build a.o b.o: cc c.cc\n", &err)); EXPECT_EQ("", err); } TEST_F(ParserTest, SubNinja) { fs_.Create("test.ninja", "var = inner\n" "build $builddir/inner: varref\n"); ASSERT_NO_FATAL_FAILURE(AssertParse( "builddir = some_dir/\n" "rule varref\n" " command = varref $var\n" "var = outer\n" "build $builddir/outer: varref\n" "subninja test.ninja\n" "build $builddir/outer2: varref\n")); ASSERT_EQ(1u, fs_.files_read_.size()); EXPECT_EQ("test.ninja", fs_.files_read_[0]); EXPECT_TRUE(state.LookupNode("some_dir/outer")); // Verify our builddir setting is inherited. 
EXPECT_TRUE(state.LookupNode("some_dir/inner")); ASSERT_EQ(3u, state.edges_.size()); EXPECT_EQ("varref outer", state.edges_[0]->EvaluateCommand()); EXPECT_EQ("varref inner", state.edges_[1]->EvaluateCommand()); EXPECT_EQ("varref outer", state.edges_[2]->EvaluateCommand()); } TEST_F(ParserTest, MissingSubNinja) { ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest("subninja foo.ninja\n", &err)); EXPECT_EQ("input:1: loading 'foo.ninja': No such file or directory\n" "subninja foo.ninja\n" " ^ near here" , err); } TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) { // Test that rules are scoped to subninjas. fs_.Create("test.ninja", "rule cat\n" " command = cat\n"); ManifestParser parser(&state, &fs_); string err; EXPECT_TRUE(parser.ParseTest("rule cat\n" " command = cat\n" "subninja test.ninja\n", &err)); } TEST_F(ParserTest, DuplicateRuleInDifferentSubninjasWithInclude) { // Test that rules are scoped to subninjas even with includes. fs_.Create("rules.ninja", "rule cat\n" " command = cat\n"); fs_.Create("test.ninja", "include rules.ninja\n" "build x : cat\n"); ManifestParser parser(&state, &fs_); string err; EXPECT_TRUE(parser.ParseTest("include rules.ninja\n" "subninja test.ninja\n" "build y : cat\n", &err)); } TEST_F(ParserTest, Include) { fs_.Create("include.ninja", "var = inner\n"); ASSERT_NO_FATAL_FAILURE(AssertParse( "var = outer\n" "include include.ninja\n")); ASSERT_EQ(1u, fs_.files_read_.size()); EXPECT_EQ("include.ninja", fs_.files_read_[0]); EXPECT_EQ("inner", state.bindings_.LookupVariable("var")); } TEST_F(ParserTest, BrokenInclude) { fs_.Create("include.ninja", "build\n"); ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest("include include.ninja\n", &err)); EXPECT_EQ("include.ninja:1: expected path\n" "build\n" " ^ near here" , err); } TEST_F(ParserTest, Implicit) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build foo: cat bar | baz\n")); Edge* edge = 
state.LookupNode("foo")->in_edge(); ASSERT_TRUE(edge->is_implicit(1)); } TEST_F(ParserTest, OrderOnly) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" "build foo: cat bar || baz\n")); Edge* edge = state.LookupNode("foo")->in_edge(); ASSERT_TRUE(edge->is_order_only(1)); } TEST_F(ParserTest, Validations) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" "build foo: cat bar |@ baz\n")); Edge* edge = state.LookupNode("foo")->in_edge(); ASSERT_EQ(edge->validations_.size(), size_t(1)); EXPECT_EQ(edge->validations_[0]->path(), "baz"); } TEST_F(ParserTest, ImplicitOutput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build foo | imp: cat bar\n")); Edge* edge = state.LookupNode("imp")->in_edge(); ASSERT_EQ(edge->outputs_.size(), size_t(2)); EXPECT_TRUE(edge->is_implicit_out(1)); } TEST_F(ParserTest, ImplicitOutputEmpty) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build foo | : cat bar\n")); Edge* edge = state.LookupNode("foo")->in_edge(); ASSERT_EQ(edge->outputs_.size(), size_t(1)); EXPECT_FALSE(edge->is_implicit_out(0)); } TEST_F(ParserTest, ImplicitOutputDupeError) { const char kInput[] = "rule cat\n" " command = cat $in > $out\n" "build foo baz | foo baq foo: cat bar\n"; ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err); } TEST_F(ParserTest, ImplicitOutputDupesError) { const char kInput[] = "rule cat\n" " command = cat $in > $out\n" "build foo foo foo | foo foo foo foo: cat bar\n"; ManifestParser parser(&state, &fs_); string err; EXPECT_FALSE(parser.ParseTest(kInput, &err)); EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err); } TEST_F(ParserTest, NoExplicitOutput) { ManifestParser parser(&state, NULL); string err; EXPECT_TRUE(parser.ParseTest( "rule cat\n" " command = cat $in > $out\n" "build | imp : 
cat bar\n", &err)); } TEST_F(ParserTest, DefaultDefault) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" "build a: cat foo\n" "build b: cat foo\n" "build c: cat foo\n" "build d: cat foo\n")); string err; EXPECT_EQ(4u, state.DefaultNodes(&err).size()); EXPECT_EQ("", err); } TEST_F(ParserTest, DefaultDefaultCycle) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" "build a: cat a\n")); string err; EXPECT_EQ(0u, state.DefaultNodes(&err).size()); EXPECT_EQ("could not determine root nodes of build graph", err); } TEST_F(ParserTest, DefaultStatements) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n command = cat $in > $out\n" "build a: cat foo\n" "build b: cat foo\n" "build c: cat foo\n" "build d: cat foo\n" "third = c\n" "default a b\n" "default $third\n")); string err; vector nodes = state.DefaultNodes(&err); EXPECT_EQ("", err); ASSERT_EQ(3u, nodes.size()); EXPECT_EQ("a", nodes[0]->path()); EXPECT_EQ("b", nodes[1]->path()); EXPECT_EQ("c", nodes[2]->path()); } TEST_F(ParserTest, UTF8) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule utf8\n" " command = true\n" " description = compilaci\xC3\xB3\n")); } TEST_F(ParserTest, CRLF) { State local_state; ManifestParser parser(&local_state, NULL); string err; EXPECT_TRUE(parser.ParseTest("# comment with crlf\r\n", &err)); EXPECT_TRUE(parser.ParseTest("foo = foo\nbar = bar\r\n", &err)); EXPECT_TRUE(parser.ParseTest( "pool link_pool\r\n" " depth = 15\r\n\r\n" "rule xyz\r\n" " command = something$expand \r\n" " description = YAY!\r\n", &err)); } TEST_F(ParserTest, DyndepNotSpecified) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build result: cat in\n")); Edge* edge = state.GetNode("result", 0)->in_edge(); ASSERT_FALSE(edge->dyndep_); } TEST_F(ParserTest, DyndepNotInput) { State lstate; ManifestParser parser(&lstate, NULL); string err; EXPECT_FALSE(parser.ParseTest( "rule touch\n" " command = touch $out\n" "build result: touch\n" " dyndep 
= notin\n", &err)); EXPECT_EQ("input:5: dyndep 'notin' is not an input\n", err); } TEST_F(ParserTest, DyndepExplicitInput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build result: cat in\n" " dyndep = in\n")); Edge* edge = state.GetNode("result", 0)->in_edge(); ASSERT_TRUE(edge->dyndep_); EXPECT_TRUE(edge->dyndep_->dyndep_pending()); EXPECT_EQ(edge->dyndep_->path(), "in"); } TEST_F(ParserTest, DyndepImplicitInput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build result: cat in | dd\n" " dyndep = dd\n")); Edge* edge = state.GetNode("result", 0)->in_edge(); ASSERT_TRUE(edge->dyndep_); EXPECT_TRUE(edge->dyndep_->dyndep_pending()); EXPECT_EQ(edge->dyndep_->path(), "dd"); } TEST_F(ParserTest, DyndepOrderOnlyInput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" "build result: cat in || dd\n" " dyndep = dd\n")); Edge* edge = state.GetNode("result", 0)->in_edge(); ASSERT_TRUE(edge->dyndep_); EXPECT_TRUE(edge->dyndep_->dyndep_pending()); EXPECT_EQ(edge->dyndep_->path(), "dd"); } TEST_F(ParserTest, DyndepRuleInput) { ASSERT_NO_FATAL_FAILURE(AssertParse( "rule cat\n" " command = cat $in > $out\n" " dyndep = $in\n" "build result: cat in\n")); Edge* edge = state.GetNode("result", 0)->in_edge(); ASSERT_TRUE(edge->dyndep_); EXPECT_TRUE(edge->dyndep_->dyndep_pending()); EXPECT_EQ(edge->dyndep_->path(), "in"); } ninja-1.13.2/src/metrics.cc000066400000000000000000000060741510764045400154560ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "metrics.h" #include #include #include #include #include #include "util.h" using namespace std; Metrics* g_metrics = NULL; namespace { /// Compute a platform-specific high-res timer value that fits into an int64. int64_t HighResTimer() { auto now = chrono::steady_clock::now(); return chrono::duration_cast( now.time_since_epoch()) .count(); } int64_t TimerToMicros(int64_t dt) { // dt is in ticks. We want microseconds. return chrono::duration_cast( std::chrono::steady_clock::duration{ dt }) .count(); } int64_t TimerToMicros(double dt) { // dt is in ticks. We want microseconds. using DoubleSteadyClock = std::chrono::duration; return chrono::duration_cast(DoubleSteadyClock{ dt }) .count(); } } // anonymous namespace ScopedMetric::ScopedMetric(Metric* metric) { metric_ = metric; if (!metric_) return; start_ = HighResTimer(); } ScopedMetric::~ScopedMetric() { if (!metric_) return; metric_->count++; // Leave in the timer's natural frequency to avoid paying the conversion cost // on every measurement. 
int64_t dt = HighResTimer() - start_; metric_->sum += dt; } Metric* Metrics::NewMetric(const string& name) { Metric* metric = new Metric; metric->name = name; metric->count = 0; metric->sum = 0; metrics_.push_back(metric); return metric; } void Metrics::Report() { int width = 0; for (vector::iterator i = metrics_.begin(); i != metrics_.end(); ++i) { width = max((int)(*i)->name.size(), width); } printf("%-*s\t%-6s\t%-9s\t%s\n", width, "metric", "count", "avg (us)", "total (ms)"); for (vector::iterator i = metrics_.begin(); i != metrics_.end(); ++i) { Metric* metric = *i; uint64_t micros = TimerToMicros(metric->sum); double total = micros / (double)1000; double avg = micros / (double)metric->count; printf("%-*s\t%-6d\t%-8.1f\t%.1f\n", width, metric->name.c_str(), metric->count, avg, total); } } double Stopwatch::Elapsed() const { // Convert to micros after converting to double to minimize error. return 1e-6 * TimerToMicros(static_cast(NowRaw() - started_)); } uint64_t Stopwatch::NowRaw() const { return HighResTimer(); } int64_t GetTimeMillis() { return TimerToMicros(HighResTimer()) / 1000; } ninja-1.13.2/src/metrics.h000066400000000000000000000057521510764045400153220ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_METRICS_H_ #define NINJA_METRICS_H_ #include #include #include "util.h" // For int64_t. 
/// The Metrics module is used for the debug mode that dumps timing stats of /// various actions. To use, see METRIC_RECORD below. /// A single metrics we're tracking, like "depfile load time". struct Metric { std::string name; /// Number of times we've hit the code path. int count; /// Total time (in platform-dependent units) we've spent on the code path. int64_t sum; }; /// A scoped object for recording a metric across the body of a function. /// Used by the METRIC_RECORD macro. struct ScopedMetric { explicit ScopedMetric(Metric* metric); ~ScopedMetric(); private: Metric* metric_; /// Timestamp when the measurement started. /// Value is platform-dependent. int64_t start_; }; /// The singleton that stores metrics and prints the report. struct Metrics { Metric* NewMetric(const std::string& name); /// Print a summary report to stdout. void Report(); private: std::vector metrics_; }; /// Get the current time as relative to some epoch. /// Epoch varies between platforms; only useful for measuring elapsed time. int64_t GetTimeMillis(); /// A simple stopwatch which returns the time /// in seconds since Restart() was called. struct Stopwatch { public: Stopwatch() : started_(0) {} /// Seconds since Restart() call. double Elapsed() const; void Restart() { started_ = NowRaw(); } private: uint64_t started_; // Return the current time using the native frequency of the high resolution // timer. uint64_t NowRaw() const; }; /// The primary interface to metrics. Use METRIC_RECORD("foobar") at the top /// of a function to get timing stats recorded for each call of the function. #define METRIC_RECORD(name) \ static Metric* metrics_h_metric = \ g_metrics ? g_metrics->NewMetric(name) : NULL; \ ScopedMetric metrics_h_scoped(metrics_h_metric); /// A variant of METRIC_RECORD that doesn't record anything if |condition| /// is false. #define METRIC_RECORD_IF(name, condition) \ static Metric* metrics_h_metric = \ g_metrics ? 
g_metrics->NewMetric(name) : NULL; \ ScopedMetric metrics_h_scoped((condition) ? metrics_h_metric : NULL); extern Metrics* g_metrics; #endif // NINJA_METRICS_H_ ninja-1.13.2/src/minidump-win32.cc000066400000000000000000000055651510764045400165760ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef _MSC_VER #include #include #include "util.h" using namespace std; typedef BOOL (WINAPI *MiniDumpWriteDumpFunc) ( IN HANDLE, IN DWORD, IN HANDLE, IN MINIDUMP_TYPE, IN CONST PMINIDUMP_EXCEPTION_INFORMATION, OPTIONAL IN CONST PMINIDUMP_USER_STREAM_INFORMATION, OPTIONAL IN CONST PMINIDUMP_CALLBACK_INFORMATION OPTIONAL ); /// Creates a windows minidump in temp folder. void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep) { char temp_path[MAX_PATH]; GetTempPathA(sizeof(temp_path), temp_path); char temp_file[MAX_PATH]; sprintf(temp_file, "%s\\ninja_crash_dump_%lu.dmp", temp_path, GetCurrentProcessId()); // Delete any previous minidump of the same name. DeleteFileA(temp_file); // Load DbgHelp.dll dynamically, as library is not present on all // Windows versions. 
HMODULE dbghelp = LoadLibraryA("dbghelp.dll"); if (dbghelp == NULL) { Error("failed to create minidump: LoadLibrary('dbghelp.dll'): %s", GetLastErrorString().c_str()); return; } MiniDumpWriteDumpFunc mini_dump_write_dump = FunctionCast (GetProcAddress(dbghelp, "MiniDumpWriteDump")); if (mini_dump_write_dump == NULL) { Error("failed to create minidump: GetProcAddress('MiniDumpWriteDump'): %s", GetLastErrorString().c_str()); return; } HANDLE hFile = CreateFileA(temp_file, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (hFile == NULL) { Error("failed to create minidump: CreateFileA(%s): %s", temp_file, GetLastErrorString().c_str()); return; } MINIDUMP_EXCEPTION_INFORMATION mdei; mdei.ThreadId = GetCurrentThreadId(); mdei.ExceptionPointers = pep; mdei.ClientPointers = FALSE; MINIDUMP_TYPE mdt = (MINIDUMP_TYPE) (MiniDumpWithDataSegs | MiniDumpWithHandleData); BOOL rv = mini_dump_write_dump(GetCurrentProcess(), GetCurrentProcessId(), hFile, mdt, (pep != 0) ? &mdei : 0, 0, 0); CloseHandle(hFile); if (!rv) { Error("MiniDumpWriteDump failed: %s", GetLastErrorString().c_str()); return; } Warning("minidump created: %s", temp_file); } #endif // _MSC_VER ninja-1.13.2/src/missing_deps.cc000066400000000000000000000157011510764045400164710ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "missing_deps.h" #include #include #include "depfile_parser.h" #include "deps_log.h" #include "disk_interface.h" #include "graph.h" #include "state.h" #include "util.h" namespace { /// ImplicitDepLoader variant that stores dep nodes into the given output /// without updating graph deps like the base loader does. struct NodeStoringImplicitDepLoader : public ImplicitDepLoader { NodeStoringImplicitDepLoader( State* state, DepsLog* deps_log, DiskInterface* disk_interface, DepfileParserOptions const* depfile_parser_options, Explanations* explanations, std::vector* dep_nodes_output) : ImplicitDepLoader(state, deps_log, disk_interface, depfile_parser_options, explanations), dep_nodes_output_(dep_nodes_output) {} protected: virtual bool ProcessDepfileDeps(Edge* edge, std::vector* depfile_ins, std::string* err); private: std::vector* dep_nodes_output_; }; bool NodeStoringImplicitDepLoader::ProcessDepfileDeps( Edge* edge, std::vector* depfile_ins, std::string* err) { for (std::vector::iterator i = depfile_ins->begin(); i != depfile_ins->end(); ++i) { uint64_t slash_bits; CanonicalizePath(const_cast(i->str_), &i->len_, &slash_bits); Node* node = state_->GetNode(*i, slash_bits); dep_nodes_output_->push_back(node); } return true; } } // namespace MissingDependencyScannerDelegate::~MissingDependencyScannerDelegate() {} void MissingDependencyPrinter::OnMissingDep(Node* node, const std::string& path, const Rule& generator) { std::cout << "Missing dep: " << node->path() << " uses " << path << " (generated by " << generator.name() << ")\n"; } MissingDependencyScanner::MissingDependencyScanner( MissingDependencyScannerDelegate* delegate, DepsLog* deps_log, State* state, DiskInterface* disk_interface) : delegate_(delegate), deps_log_(deps_log), state_(state), disk_interface_(disk_interface), missing_dep_path_count_(0) {} void MissingDependencyScanner::ProcessNode(Node* node) { if (!node) return; Edge* edge = node->in_edge(); if (!edge) return; if (!seen_.insert(node).second) 
return; for (std::vector::iterator in = edge->inputs_.begin(); in != edge->inputs_.end(); ++in) { ProcessNode(*in); } std::string deps_type = edge->GetBinding("deps"); if (!deps_type.empty()) { DepsLog::Deps* deps = deps_log_->GetDeps(node); if (deps) ProcessNodeDeps(node, deps->nodes, deps->node_count); } else { DepfileParserOptions parser_opts; std::vector depfile_deps; NodeStoringImplicitDepLoader dep_loader(state_, deps_log_, disk_interface_, &parser_opts, nullptr, &depfile_deps); std::string err; dep_loader.LoadDeps(edge, &err); if (!depfile_deps.empty()) ProcessNodeDeps(node, &depfile_deps[0], static_cast(depfile_deps.size())); } } void MissingDependencyScanner::ProcessNodeDeps(Node* node, Node** dep_nodes, int dep_nodes_count) { Edge* edge = node->in_edge(); std::set deplog_edges; for (int i = 0; i < dep_nodes_count; ++i) { Node* deplog_node = dep_nodes[i]; // Special exception: A dep on build.ninja can be used to mean "always // rebuild this target when the build is reconfigured", but build.ninja is // often generated by a configuration tool like cmake or gn. The rest of // the build "implicitly" depends on the entire build being reconfigured, // so a missing dep path to build.ninja is not an actual missing dependency // problem. 
if (deplog_node->path() == "build.ninja") return; Edge* deplog_edge = deplog_node->in_edge(); if (deplog_edge) { deplog_edges.insert(deplog_edge); } } std::vector missing_deps; for (std::set::iterator de = deplog_edges.begin(); de != deplog_edges.end(); ++de) { if (!PathExistsBetween(*de, edge)) { missing_deps.push_back(*de); } } if (!missing_deps.empty()) { std::set missing_deps_rule_names; for (std::vector::iterator ne = missing_deps.begin(); ne != missing_deps.end(); ++ne) { for (int i = 0; i < dep_nodes_count; ++i) { if (dep_nodes[i]->in_edge() == *ne) { generated_nodes_.insert(dep_nodes[i]); generator_rules_.insert(&(*ne)->rule()); missing_deps_rule_names.insert((*ne)->rule().name()); delegate_->OnMissingDep(node, dep_nodes[i]->path(), (*ne)->rule()); } } } missing_dep_path_count_ += missing_deps_rule_names.size(); nodes_missing_deps_.insert(node); } } void MissingDependencyScanner::PrintStats() { std::cout << "Processed " << seen_.size() << " nodes.\n"; if (HadMissingDeps()) { std::cout << "Error: There are " << missing_dep_path_count_ << " missing dependency paths.\n"; std::cout << nodes_missing_deps_.size() << " targets had depfile dependencies on " << generated_nodes_.size() << " distinct generated inputs " << "(from " << generator_rules_.size() << " rules) " << " without a non-depfile dep path to the generator.\n"; std::cout << "There might be build flakiness if any of the targets listed " "above are built alone, or not late enough, in a clean output " "directory.\n"; } else { std::cout << "No missing dependencies on generated files found.\n"; } } bool MissingDependencyScanner::PathExistsBetween(Edge* from, Edge* to) { AdjacencyMap::iterator it = adjacency_map_.find(from); if (it != adjacency_map_.end()) { InnerAdjacencyMap::iterator inner_it = it->second.find(to); if (inner_it != it->second.end()) { return inner_it->second; } } else { it = adjacency_map_.insert(std::make_pair(from, InnerAdjacencyMap())).first; } bool found = false; for (size_t i = 0; i < 
to->inputs_.size(); ++i) { Edge* e = to->inputs_[i]->in_edge(); if (e && (e == from || PathExistsBetween(from, e))) { found = true; break; } } it->second.insert(std::make_pair(to, found)); return found; } ninja-1.13.2/src/missing_deps.h000066400000000000000000000044701510764045400163340ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_MISSING_DEPS_H_ #define NINJA_MISSING_DEPS_H_ #include #include #include #include struct DepsLog; struct DiskInterface; struct Edge; struct Node; struct Rule; struct State; class MissingDependencyScannerDelegate { public: virtual ~MissingDependencyScannerDelegate(); virtual void OnMissingDep(Node* node, const std::string& path, const Rule& generator) = 0; }; class MissingDependencyPrinter : public MissingDependencyScannerDelegate { void OnMissingDep(Node* node, const std::string& path, const Rule& generator); void OnStats(int nodes_processed, int nodes_missing_deps, int missing_dep_path_count, int generated_nodes, int generator_rules); }; struct MissingDependencyScanner { public: MissingDependencyScanner(MissingDependencyScannerDelegate* delegate, DepsLog* deps_log, State* state, DiskInterface* disk_interface); void ProcessNode(Node* node); void PrintStats(); bool HadMissingDeps() { return !nodes_missing_deps_.empty(); } void ProcessNodeDeps(Node* node, Node** dep_nodes, int dep_nodes_count); bool PathExistsBetween(Edge* from, Edge* to); 
MissingDependencyScannerDelegate* delegate_; DepsLog* deps_log_; State* state_; DiskInterface* disk_interface_; std::set seen_; std::set nodes_missing_deps_; std::set generated_nodes_; std::set generator_rules_; int missing_dep_path_count_; private: using InnerAdjacencyMap = std::unordered_map; using AdjacencyMap = std::unordered_map; AdjacencyMap adjacency_map_; }; #endif // NINJA_MISSING_DEPS_H_ ninja-1.13.2/src/missing_deps_test.cc000066400000000000000000000140101510764045400175200ustar00rootroot00000000000000// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include "deps_log.h" #include "graph.h" #include "missing_deps.h" #include "state.h" #include "test.h" const char kTestDepsLogFilename[] = "MissingDepTest-tempdepslog"; class MissingDependencyTestDelegate : public MissingDependencyScannerDelegate { void OnMissingDep(Node* node, const std::string& path, const Rule& generator) {} }; struct MissingDependencyScannerTest : public testing::Test { MissingDependencyScannerTest() : generator_rule_("generator_rule"), compile_rule_("compile_rule"), scanner_(&delegate_, &deps_log_, &state_, &filesystem_) { std::string err; deps_log_.OpenForWrite(kTestDepsLogFilename, &err); EXPECT_EQ("", err); } ~MissingDependencyScannerTest() { // Remove test file. 
deps_log_.Close(); } MissingDependencyScanner& scanner() { return scanner_; } void RecordDepsLogDep(const std::string& from, const std::string& to) { Node* node_deps[] = { state_.LookupNode(to) }; deps_log_.RecordDeps(state_.LookupNode(from), 0, 1, node_deps); } void ProcessAllNodes() { std::string err; std::vector nodes = state_.RootNodes(&err); EXPECT_EQ("", err); for (std::vector::iterator it = nodes.begin(); it != nodes.end(); ++it) { scanner().ProcessNode(*it); } } void CreateInitialState() { EvalString deps_type; deps_type.AddText("gcc"); compile_rule_.AddBinding("deps", deps_type); generator_rule_.AddBinding("deps", deps_type); Edge* header_edge = state_.AddEdge(&generator_rule_); state_.AddOut(header_edge, "generated_header", 0, nullptr); Edge* compile_edge = state_.AddEdge(&compile_rule_); state_.AddOut(compile_edge, "compiled_object", 0, nullptr); } void CreateGraphDependencyBetween(const char* from, const char* to) { Node* from_node = state_.LookupNode(from); Edge* from_edge = from_node->in_edge(); state_.AddIn(from_edge, to, 0); } void AssertMissingDependencyBetween(const char* flaky, const char* generated, Rule* rule) { Node* flaky_node = state_.LookupNode(flaky); ASSERT_EQ(1u, scanner().nodes_missing_deps_.count(flaky_node)); Node* generated_node = state_.LookupNode(generated); ASSERT_EQ(1u, scanner().generated_nodes_.count(generated_node)); ASSERT_EQ(1u, scanner().generator_rules_.count(rule)); } ScopedFilePath scoped_file_path_ = kTestDepsLogFilename; MissingDependencyTestDelegate delegate_; Rule generator_rule_; Rule compile_rule_; DepsLog deps_log_; State state_; VirtualFileSystem filesystem_; MissingDependencyScanner scanner_; }; TEST_F(MissingDependencyScannerTest, EmptyGraph) { ProcessAllNodes(); ASSERT_FALSE(scanner().HadMissingDeps()); } TEST_F(MissingDependencyScannerTest, NoMissingDep) { CreateInitialState(); ProcessAllNodes(); ASSERT_FALSE(scanner().HadMissingDeps()); } TEST_F(MissingDependencyScannerTest, MissingDepPresent) { 
CreateInitialState(); // compiled_object uses generated_header, without a proper dependency RecordDepsLogDep("compiled_object", "generated_header"); ProcessAllNodes(); ASSERT_TRUE(scanner().HadMissingDeps()); ASSERT_EQ(1u, scanner().nodes_missing_deps_.size()); ASSERT_EQ(1u, scanner().missing_dep_path_count_); AssertMissingDependencyBetween("compiled_object", "generated_header", &generator_rule_); } TEST_F(MissingDependencyScannerTest, MissingDepFixedDirect) { CreateInitialState(); // Adding the direct dependency fixes the missing dep CreateGraphDependencyBetween("compiled_object", "generated_header"); RecordDepsLogDep("compiled_object", "generated_header"); ProcessAllNodes(); ASSERT_FALSE(scanner().HadMissingDeps()); } TEST_F(MissingDependencyScannerTest, MissingDepFixedIndirect) { CreateInitialState(); // Adding an indirect dependency also fixes the issue Edge* intermediate_edge = state_.AddEdge(&generator_rule_); state_.AddOut(intermediate_edge, "intermediate", 0, nullptr); CreateGraphDependencyBetween("compiled_object", "intermediate"); CreateGraphDependencyBetween("intermediate", "generated_header"); RecordDepsLogDep("compiled_object", "generated_header"); ProcessAllNodes(); ASSERT_FALSE(scanner().HadMissingDeps()); } TEST_F(MissingDependencyScannerTest, CyclicMissingDep) { CreateInitialState(); RecordDepsLogDep("generated_header", "compiled_object"); RecordDepsLogDep("compiled_object", "generated_header"); // In case of a cycle, both paths are reported (and there is // no way to fix the issue by adding deps). 
ProcessAllNodes(); ASSERT_TRUE(scanner().HadMissingDeps()); ASSERT_EQ(2u, scanner().nodes_missing_deps_.size()); ASSERT_EQ(2u, scanner().missing_dep_path_count_); AssertMissingDependencyBetween("compiled_object", "generated_header", &generator_rule_); AssertMissingDependencyBetween("generated_header", "compiled_object", &compile_rule_); } TEST_F(MissingDependencyScannerTest, CycleInGraph) { CreateInitialState(); CreateGraphDependencyBetween("compiled_object", "generated_header"); CreateGraphDependencyBetween("generated_header", "compiled_object"); // The missing-deps tool doesn't deal with cycles in the graph, because // there will be an error loading the graph before we get to the tool. // This test is to illustrate that. std::string err; std::vector nodes = state_.RootNodes(&err); ASSERT_NE("", err); } ninja-1.13.2/src/msvc_helper-win32.cc000066400000000000000000000065071510764045400172600ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "msvc_helper.h" #include #include "util.h" using namespace std; namespace { string Replace(const string& input, const string& find, const string& replace) { string result = input; size_t start_pos = 0; while ((start_pos = result.find(find, start_pos)) != string::npos) { result.replace(start_pos, find.length(), replace); start_pos += replace.length(); } return result; } } // anonymous namespace string EscapeForDepfile(const string& path) { // Depfiles don't escape single \. return Replace(path, " ", "\\ "); } int CLWrapper::Run(const string& command, string* output) { SECURITY_ATTRIBUTES security_attributes = {}; security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES); security_attributes.bInheritHandle = TRUE; // Must be inheritable so subprocesses can dup to children. HANDLE nul = CreateFileA("NUL", GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, &security_attributes, OPEN_EXISTING, 0, NULL); if (nul == INVALID_HANDLE_VALUE) Fatal("couldn't open nul"); HANDLE stdout_read, stdout_write; if (!CreatePipe(&stdout_read, &stdout_write, &security_attributes, 0)) Win32Fatal("CreatePipe"); if (!SetHandleInformation(stdout_read, HANDLE_FLAG_INHERIT, 0)) Win32Fatal("SetHandleInformation"); PROCESS_INFORMATION process_info = {}; STARTUPINFOA startup_info = {}; startup_info.cb = sizeof(STARTUPINFOA); startup_info.hStdInput = nul; startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE); startup_info.hStdOutput = stdout_write; startup_info.dwFlags |= STARTF_USESTDHANDLES; if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL, /* inherit handles */ TRUE, 0, env_block_, NULL, &startup_info, &process_info)) { Win32Fatal("CreateProcess"); } if (!CloseHandle(nul) || !CloseHandle(stdout_write)) { Win32Fatal("CloseHandle"); } // Read all output of the subprocess. 
DWORD read_len = 1; while (read_len) { char buf[64 << 10]; read_len = 0; if (!::ReadFile(stdout_read, buf, sizeof(buf), &read_len, NULL) && GetLastError() != ERROR_BROKEN_PIPE) { Win32Fatal("ReadFile"); } output->append(buf, read_len); } // Wait for it to exit and grab its exit code. if (WaitForSingleObject(process_info.hProcess, INFINITE) == WAIT_FAILED) Win32Fatal("WaitForSingleObject"); DWORD exit_code = 0; if (!GetExitCodeProcess(process_info.hProcess, &exit_code)) Win32Fatal("GetExitCodeProcess"); if (!CloseHandle(stdout_read) || !CloseHandle(process_info.hProcess) || !CloseHandle(process_info.hThread)) { Win32Fatal("CloseHandle"); } return exit_code; } ninja-1.13.2/src/msvc_helper.h000066400000000000000000000023061510764045400161530ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MSVC_HELPER_H_ #define MSVC_HELPER_H_ #include std::string EscapeForDepfile(const std::string& path); /// Wraps a synchronous execution of a CL subprocess. struct CLWrapper { CLWrapper() : env_block_(NULL) {} /// Set the environment block (as suitable for CreateProcess) to be used /// by Run(). void SetEnvBlock(void* env_block) { env_block_ = env_block; } /// Start a process and gather its raw output. Returns its exit code. /// Crashes (calls Fatal()) on error. 
int Run(const std::string& command, std::string* output); void* env_block_; }; #endif // MSVC_HELPER_H_ ninja-1.13.2/src/msvc_helper_main-win32.cc000066400000000000000000000077511510764045400202660ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "msvc_helper.h" #include #include #include #include #include "clparser.h" #include "util.h" #include "getopt.h" using namespace std; namespace { void Usage() { printf( "usage: ninja -t msvc [options] -- cl.exe /showIncludes /otherArgs\n" "options:\n" " -e ENVFILE load environment block from ENVFILE as environment\n" " -o FILE write output dependency information to FILE.d\n" " -p STRING localized prefix of msvc's /showIncludes output\n" ); } void PushPathIntoEnvironment(const string& env_block) { const char* as_str = env_block.c_str(); while (as_str[0]) { if (_strnicmp(as_str, "path=", 5) == 0) { _putenv(as_str); return; } else { as_str = &as_str[strlen(as_str) + 1]; } } } void WriteDepFileOrDie(const char* object_path, const CLParser& parse) { string depfile_path = string(object_path) + ".d"; FILE* depfile = fopen(depfile_path.c_str(), "w"); if (!depfile) { platformAwareUnlink(object_path); Fatal("opening %s: %s", depfile_path.c_str(), GetLastErrorString().c_str()); } if (fprintf(depfile, "%s: ", object_path) < 0) { platformAwareUnlink(object_path); fclose(depfile); platformAwareUnlink(depfile_path.c_str()); Fatal("writing %s", 
depfile_path.c_str()); } const set& headers = parse.includes_; for (set::const_iterator i = headers.begin(); i != headers.end(); ++i) { if (fprintf(depfile, "%s\n", EscapeForDepfile(*i).c_str()) < 0) { platformAwareUnlink(object_path); fclose(depfile); platformAwareUnlink(depfile_path.c_str()); Fatal("writing %s", depfile_path.c_str()); } } fclose(depfile); } } // anonymous namespace int MSVCHelperMain(int argc, char** argv) { const char* output_filename = NULL; const char* envfile = NULL; const option kLongOptions[] = { { "help", no_argument, NULL, 'h' }, { NULL, 0, NULL, 0 } }; int opt; string deps_prefix; while ((opt = getopt_long(argc, argv, "e:o:p:h", kLongOptions, NULL)) != -1) { switch (opt) { case 'e': envfile = optarg; break; case 'o': output_filename = optarg; break; case 'p': deps_prefix = optarg; break; case 'h': default: Usage(); return 0; } } string env; if (envfile) { string err; if (ReadFile(envfile, &env, &err) != 0) Fatal("couldn't open %s: %s", envfile, err.c_str()); PushPathIntoEnvironment(env); } char* command = GetCommandLineA(); command = strstr(command, " -- "); if (!command) { Fatal("expected command line to end with \" -- command args\""); } command += 4; CLWrapper cl; if (!env.empty()) cl.SetEnvBlock((void*)env.data()); string output; int exit_code = cl.Run(command, &output); if (output_filename) { CLParser parser; string err; if (!parser.Parse(output, deps_prefix, &output, &err)) Fatal("%s\n", err.c_str()); WriteDepFileOrDie(output_filename, parser); } if (output.empty()) return exit_code; // CLWrapper's output already as \r\n line endings, make sure the C runtime // doesn't expand this to \r\r\n. _setmode(_fileno(stdout), _O_BINARY); // Avoid printf and C strings, since the actual output might contain null // bytes like UTF-16 does (yuck). 
fwrite(&output[0], 1, output.size(), stdout); return exit_code; } ninja-1.13.2/src/msvc_helper_test.cc000066400000000000000000000023261510764045400173520ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "msvc_helper.h" #include "test.h" #include "util.h" using namespace std; TEST(EscapeForDepfileTest, SpacesInFilename) { ASSERT_EQ("sub\\some\\ sdk\\foo.h", EscapeForDepfile("sub\\some sdk\\foo.h")); } TEST(MSVCHelperTest, EnvBlock) { char env_block[] = "foo=bar\0"; CLWrapper cl; cl.SetEnvBlock(env_block); string output; cl.Run("cmd /c \"echo foo is %foo%", &output); ASSERT_EQ("foo is bar\r\n", output); } TEST(MSVCHelperTest, NoReadOfStderr) { CLWrapper cl; string output; cl.Run("cmd /c \"echo to stdout&& echo to stderr 1>&2", &output); ASSERT_EQ("to stdout\r\n", output); } ninja-1.13.2/src/ninja.cc000066400000000000000000001576151510764045400151170ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include #include #include #include #include #include #include #include #include #ifdef _WIN32 #include "getopt.h" #include #include #elif defined(_AIX) #include "getopt.h" #include #else #include #include #endif #include "browse.h" #include "build.h" #include "build_log.h" #include "clean.h" #include "command_collector.h" #include "debug_flags.h" #include "deps_log.h" #include "disk_interface.h" #include "exit_status.h" #include "graph.h" #include "graphviz.h" #include "jobserver.h" #include "json.h" #include "manifest_parser.h" #include "metrics.h" #include "missing_deps.h" #include "state.h" #include "status.h" #include "util.h" #include "version.h" using namespace std; #ifdef _WIN32 // Defined in msvc_helper_main-win32.cc. int MSVCHelperMain(int argc, char** argv); // Defined in minidump-win32.cc. void CreateWin32MiniDump(_EXCEPTION_POINTERS* pep); #endif namespace { struct Tool; /// Command-line options. struct Options { /// Build file to load. const char* input_file; /// Directory to change into before running. const char* working_dir; /// Tool to run rather than building. const Tool* tool; /// Whether phony cycles should warn or print an error. bool phony_cycle_should_err; }; /// The Ninja main() loads up a series of data structures; various tools need /// to poke into these, so store them as fields on an object. struct NinjaMain : public BuildLogUser { NinjaMain(const char* ninja_command, const BuildConfig& config) : ninja_command_(ninja_command), config_(config), start_time_millis_(GetTimeMillis()) {} /// Command line used to run Ninja. const char* ninja_command_; /// Build configuration set from flags (e.g. parallelism). const BuildConfig& config_; /// Loaded state (rules, nodes). State state_; /// Functions for accessing the disk. RealDiskInterface disk_interface_; /// The build directory, used for storing the build log etc. 
string build_dir_; BuildLog build_log_; DepsLog deps_log_; /// The type of functions that are the entry points to tools (subcommands). typedef int (NinjaMain::*ToolFunc)(const Options*, int, char**); /// Get the Node for a given command-line path, handling features like /// spell correction. Node* CollectTarget(const char* cpath, string* err); /// CollectTarget for all command-line arguments, filling in \a targets. bool CollectTargetsFromArgs(int argc, char* argv[], vector* targets, string* err); // The various subcommands, run via "-t XXX". int ToolGraph(const Options* options, int argc, char* argv[]); int ToolQuery(const Options* options, int argc, char* argv[]); int ToolDeps(const Options* options, int argc, char* argv[]); int ToolMissingDeps(const Options* options, int argc, char* argv[]); int ToolBrowse(const Options* options, int argc, char* argv[]); int ToolMSVC(const Options* options, int argc, char* argv[]); int ToolTargets(const Options* options, int argc, char* argv[]); int ToolCommands(const Options* options, int argc, char* argv[]); int ToolInputs(const Options* options, int argc, char* argv[]); int ToolMultiInputs(const Options* options, int argc, char* argv[]); int ToolClean(const Options* options, int argc, char* argv[]); int ToolCleanDead(const Options* options, int argc, char* argv[]); int ToolCompilationDatabase(const Options* options, int argc, char* argv[]); int ToolCompilationDatabaseForTargets(const Options* options, int argc, char* argv[]); int ToolRecompact(const Options* options, int argc, char* argv[]); int ToolRestat(const Options* options, int argc, char* argv[]); int ToolUrtle(const Options* options, int argc, char** argv); int ToolRules(const Options* options, int argc, char* argv[]); int ToolWinCodePage(const Options* options, int argc, char* argv[]); /// Open the build log. /// @return false on error. bool OpenBuildLog(bool recompact_only = false); /// Open the deps log: load it, then open for writing. /// @return false on error. 
bool OpenDepsLog(bool recompact_only = false); /// Ensure the build directory exists, creating it if necessary. /// @return false on error. bool EnsureBuildDirExists(); /// Rebuild the manifest, if necessary. /// Fills in \a err on error. /// @return true if the manifest was rebuilt. bool RebuildManifest(const char* input_file, string* err, Status* status); /// For each edge, lookup in build log how long it took last time, /// and record that in the edge itself. It will be used for ETA prediction. void ParsePreviousElapsedTimes(); /// Create a jobserver client if needed. Return a nullptr value if /// not. Prints info and warnings to \a status. std::unique_ptr SetupJobserverClient(Status* status); /// Build the targets listed on the command line. /// @return an exit code. ExitStatus RunBuild(int argc, char** argv, Status* status); /// Dump the output requested by '-d stats'. void DumpMetrics(); virtual bool IsPathDead(StringPiece s) const { Node* n = state_.LookupNode(s); if (n && n->in_edge()) return false; // Just checking n isn't enough: If an old output is both in the build log // and in the deps log, it will have a Node object in state_. (It will also // have an in edge if one of its inputs is another output that's in the deps // log, but having a deps edge product an output that's input to another deps // edge is rare, and the first recompaction will delete all old outputs from // the deps log, and then a second recompaction will clear the build log, // which seems good enough for this corner case.) // Do keep entries around for files which still exist on disk, for // generators that want to use this information. string err; TimeStamp mtime = disk_interface_.Stat(s.AsString(), &err); if (mtime == -1) Error("%s", err.c_str()); // Log and ignore Stat() errors. return mtime == 0; } int64_t start_time_millis_; }; /// Subtools, accessible via "-t foo". struct Tool { /// Short name of the tool. const char* name; /// Description (shown in "-t list"). 
const char* desc; /// When to run the tool. enum { /// Run after parsing the command-line flags and potentially changing /// the current working directory (as early as possible). RUN_AFTER_FLAGS, /// Run after loading build.ninja. RUN_AFTER_LOAD, /// Run after loading the build/deps logs. RUN_AFTER_LOGS, } when; /// Implementation of the tool. NinjaMain::ToolFunc func; }; /// Print usage information. void Usage(const BuildConfig& config) { fprintf(stderr, "usage: ninja [options] [targets...]\n" "\n" "if targets are unspecified, builds the 'default' target (see manual).\n" "\n" "options:\n" " --version print ninja version (\"%s\")\n" " -v, --verbose show all command lines while building\n" " --quiet don't show progress status, just command output\n" "\n" " -C DIR change to DIR before doing anything else\n" " -f FILE specify input build file [default=build.ninja]\n" "\n" " -j N run N jobs in parallel (0 means infinity) [default=%d on this system]\n" " -k N keep going until N jobs fail (0 means infinity) [default=1]\n" " -l N do not start new jobs if the load average is greater than N\n" " -n dry run (don't run commands but act like they succeeded)\n" "\n" " -d MODE enable debugging (use '-d list' to list modes)\n" " -t TOOL run a subtool (use '-t list' to list subtools)\n" " terminates toplevel options; further flags are passed to the tool\n" " -w FLAG adjust warnings (use '-w list' to list warnings)\n", kNinjaVersion, config.parallelism); } /// Choose a default value for the -j (parallelism) flag. int GuessParallelism() { switch (int processors = GetProcessorCount()) { case 0: case 1: return 2; case 2: return 3; default: return processors + 2; } } /// Rebuild the build manifest, if necessary. /// Returns true if the manifest was rebuilt. 
bool NinjaMain::RebuildManifest(const char* input_file, string* err, Status* status) { string path = input_file; if (path.empty()) { *err = "empty path"; return false; } uint64_t slash_bits; // Unused because this path is only used for lookup. CanonicalizePath(&path, &slash_bits); Node* node = state_.LookupNode(path); if (!node) return false; Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_, status, start_time_millis_); if (!builder.AddTarget(node, err)) return false; if (builder.AlreadyUpToDate()) return false; // Not an error, but we didn't rebuild. if (builder.Build(err) != ExitSuccess) return false; // The manifest was only rebuilt if it is now dirty (it may have been cleaned // by a restat). if (!node->dirty()) { // Reset the state to prevent problems like // https://github.com/ninja-build/ninja/issues/874 state_.Reset(); return false; } return true; } void NinjaMain::ParsePreviousElapsedTimes() { for (Edge* edge : state_.edges_) { for (Node* out : edge->outputs_) { BuildLog::LogEntry* log_entry = build_log_.LookupByOutput(out->path()); if (!log_entry) continue; // Maybe we'll have log entry for next output of this edge? edge->prev_elapsed_time_millis = log_entry->end_time - log_entry->start_time; break; // Onto next edge. } } } Node* NinjaMain::CollectTarget(const char* cpath, string* err) { string path = cpath; if (path.empty()) { *err = "empty path"; return NULL; } uint64_t slash_bits; CanonicalizePath(&path, &slash_bits); // Special syntax: "foo.cc^" means "the first output of foo.cc". 
bool first_dependent = false; if (!path.empty() && path[path.size() - 1] == '^') { path.resize(path.size() - 1); first_dependent = true; } Node* node = state_.LookupNode(path); if (node) { if (first_dependent) { if (node->out_edges().empty()) { Node* rev_deps = deps_log_.GetFirstReverseDepsNode(node); if (!rev_deps) { *err = "'" + path + "' has no out edge"; return NULL; } node = rev_deps; } else { Edge* edge = node->out_edges()[0]; if (edge->outputs_.empty()) { edge->Dump(); Fatal("edge has no outputs"); } node = edge->outputs_[0]; } } return node; } else { *err = "unknown target '" + Node::PathDecanonicalized(path, slash_bits) + "'"; if (path == "clean") { *err += ", did you mean 'ninja -t clean'?"; } else if (path == "help") { *err += ", did you mean 'ninja -h'?"; } else { Node* suggestion = state_.SpellcheckNode(path); if (suggestion) { *err += ", did you mean '" + suggestion->path() + "'?"; } } return NULL; } } bool NinjaMain::CollectTargetsFromArgs(int argc, char* argv[], vector* targets, string* err) { if (argc == 0) { *targets = state_.DefaultNodes(err); return err->empty(); } for (int i = 0; i < argc; ++i) { Node* node = CollectTarget(argv[i], err); if (node == NULL) return false; targets->push_back(node); } return true; } int NinjaMain::ToolGraph(const Options* options, int argc, char* argv[]) { vector nodes; string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); return 1; } GraphViz graph(&state_, &disk_interface_); graph.Start(); for (vector::const_iterator n = nodes.begin(); n != nodes.end(); ++n) graph.AddTarget(*n); graph.Finish(); return 0; } int NinjaMain::ToolQuery(const Options* options, int argc, char* argv[]) { if (argc == 0) { Error("expected a target to query"); return 1; } DyndepLoader dyndep_loader(&state_, &disk_interface_); for (int i = 0; i < argc; ++i) { string err; Node* node = CollectTarget(argv[i], &err); if (!node) { Error("%s", err.c_str()); return 1; } printf("%s:\n", node->path().c_str()); 
if (Edge* edge = node->in_edge()) { if (edge->dyndep_ && edge->dyndep_->dyndep_pending()) { if (!dyndep_loader.LoadDyndeps(edge->dyndep_, &err)) { Warning("%s\n", err.c_str()); } } printf(" input: %s\n", edge->rule_->name().c_str()); for (int in = 0; in < (int)edge->inputs_.size(); in++) { const char* label = ""; if (edge->is_implicit(in)) label = "| "; else if (edge->is_order_only(in)) label = "|| "; printf(" %s%s\n", label, edge->inputs_[in]->path().c_str()); } if (!edge->validations_.empty()) { printf(" validations:\n"); for (std::vector::iterator validation = edge->validations_.begin(); validation != edge->validations_.end(); ++validation) { printf(" %s\n", (*validation)->path().c_str()); } } } printf(" outputs:\n"); for (vector::const_iterator edge = node->out_edges().begin(); edge != node->out_edges().end(); ++edge) { for (vector::iterator out = (*edge)->outputs_.begin(); out != (*edge)->outputs_.end(); ++out) { printf(" %s\n", (*out)->path().c_str()); } } const std::vector validation_edges = node->validation_out_edges(); if (!validation_edges.empty()) { printf(" validation for:\n"); for (std::vector::const_iterator edge = validation_edges.begin(); edge != validation_edges.end(); ++edge) { for (vector::iterator out = (*edge)->outputs_.begin(); out != (*edge)->outputs_.end(); ++out) { printf(" %s\n", (*out)->path().c_str()); } } } } return 0; } #if defined(NINJA_HAVE_BROWSE) int NinjaMain::ToolBrowse(const Options* options, int argc, char* argv[]) { RunBrowsePython(&state_, ninja_command_, options->input_file, argc, argv); // If we get here, the browse failed. return 1; } #else int NinjaMain::ToolBrowse(const Options*, int, char**) { Fatal("browse tool not supported on this platform"); return 1; } #endif #if defined(_WIN32) int NinjaMain::ToolMSVC(const Options* options, int argc, char* argv[]) { // Reset getopt: push one argument onto the front of argv, reset optind. 
argc++; argv--; optind = 0; return MSVCHelperMain(argc, argv); } #endif int ToolTargetsList(const vector& nodes, int depth, int indent) { for (vector::const_iterator n = nodes.begin(); n != nodes.end(); ++n) { for (int i = 0; i < indent; ++i) printf(" "); const char* target = (*n)->path().c_str(); if ((*n)->in_edge()) { printf("%s: %s\n", target, (*n)->in_edge()->rule_->name().c_str()); if (depth > 1 || depth <= 0) ToolTargetsList((*n)->in_edge()->inputs_, depth - 1, indent + 1); } else { printf("%s\n", target); } } return 0; } int ToolTargetsSourceList(State* state) { for (vector::iterator e = state->edges_.begin(); e != state->edges_.end(); ++e) { for (vector::iterator inps = (*e)->inputs_.begin(); inps != (*e)->inputs_.end(); ++inps) { if (!(*inps)->in_edge()) printf("%s\n", (*inps)->path().c_str()); } } return 0; } int ToolTargetsList(State* state, const string& rule_name) { set rules; // Gather the outputs. for (vector::iterator e = state->edges_.begin(); e != state->edges_.end(); ++e) { if ((*e)->rule_->name() == rule_name) { for (vector::iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { rules.insert((*out_node)->path()); } } } // Print them. 
for (set::const_iterator i = rules.begin(); i != rules.end(); ++i) { printf("%s\n", (*i).c_str()); } return 0; } int ToolTargetsList(State* state) { for (vector::iterator e = state->edges_.begin(); e != state->edges_.end(); ++e) { for (vector::iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { printf("%s: %s\n", (*out_node)->path().c_str(), (*e)->rule_->name().c_str()); } } return 0; } int NinjaMain::ToolDeps(const Options* options, int argc, char** argv) { vector nodes; if (argc == 0) { for (vector::const_iterator ni = deps_log_.nodes().begin(); ni != deps_log_.nodes().end(); ++ni) { if (DepsLog::IsDepsEntryLiveFor(*ni)) nodes.push_back(*ni); } } else { string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); return 1; } } RealDiskInterface disk_interface; for (vector::iterator it = nodes.begin(), end = nodes.end(); it != end; ++it) { DepsLog::Deps* deps = deps_log_.GetDeps(*it); if (!deps) { printf("%s: deps not found\n", (*it)->path().c_str()); continue; } string err; TimeStamp mtime = disk_interface.Stat((*it)->path(), &err); if (mtime == -1) Error("%s", err.c_str()); // Log and ignore Stat() errors; printf("%s: #deps %d, deps mtime %" PRId64 " (%s)\n", (*it)->path().c_str(), deps->node_count, deps->mtime, (!mtime || mtime > deps->mtime ? 
"STALE":"VALID")); for (int i = 0; i < deps->node_count; ++i) printf(" %s\n", deps->nodes[i]->path().c_str()); printf("\n"); } return 0; } int NinjaMain::ToolMissingDeps(const Options* options, int argc, char** argv) { vector nodes; string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); return 1; } RealDiskInterface disk_interface; MissingDependencyPrinter printer; MissingDependencyScanner scanner(&printer, &deps_log_, &state_, &disk_interface); for (vector::iterator it = nodes.begin(); it != nodes.end(); ++it) { scanner.ProcessNode(*it); } scanner.PrintStats(); if (scanner.HadMissingDeps()) return 3; return 0; } int NinjaMain::ToolTargets(const Options* options, int argc, char* argv[]) { int depth = 1; if (argc >= 1) { string mode = argv[0]; if (mode == "rule") { string rule; if (argc > 1) rule = argv[1]; if (rule.empty()) return ToolTargetsSourceList(&state_); else return ToolTargetsList(&state_, rule); } else if (mode == "depth") { if (argc > 1) depth = atoi(argv[1]); } else if (mode == "all") { return ToolTargetsList(&state_); } else { const char* suggestion = SpellcheckString(mode.c_str(), "rule", "depth", "all", NULL); if (suggestion) { Error("unknown target tool mode '%s', did you mean '%s'?", mode.c_str(), suggestion); } else { Error("unknown target tool mode '%s'", mode.c_str()); } return 1; } } string err; vector root_nodes = state_.RootNodes(&err); if (err.empty()) { return ToolTargetsList(root_nodes, depth, 0); } else { Error("%s", err.c_str()); return 1; } } int NinjaMain::ToolRules(const Options* options, int argc, char* argv[]) { // Parse options. // The rules tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "rules". 
argc++; argv--; bool print_description = false; optind = 1; int opt; while ((opt = getopt(argc, argv, const_cast("hd"))) != -1) { switch (opt) { case 'd': print_description = true; break; case 'h': default: printf("usage: ninja -t rules [options]\n" "\n" "options:\n" " -d also print the description of the rule\n" " -h print this message\n" ); return 1; } } argv += optind; argc -= optind; // Print rules typedef map> Rules; const Rules& rules = state_.bindings_.GetRules(); for (Rules::const_iterator i = rules.begin(); i != rules.end(); ++i) { printf("%s", i->first.c_str()); if (print_description) { const Rule* rule = i->second.get(); const EvalString* description = rule->GetBinding("description"); if (description != NULL) { printf(": %s", description->Unparse().c_str()); } } printf("\n"); fflush(stdout); } return 0; } #ifdef _WIN32 int NinjaMain::ToolWinCodePage(const Options* options, int argc, char* argv[]) { if (argc != 0) { printf("usage: ninja -t wincodepage\n"); return 1; } printf("Build file encoding: %s\n", GetACP() == CP_UTF8? "UTF-8" : "ANSI"); return 0; } #endif enum PrintCommandMode { PCM_Single, PCM_All }; void PrintCommands(Edge* edge, EdgeSet* seen, PrintCommandMode mode) { if (!edge) return; if (!seen->insert(edge).second) return; if (mode == PCM_All) { for (vector::iterator in = edge->inputs_.begin(); in != edge->inputs_.end(); ++in) PrintCommands((*in)->in_edge(), seen, mode); } if (!edge->is_phony()) puts(edge->EvaluateCommand().c_str()); } int NinjaMain::ToolCommands(const Options* options, int argc, char* argv[]) { // The commands tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "commands". 
++argc; --argv; PrintCommandMode mode = PCM_All; optind = 1; int opt; while ((opt = getopt(argc, argv, const_cast("hs"))) != -1) { switch (opt) { case 's': mode = PCM_Single; break; case 'h': default: printf("usage: ninja -t commands [options] [targets]\n" "\n" "options:\n" " -s only print the final command to build [target], not the whole chain\n" ); return 1; } } argv += optind; argc -= optind; vector nodes; string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); return 1; } EdgeSet seen; for (vector::iterator in = nodes.begin(); in != nodes.end(); ++in) PrintCommands((*in)->in_edge(), &seen, mode); return 0; } int NinjaMain::ToolInputs(const Options* options, int argc, char* argv[]) { // The inputs tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "inputs". argc++; argv--; bool print0 = false; bool shell_escape = true; bool dependency_order = false; optind = 1; int opt; const option kLongOptions[] = { { "help", no_argument, NULL, 'h' }, { "no-shell-escape", no_argument, NULL, 'E' }, { "print0", no_argument, NULL, '0' }, { "dependency-order", no_argument, NULL, 'd' }, { NULL, 0, NULL, 0 } }; while ((opt = getopt_long(argc, argv, "h0Ed", kLongOptions, NULL)) != -1) { switch (opt) { case 'd': dependency_order = true; break; case 'E': shell_escape = false; break; case '0': print0 = true; break; case 'h': default: // clang-format off printf( "Usage '-t inputs [options] [targets]\n" "\n" "List all inputs used for a set of targets, sorted in dependency order.\n" "Note that by default, results are shell escaped, and sorted alphabetically,\n" "and never include validation target paths.\n\n" "Options:\n" " -h, --help Print this message.\n" " -0, --print0 Use \\0, instead of \\n as a line terminator.\n" " -E, --no-shell-escape Do not shell escape the result.\n" " -d, --dependency-order Sort results by dependency order.\n" ); // clang-format on return 1; } } argv += optind; argc -= optind; std::vector 
nodes; std::string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); return 1; } InputsCollector collector; for (const Node* node : nodes) collector.VisitNode(node); std::vector inputs = collector.GetInputsAsStrings(shell_escape); if (!dependency_order) std::sort(inputs.begin(), inputs.end()); if (print0) { for (const std::string& input : inputs) { fwrite(input.c_str(), input.size(), 1, stdout); fputc('\0', stdout); } fflush(stdout); } else { for (const std::string& input : inputs) puts(input.c_str()); } return 0; } int NinjaMain::ToolMultiInputs(const Options* options, int argc, char* argv[]) { // The inputs tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "inputs". argc++; argv--; optind = 1; int opt; char terminator = '\n'; const char* delimiter = "\t"; const option kLongOptions[] = { { "help", no_argument, NULL, 'h' }, { "delimiter", required_argument, NULL, 'd' }, { "print0", no_argument, NULL, '0' }, { NULL, 0, NULL, 0 } }; while ((opt = getopt_long(argc, argv, "d:h0", kLongOptions, NULL)) != -1) { switch (opt) { case 'd': delimiter = optarg; break; case '0': terminator = '\0'; break; case 'h': default: // clang-format off printf( "Usage '-t multi-inputs [options] [targets]\n" "\n" "Print one or more sets of inputs required to build targets, sorted in dependency order.\n" "The tool works like inputs tool but with addition of the target for each line.\n" "The output will be a series of lines with the following elements:\n" " \n" "Note that a given input may appear for several targets if it is used by more than one targets.\n" "Options:\n" " -h, --help Print this message.\n" " -d --delimiter=DELIM Use DELIM instead of TAB for field delimiter.\n" " -0, --print0 Use \\0, instead of \\n as a line terminator.\n" ); // clang-format on return 1; } } argv += optind; argc -= optind; std::vector nodes; std::string err; if (!CollectTargetsFromArgs(argc, argv, &nodes, &err)) { Error("%s", err.c_str()); 
return 1; } for (const Node* node : nodes) { InputsCollector collector; collector.VisitNode(node); std::vector inputs = collector.GetInputsAsStrings(); for (const std::string& input : inputs) { printf("%s%s%s", node->path().c_str(), delimiter, input.c_str()); fputc(terminator, stdout); } } return 0; } int NinjaMain::ToolClean(const Options* options, int argc, char* argv[]) { // The clean tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "clean". argc++; argv--; bool generator = false; bool clean_rules = false; optind = 1; int opt; while ((opt = getopt(argc, argv, const_cast("hgr"))) != -1) { switch (opt) { case 'g': generator = true; break; case 'r': clean_rules = true; break; case 'h': default: printf("usage: ninja -t clean [options] [targets]\n" "\n" "options:\n" " -g also clean files marked as ninja generator output\n" " -r interpret targets as a list of rules to clean instead\n" ); return 1; } } argv += optind; argc -= optind; if (clean_rules && argc == 0) { Error("expected a rule to clean"); return 1; } Cleaner cleaner(&state_, config_, &disk_interface_); if (argc >= 1) { if (clean_rules) return cleaner.CleanRules(argc, argv); else return cleaner.CleanTargets(argc, argv); } else { return cleaner.CleanAll(generator); } } int NinjaMain::ToolCleanDead(const Options* options, int argc, char* argv[]) { Cleaner cleaner(&state_, config_, &disk_interface_); return cleaner.CleanDead(build_log_.entries()); } enum EvaluateCommandMode { ECM_NORMAL, ECM_EXPAND_RSPFILE }; std::string EvaluateCommandWithRspfile(const Edge* edge, const EvaluateCommandMode mode) { string command = edge->EvaluateCommand(); if (mode == ECM_NORMAL) return command; string rspfile = edge->GetUnescapedRspfile(); if (rspfile.empty()) return command; size_t index = command.find(rspfile); if (index == 0 || index == string::npos || (command[index - 1] != '@' && command.find("--option-file=") != index - 14 && command.find("-f ") != index - 3)) return command; string 
rspfile_content = edge->GetBinding("rspfile_content"); size_t newline_index = 0; while ((newline_index = rspfile_content.find('\n', newline_index)) != string::npos) { rspfile_content.replace(newline_index, 1, 1, ' '); ++newline_index; } if (command[index - 1] == '@') { command.replace(index - 1, rspfile.length() + 1, rspfile_content); } else if (command.find("-f ") == index - 3) { command.replace(index - 3, rspfile.length() + 3, rspfile_content); } else { // --option-file syntax command.replace(index - 14, rspfile.length() + 14, rspfile_content); } return command; } void PrintOneCompdbObject(std::string const& directory, const Edge* const edge, const EvaluateCommandMode eval_mode) { printf("\n {\n \"directory\": \""); PrintJSONString(directory); printf("\",\n \"command\": \""); PrintJSONString(EvaluateCommandWithRspfile(edge, eval_mode)); printf("\",\n \"file\": \""); PrintJSONString(edge->inputs_[0]->path()); printf("\",\n \"output\": \""); PrintJSONString(edge->outputs_[0]->path()); printf("\"\n }"); } int NinjaMain::ToolCompilationDatabase(const Options* options, int argc, char* argv[]) { // The compdb tool uses getopt, and expects argv[0] to contain the name of // the tool, i.e. "compdb". 
argc++; argv--; EvaluateCommandMode eval_mode = ECM_NORMAL; optind = 1; int opt; while ((opt = getopt(argc, argv, const_cast("hx"))) != -1) { switch(opt) { case 'x': eval_mode = ECM_EXPAND_RSPFILE; break; case 'h': default: printf( "usage: ninja -t compdb [options] [rules]\n" "\n" "options:\n" " -x expand @rspfile style response file invocations\n" ); return 1; } } argv += optind; argc -= optind; bool first = true; std::string directory = GetWorkingDirectory(); putchar('['); for (const Edge* edge : state_.edges_) { if (edge->inputs_.empty()) continue; if (argc == 0) { if (!first) { putchar(','); } PrintOneCompdbObject(directory, edge, eval_mode); first = false; } else { for (int i = 0; i != argc; ++i) { if (edge->rule_->name() == argv[i]) { if (!first) { putchar(','); } PrintOneCompdbObject(directory, edge, eval_mode); first = false; } } } } puts("\n]"); return 0; } int NinjaMain::ToolRecompact(const Options* options, int argc, char* argv[]) { if (!EnsureBuildDirExists()) return 1; if (!OpenBuildLog(/*recompact_only=*/true) || !OpenDepsLog(/*recompact_only=*/true)) return 1; return 0; } int NinjaMain::ToolRestat(const Options* options, int argc, char* argv[]) { // The restat tool uses getopt, and expects argv[0] to contain the name of the // tool, i.e. 
"restat" argc++; argv--; optind = 1; int opt; while ((opt = getopt(argc, argv, const_cast("h"))) != -1) { switch (opt) { case 'h': default: printf("usage: ninja -t restat [outputs]\n"); return 1; } } argv += optind; argc -= optind; if (!EnsureBuildDirExists()) return 1; string log_path = ".ninja_log"; if (!build_dir_.empty()) log_path = build_dir_ + "/" + log_path; string err; const LoadStatus status = build_log_.Load(log_path, &err); if (status == LOAD_ERROR) { Error("loading build log %s: %s", log_path.c_str(), err.c_str()); return EXIT_FAILURE; } if (status == LOAD_NOT_FOUND) { // Nothing to restat, ignore this return EXIT_SUCCESS; } if (!err.empty()) { // Hack: Load() can return a warning via err by returning LOAD_SUCCESS. Warning("%s", err.c_str()); err.clear(); } bool success = build_log_.Restat(log_path, disk_interface_, argc, argv, &err); if (!success) { Error("failed recompaction: %s", err.c_str()); return EXIT_FAILURE; } if (!config_.dry_run) { if (!build_log_.OpenForWrite(log_path, *this, &err)) { Error("opening build log: %s", err.c_str()); return EXIT_FAILURE; } } return EXIT_SUCCESS; } struct CompdbTargets { enum class Action { kDisplayHelpAndExit, kEmitCommands }; Action action; EvaluateCommandMode eval_mode = ECM_NORMAL; std::vector targets; static CompdbTargets CreateFromArgs(int argc, char* argv[]) { // // grammar: // ninja -t compdb-targets [-hx] target [targets] // CompdbTargets ret; // getopt_long() expects argv[0] to contain the name of // the tool, i.e. "compdb-targets". 
argc++; argv--; // Phase 1: parse options: optind = 1; // see `man 3 getopt` for documentation on optind int opt; while ((opt = getopt(argc, argv, const_cast("hx"))) != -1) { switch (opt) { case 'x': ret.eval_mode = ECM_EXPAND_RSPFILE; break; case 'h': default: ret.action = CompdbTargets::Action::kDisplayHelpAndExit; return ret; } } // Phase 2: parse operands: int const targets_begin = optind; int const targets_end = argc; if (targets_begin == targets_end) { Error("compdb-targets expects the name of at least one target"); ret.action = CompdbTargets::Action::kDisplayHelpAndExit; } else { ret.action = CompdbTargets::Action::kEmitCommands; for (int i = targets_begin; i < targets_end; ++i) { ret.targets.push_back(argv[i]); } } return ret; } }; void PrintCompdb(std::string const& directory, std::vector const& edges, const EvaluateCommandMode eval_mode) { putchar('['); bool first = true; for (const Edge* edge : edges) { if (edge->is_phony() || edge->inputs_.empty()) continue; if (!first) putchar(','); PrintOneCompdbObject(directory, edge, eval_mode); first = false; } puts("\n]"); } int NinjaMain::ToolCompilationDatabaseForTargets(const Options* options, int argc, char* argv[]) { auto compdb = CompdbTargets::CreateFromArgs(argc, argv); switch (compdb.action) { case CompdbTargets::Action::kDisplayHelpAndExit: { printf( "usage: ninja -t compdb [-hx] target [targets]\n" "\n" "options:\n" " -h display this help message\n" " -x expand @rspfile style response file invocations\n"); return 1; } case CompdbTargets::Action::kEmitCommands: { CommandCollector collector; for (const std::string& target_arg : compdb.targets) { std::string err; Node* node = CollectTarget(target_arg.c_str(), &err); if (!node) { Fatal("%s", err.c_str()); return 1; } if (!node->in_edge()) { Fatal( "'%s' is not a target " "(i.e. 
it is not an output of any `build` statement)", node->path().c_str()); } collector.CollectFrom(node); } std::string directory = GetWorkingDirectory(); PrintCompdb(directory, collector.in_edges, compdb.eval_mode); } break; } return 0; } int NinjaMain::ToolUrtle(const Options* options, int argc, char** argv) { // RLE encoded. const char* urtle = " 13 ,3;2!2;\n8 ,;<11!;\n5 `'<10!(2`'2!\n11 ,6;, `\\. `\\9 .,c13$ec,.\n6 " ",2;11!>; `. ,;!2> .e8$2\".2 \"?7$e.\n <:<8!'` 2.3,.2` ,3!' ;,(?7\";2!2'<" "; `?6$PF ,;,\n2 `'4!8;<3;5! J2$b,`!>;2!:2!`,d?b`!>\n26 `'-;,(<9!> $F3 )3.:!.2 d\"" "2 ) !>\n30 7`2'<3!- \"=-='5 .2 `2-=\",!>\n25 .ze9$er2 .,cd16$bc.'\n22 .e" "14$,26$.\n21 z45$c .\n20 J50$c\n20 14$P\"`?34$b\n20 14$ dbc `2\"?22$?7$c" "\n20 ?18$c.6 4\"8?4\" c8$P\n9 .2,.8 \"20$c.3 ._14 J9$\n .2,2c9$bec,.2 `?" "21$c.3`4%,3%,3 c8$P\"\n22$c2 2\"?21$bc2,.2` .2,c7$P2\",cb\n23$b bc,.2\"2" "?14$2F2\"5?2\",J5$P\" ,zd3$\n24$ ?$3?%3 `2\"2?12$bcucd3$P3\"2 2=7$\n23$P" "\" ,3;<5!>2;,. `4\"6?2\"2 ,9;, `\"?2$\n"; int count = 0; for (const char* p = urtle; *p; p++) { if ('0' <= *p && *p <= '9') { count = count*10 + *p - '0'; } else { for (int i = 0; i < max(count, 1); ++i) printf("%c", *p); count = 0; } } return 0; } /// Find the function to execute for \a tool_name and return it via \a func. /// Returns a Tool, or NULL if Ninja should exit. 
const Tool* ChooseTool(const string& tool_name) { static const Tool kTools[] = { { "browse", "browse dependency graph in a web browser", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolBrowse }, #ifdef _WIN32 { "msvc", "build helper for MSVC cl.exe (DEPRECATED)", Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolMSVC }, #endif { "clean", "clean built files", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolClean }, { "commands", "list all commands required to rebuild given targets", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCommands }, { "inputs", "list all inputs required to rebuild given targets", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolInputs}, { "multi-inputs", "print one or more sets of inputs required to build targets", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolMultiInputs}, { "deps", "show dependencies stored in the deps log", Tool::RUN_AFTER_LOGS, &NinjaMain::ToolDeps }, { "missingdeps", "check deps log dependencies on generated files", Tool::RUN_AFTER_LOGS, &NinjaMain::ToolMissingDeps }, { "graph", "output graphviz dot file for targets", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolGraph }, { "query", "show inputs/outputs for a path", Tool::RUN_AFTER_LOGS, &NinjaMain::ToolQuery }, { "targets", "list targets by their rule or depth in the DAG", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolTargets }, { "compdb", "dump JSON compilation database to stdout", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabase }, { "compdb-targets", "dump JSON compilation database for a given list of targets to stdout", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolCompilationDatabaseForTargets }, { "recompact", "recompacts ninja-internal data structures", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolRecompact }, { "restat", "restats all outputs in the build log", Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolRestat }, { "rules", "list all rules", Tool::RUN_AFTER_LOAD, &NinjaMain::ToolRules }, { "cleandead", "clean built files that are no longer produced by the manifest", Tool::RUN_AFTER_LOGS, &NinjaMain::ToolCleanDead }, { "urtle", NULL, 
Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolUrtle }, #ifdef _WIN32 { "wincodepage", "print the Windows code page used by ninja", Tool::RUN_AFTER_FLAGS, &NinjaMain::ToolWinCodePage }, #endif { NULL, NULL, Tool::RUN_AFTER_FLAGS, NULL } }; if (tool_name == "list") { printf("ninja subtools:\n"); for (const Tool* tool = &kTools[0]; tool->name; ++tool) { if (tool->desc) printf("%11s %s\n", tool->name, tool->desc); } return NULL; } for (const Tool* tool = &kTools[0]; tool->name; ++tool) { if (tool->name == tool_name) return tool; } vector words; for (const Tool* tool = &kTools[0]; tool->name; ++tool) words.push_back(tool->name); const char* suggestion = SpellcheckStringV(tool_name, words); if (suggestion) { Fatal("unknown tool '%s', did you mean '%s'?", tool_name.c_str(), suggestion); } else { Fatal("unknown tool '%s'", tool_name.c_str()); } return NULL; // Not reached. } /// Enable a debugging mode. Returns false if Ninja should exit instead /// of continuing. bool DebugEnable(const string& name) { if (name == "list") { printf("debugging modes:\n" " stats print operation counts/timing info\n" " explain explain what caused a command to execute\n" " keepdepfile don't delete depfiles after they're read by ninja\n" " keeprsp don't delete @response files on success\n" #ifdef _WIN32 " nostatcache don't batch stat() calls per directory and cache them\n" #endif "multiple modes can be enabled via -d FOO -d BAR\n"); return false; } else if (name == "stats") { g_metrics = new Metrics; return true; } else if (name == "explain") { g_explaining = true; return true; } else if (name == "keepdepfile") { g_keep_depfile = true; return true; } else if (name == "keeprsp") { g_keep_rsp = true; return true; } else if (name == "nostatcache") { g_experimental_statcache = false; return true; } else { const char* suggestion = SpellcheckString(name.c_str(), "stats", "explain", "keepdepfile", "keeprsp", "nostatcache", NULL); if (suggestion) { Error("unknown debug setting '%s', did you mean '%s'?", 
name.c_str(), suggestion); } else { Error("unknown debug setting '%s'", name.c_str()); } return false; } } /// Set a warning flag. Returns false if Ninja should exit instead of /// continuing. bool WarningEnable(const string& name, Options* options) { if (name == "list") { printf("warning flags:\n" " phonycycle={err,warn} phony build statement references itself\n" ); return false; } else if (name == "phonycycle=err") { options->phony_cycle_should_err = true; return true; } else if (name == "phonycycle=warn") { options->phony_cycle_should_err = false; return true; } else if (name == "dupbuild=err" || name == "dupbuild=warn") { Warning("deprecated warning 'dupbuild'"); return true; } else if (name == "depfilemulti=err" || name == "depfilemulti=warn") { Warning("deprecated warning 'depfilemulti'"); return true; } else { const char* suggestion = SpellcheckString(name.c_str(), "phonycycle=err", "phonycycle=warn", nullptr); if (suggestion) { Error("unknown warning flag '%s', did you mean '%s'?", name.c_str(), suggestion); } else { Error("unknown warning flag '%s'", name.c_str()); } return false; } } bool NinjaMain::OpenBuildLog(bool recompact_only) { string log_path = ".ninja_log"; if (!build_dir_.empty()) log_path = build_dir_ + "/" + log_path; string err; const LoadStatus status = build_log_.Load(log_path, &err); if (status == LOAD_ERROR) { Error("loading build log %s: %s", log_path.c_str(), err.c_str()); return false; } if (!err.empty()) { // Hack: Load() can return a warning via err by returning LOAD_SUCCESS. Warning("%s", err.c_str()); err.clear(); } if (recompact_only) { if (status == LOAD_NOT_FOUND) { return true; } bool success = build_log_.Recompact(log_path, *this, &err); if (!success) Error("failed recompaction: %s", err.c_str()); return success; } if (!config_.dry_run) { if (!build_log_.OpenForWrite(log_path, *this, &err)) { Error("opening build log: %s", err.c_str()); return false; } } return true; } /// Open the deps log: load it, then open for writing. 
/// @return false on error. bool NinjaMain::OpenDepsLog(bool recompact_only) { string path = ".ninja_deps"; if (!build_dir_.empty()) path = build_dir_ + "/" + path; string err; const LoadStatus status = deps_log_.Load(path, &state_, &err); if (status == LOAD_ERROR) { Error("loading deps log %s: %s", path.c_str(), err.c_str()); return false; } if (!err.empty()) { // Hack: Load() can return a warning via err by returning LOAD_SUCCESS. Warning("%s", err.c_str()); err.clear(); } if (recompact_only) { if (status == LOAD_NOT_FOUND) { return true; } bool success = deps_log_.Recompact(path, &err); if (!success) Error("failed recompaction: %s", err.c_str()); return success; } if (!config_.dry_run) { if (!deps_log_.OpenForWrite(path, &err)) { Error("opening deps log: %s", err.c_str()); return false; } } return true; } void NinjaMain::DumpMetrics() { g_metrics->Report(); printf("\n"); int count = (int)state_.paths_.size(); int buckets = (int)state_.paths_.bucket_count(); printf("path->node hash load %.2f (%d entries / %d buckets)\n", count / (double) buckets, count, buckets); } bool NinjaMain::EnsureBuildDirExists() { build_dir_ = state_.bindings_.LookupVariable("builddir"); if (!build_dir_.empty() && !config_.dry_run) { if (!disk_interface_.MakeDirs(build_dir_ + "/.") && errno != EEXIST) { Error("creating build directory %s: %s", build_dir_.c_str(), strerror(errno)); return false; } } return true; } std::unique_ptr NinjaMain::SetupJobserverClient( Status* status) { // Empty result by default. std::unique_ptr result; // If dry-run or explicit job count, don't even look at MAKEFLAGS if (config_.disable_jobserver_client) return result; const char* makeflags = getenv("MAKEFLAGS"); if (!makeflags) { // MAKEFLAGS is not defined. return result; } std::string err; Jobserver::Config jobserver_config; if (!Jobserver::ParseNativeMakeFlagsValue(makeflags, &jobserver_config, &err)) { // MAKEFLAGS is defined but could not be parsed correctly. 
if (config_.verbosity > BuildConfig::QUIET) status->Warning("Ignoring jobserver: %s [%s]", err.c_str(), makeflags); return result; } if (!jobserver_config.HasMode()) { // MAKEFLAGS is defined, but does not describe a jobserver mode. return result; } if (config_.verbosity > BuildConfig::NO_STATUS_UPDATE) { status->Info("Jobserver mode detected: %s", makeflags); } result = Jobserver::Client::Create(jobserver_config, &err); if (!result.get()) { // Jobserver client initialization failed !? if (config_.verbosity > BuildConfig::QUIET) status->Error("Could not initialize jobserver: %s", err.c_str()); } return result; } ExitStatus NinjaMain::RunBuild(int argc, char** argv, Status* status) { std::string err; std::vector targets; if (!CollectTargetsFromArgs(argc, argv, &targets, &err)) { status->Error("%s", err.c_str()); return ExitFailure; } disk_interface_.AllowStatCache(g_experimental_statcache); // Detect jobserver context and inject Jobserver::Client into the builder // if needed. std::unique_ptr jobserver_client = SetupJobserverClient(status); Builder builder(&state_, config_, &build_log_, &deps_log_, &disk_interface_, status, start_time_millis_); if (jobserver_client.get()) { builder.SetJobserverClient(std::move(jobserver_client)); } for (size_t i = 0; i < targets.size(); ++i) { if (!builder.AddTarget(targets[i], &err)) { if (!err.empty()) { status->Error("%s", err.c_str()); return ExitFailure; } else { // Added a target that is already up-to-date; not really // an error. } } } // Make sure restat rules do not see stale timestamps. 
disk_interface_.AllowStatCache(false); if (builder.AlreadyUpToDate()) { if (config_.verbosity != BuildConfig::NO_STATUS_UPDATE) { status->Info("no work to do."); } return ExitSuccess; } ExitStatus exit_status = builder.Build(&err); if (exit_status != ExitSuccess) { status->Info("build stopped: %s.", err.c_str()); if (err.find("interrupted by user") != string::npos) { return ExitInterrupted; } } return exit_status; } #ifdef _MSC_VER /// This handler processes fatal crashes that you can't catch /// Test example: C++ exception in a stack-unwind-block /// Real-world example: ninja launched a compiler to process a tricky /// C++ input file. The compiler got itself into a state where it /// generated 3 GB of output and caused ninja to crash. void TerminateHandler() { CreateWin32MiniDump(NULL); Fatal("terminate handler called"); } /// On Windows, we want to prevent error dialogs in case of exceptions. /// This function handles the exception, and writes a minidump. int ExceptionFilter(unsigned int code, struct _EXCEPTION_POINTERS *ep) { Error("exception: 0x%X", code); // e.g. EXCEPTION_ACCESS_VIOLATION fflush(stderr); CreateWin32MiniDump(ep); return EXCEPTION_EXECUTE_HANDLER; } #endif // _MSC_VER class DeferGuessParallelism { public: bool needGuess; BuildConfig* config; DeferGuessParallelism(BuildConfig* config) : needGuess(true), config(config) {} void Refresh() { if (needGuess) { needGuess = false; config->parallelism = GuessParallelism(); } } ~DeferGuessParallelism() { Refresh(); } }; /// Parse argv for command-line options. /// Returns an exit code, or -1 if Ninja should continue. 
int ReadFlags(int* argc, char*** argv, Options* options, BuildConfig* config) { DeferGuessParallelism deferGuessParallelism(config); enum { OPT_VERSION = 1, OPT_QUIET = 2 }; const option kLongOptions[] = { { "help", no_argument, NULL, 'h' }, { "version", no_argument, NULL, OPT_VERSION }, { "verbose", no_argument, NULL, 'v' }, { "quiet", no_argument, NULL, OPT_QUIET }, { NULL, 0, NULL, 0 } }; int opt; while (!options->tool && (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vw:C:h", kLongOptions, NULL)) != -1) { switch (opt) { case 'd': if (!DebugEnable(optarg)) return 1; break; case 'f': options->input_file = optarg; break; case 'j': { char* end; long value = strtol(optarg, &end, 10); if (*end != 0 || value < 0) Fatal("invalid -j parameter"); // We want to run N jobs in parallel. For N = 0, INT_MAX // is close enough to infinite for most sane builds. config->parallelism = static_cast((value > 0 && value < INT_MAX) ? value : INT_MAX); config->disable_jobserver_client = true; deferGuessParallelism.needGuess = false; break; } case 'k': { char* end; long value = strtol(optarg, &end, 10); if (*end != 0) Fatal("-k parameter not numeric; did you mean -k 0?"); // We want to go until N jobs fail, which means we should allow // N failures and then stop. For N <= 0, INT_MAX is close enough // to infinite for most sane builds. config->failures_allowed = static_cast((value > 0 && value < INT_MAX) ? 
value : INT_MAX); break; } case 'l': { char* end; double value = strtod(optarg, &end); if (end == optarg) Fatal("-l parameter not numeric: did you mean -l 0.0?"); config->max_load_average = value; break; } case 'n': config->dry_run = true; config->disable_jobserver_client = true; break; case 't': options->tool = ChooseTool(optarg); if (!options->tool) return 0; break; case 'v': config->verbosity = BuildConfig::VERBOSE; break; case OPT_QUIET: config->verbosity = BuildConfig::NO_STATUS_UPDATE; break; case 'w': if (!WarningEnable(optarg, options)) return 1; break; case 'C': options->working_dir = optarg; break; case OPT_VERSION: printf("%s\n", kNinjaVersion); return 0; case 'h': default: deferGuessParallelism.Refresh(); Usage(*config); return 1; } } *argv += optind; *argc -= optind; return -1; } NORETURN void real_main(int argc, char** argv) { // Use exit() instead of return in this function to avoid potentially // expensive cleanup when destructing NinjaMain. BuildConfig config; Options options = {}; options.input_file = "build.ninja"; setvbuf(stdout, NULL, _IOLBF, BUFSIZ); const char* ninja_command = argv[0]; int exit_code = ReadFlags(&argc, &argv, &options, &config); if (exit_code >= 0) exit(exit_code); Status* status = Status::factory(config); if (options.working_dir) { // The formatting of this string, complete with funny quotes, is // so Emacs can properly identify that the cwd has changed for // subsequent commands. // Don't print this if a tool is being used, so that tool output // can be piped into a file without this string showing up. if (!options.tool && config.verbosity != BuildConfig::NO_STATUS_UPDATE) status->Info("Entering directory `%s'", options.working_dir); if (chdir(options.working_dir) < 0) { Fatal("chdir to '%s' - %s", options.working_dir, strerror(errno)); } } if (options.tool && options.tool->when == Tool::RUN_AFTER_FLAGS) { // None of the RUN_AFTER_FLAGS actually use a NinjaMain, but it's needed // by other tools. 
NinjaMain ninja(ninja_command, config); exit((ninja.*options.tool->func)(&options, argc, argv)); } // Limit number of rebuilds, to prevent infinite loops. const int kCycleLimit = 100; for (int cycle = 1; cycle <= kCycleLimit; ++cycle) { NinjaMain ninja(ninja_command, config); ManifestParserOptions parser_opts; if (options.phony_cycle_should_err) { parser_opts.phony_cycle_action_ = kPhonyCycleActionError; } ManifestParser parser(&ninja.state_, &ninja.disk_interface_, parser_opts); string err; if (!parser.Load(options.input_file, &err)) { status->Error("%s", err.c_str()); exit(1); } if (options.tool && options.tool->when == Tool::RUN_AFTER_LOAD) exit((ninja.*options.tool->func)(&options, argc, argv)); if (!ninja.EnsureBuildDirExists()) exit(1); if (!ninja.OpenBuildLog() || !ninja.OpenDepsLog()) exit(1); if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS) exit((ninja.*options.tool->func)(&options, argc, argv)); // Attempt to rebuild the manifest before building anything else if (ninja.RebuildManifest(options.input_file, &err, status)) { // In dry_run mode the regeneration will succeed without changing the // manifest forever. Better to return immediately. if (config.dry_run) exit(0); // Start the build over with the new manifest. continue; } else if (!err.empty()) { status->Error("rebuilding '%s': %s", options.input_file, err.c_str()); exit(1); } ninja.ParsePreviousElapsedTimes(); ExitStatus result = ninja.RunBuild(argc, argv, status); if (g_metrics) ninja.DumpMetrics(); exit(result); } status->Error("manifest '%s' still dirty after %d tries, perhaps system time is not set", options.input_file, kCycleLimit); exit(1); } } // anonymous namespace int main(int argc, char** argv) { #if defined(_MSC_VER) // Set a handler to catch crashes not caught by the __try..__except // block (e.g. an exception in a stack-unwind-block). std::set_terminate(TerminateHandler); __try { // Running inside __try ... 
__except suppresses any Windows error // dialogs for errors such as bad_alloc. real_main(argc, argv); } __except(ExceptionFilter(GetExceptionCode(), GetExceptionInformation())) { // Common error situations return exitCode=1. 2 was chosen to // indicate a more serious problem. return 2; } #else real_main(argc, argv); #endif } ninja-1.13.2/src/ninja_test.cc000066400000000000000000000013421510764045400161370ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include int main(int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } ninja-1.13.2/src/parser.cc000066400000000000000000000032331510764045400152760ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "parser.h" #include "disk_interface.h" #include "metrics.h" using namespace std; bool Parser::Load(const string& filename, string* err, Lexer* parent) { // If |parent| is not NULL, metrics collection has been started by a parent // Parser::Load() in our call stack. Do not start a new one here to avoid // over-counting parsing times. METRIC_RECORD_IF(".ninja parse", parent == NULL); string contents; string read_err; if (file_reader_->ReadFile(filename, &contents, &read_err) != FileReader::Okay) { *err = "loading '" + filename + "': " + read_err; if (parent) parent->Error(string(*err), err); return false; } return Parse(filename, contents, err); } bool Parser::ExpectToken(Lexer::Token expected, string* err) { Lexer::Token token = lexer_.ReadToken(); if (token != expected) { string message = string("expected ") + Lexer::TokenName(expected); message += string(", got ") + Lexer::TokenName(token); message += Lexer::TokenErrorHint(expected); return lexer_.Error(message, err); } return true; } ninja-1.13.2/src/parser.h000066400000000000000000000026711510764045400151450ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_PARSER_H_ #define NINJA_PARSER_H_ #include #include "lexer.h" struct FileReader; struct State; /// Base class for parsers. 
struct Parser { Parser(State* state, FileReader* file_reader) : state_(state), file_reader_(file_reader) {} virtual ~Parser() {} /// Load and parse a file. bool Load(const std::string& filename, std::string* err, Lexer* parent = NULL); protected: /// If the next token is not \a expected, produce an error string /// saying "expected foo, got bar". bool ExpectToken(Lexer::Token expected, std::string* err); State* state_; FileReader* file_reader_; Lexer lexer_; private: /// Parse a file, given its contents as a string. virtual bool Parse(const std::string& filename, const std::string& input, std::string* err) = 0; }; #endif // NINJA_PARSER_H_ ninja-1.13.2/src/real_command_runner.cc000066400000000000000000000067241510764045400200240ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "build.h" #include "jobserver.h" #include "limits.h" #include "subprocess.h" struct RealCommandRunner : public CommandRunner { explicit RealCommandRunner(const BuildConfig& config, Jobserver::Client* jobserver) : config_(config), jobserver_(jobserver) {} size_t CanRunMore() const override; bool StartCommand(Edge* edge) override; bool WaitForCommand(Result* result) override; std::vector GetActiveEdges() override; void Abort() override; void ClearJobTokens() { if (jobserver_) { for (Edge* edge : GetActiveEdges()) { jobserver_->Release(std::move(edge->job_slot_)); } } } const BuildConfig& config_; SubprocessSet subprocs_; Jobserver::Client* jobserver_ = nullptr; std::map subproc_to_edge_; }; std::vector RealCommandRunner::GetActiveEdges() { std::vector edges; for (std::map::iterator e = subproc_to_edge_.begin(); e != subproc_to_edge_.end(); ++e) edges.push_back(e->second); return edges; } void RealCommandRunner::Abort() { ClearJobTokens(); subprocs_.Clear(); } size_t RealCommandRunner::CanRunMore() const { size_t subproc_number = subprocs_.running_.size() + subprocs_.finished_.size(); int64_t capacity = config_.parallelism - subproc_number; if (jobserver_) { // When a jobserver token pool is used, make the // capacity infinite, and let FindWork() limit jobs // through token acquisitions instead. capacity = INT_MAX; } if (config_.max_load_average > 0.0f) { int load_capacity = config_.max_load_average - GetLoadAverage(); if (load_capacity < capacity) capacity = load_capacity; } if (capacity < 0) capacity = 0; if (capacity == 0 && subprocs_.running_.empty()) // Ensure that we make progress. 
capacity = 1; return capacity; } bool RealCommandRunner::StartCommand(Edge* edge) { std::string command = edge->EvaluateCommand(); Subprocess* subproc = subprocs_.Add(command, edge->use_console()); if (!subproc) return false; subproc_to_edge_.insert(std::make_pair(subproc, edge)); return true; } bool RealCommandRunner::WaitForCommand(Result* result) { Subprocess* subproc; while ((subproc = subprocs_.NextFinished()) == NULL) { bool interrupted = subprocs_.DoWork(); if (interrupted) { result->status = ExitInterrupted; return false; } } result->status = subproc->Finish(); result->output = subproc->GetOutput(); std::map::iterator e = subproc_to_edge_.find(subproc); result->edge = e->second; subproc_to_edge_.erase(e); delete subproc; return true; } CommandRunner* CommandRunner::factory(const BuildConfig& config, Jobserver::Client* jobserver) { return new RealCommandRunner(config, jobserver); } ninja-1.13.2/src/state.cc000066400000000000000000000134661510764045400151330ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "state.h" #include #include #include "edit_distance.h" #include "graph.h" #include "util.h" using namespace std; void Pool::EdgeScheduled(const Edge& edge) { if (depth_ != 0) current_use_ += edge.weight(); } void Pool::EdgeFinished(const Edge& edge) { if (depth_ != 0) current_use_ -= edge.weight(); } void Pool::DelayEdge(Edge* edge) { assert(depth_ != 0); delayed_.insert(edge); } void Pool::RetrieveReadyEdges(EdgePriorityQueue* ready_queue) { DelayedEdges::iterator it = delayed_.begin(); while (it != delayed_.end()) { Edge* edge = *it; if (current_use_ + edge->weight() > depth_) break; ready_queue->push(edge); EdgeScheduled(*edge); ++it; } delayed_.erase(delayed_.begin(), it); } void Pool::Dump() const { printf("%s (%d/%d) ->\n", name_.c_str(), current_use_, depth_); for (DelayedEdges::const_iterator it = delayed_.begin(); it != delayed_.end(); ++it) { printf("\t"); (*it)->Dump(); } } Pool State::kDefaultPool("", 0); Pool State::kConsolePool("console", 1); State::State() { bindings_.AddRule(Rule::Phony()); AddPool(&kDefaultPool); AddPool(&kConsolePool); } void State::AddPool(Pool* pool) { assert(LookupPool(pool->name()) == NULL); pools_[pool->name()] = pool; } Pool* State::LookupPool(const string& pool_name) { map::iterator i = pools_.find(pool_name); if (i == pools_.end()) return NULL; return i->second; } Edge* State::AddEdge(const Rule* rule) { Edge* edge = new Edge(); edge->rule_ = rule; edge->pool_ = &State::kDefaultPool; edge->env_ = &bindings_; edge->id_ = edges_.size(); edges_.push_back(edge); return edge; } Node* State::GetNode(StringPiece path, uint64_t slash_bits) { Node* node = LookupNode(path); if (node) return node; node = new Node(path.AsString(), slash_bits); paths_[node->path()] = node; return node; } Node* State::LookupNode(StringPiece path) const { Paths::const_iterator i = paths_.find(path); if (i != paths_.end()) return i->second; return NULL; } Node* State::SpellcheckNode(const string& path) { const bool kAllowReplacements = true; 
const int kMaxValidEditDistance = 3; int min_distance = kMaxValidEditDistance + 1; Node* result = NULL; for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) { int distance = EditDistance( i->first, path, kAllowReplacements, kMaxValidEditDistance); if (distance < min_distance && i->second) { min_distance = distance; result = i->second; } } return result; } void State::AddIn(Edge* edge, StringPiece path, uint64_t slash_bits) { Node* node = GetNode(path, slash_bits); node->set_generated_by_dep_loader(false); edge->inputs_.push_back(node); node->AddOutEdge(edge); } bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits, std::string* err) { Node* node = GetNode(path, slash_bits); if (Edge* other = node->in_edge()) { if (other == edge) { *err = path.AsString() + " is defined as an output multiple times"; } else { *err = "multiple rules generate " + path.AsString(); } return false; } edge->outputs_.push_back(node); node->set_in_edge(edge); node->set_generated_by_dep_loader(false); return true; } void State::AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits) { Node* node = GetNode(path, slash_bits); edge->validations_.push_back(node); node->AddValidationOutEdge(edge); node->set_generated_by_dep_loader(false); } bool State::AddDefault(StringPiece path, string* err) { Node* node = LookupNode(path); if (!node) { *err = "unknown target '" + path.AsString() + "'"; return false; } defaults_.push_back(node); return true; } vector State::RootNodes(string* err) const { vector root_nodes; // Search for nodes with no output. 
for (vector::const_iterator e = edges_.begin(); e != edges_.end(); ++e) { for (vector::const_iterator out = (*e)->outputs_.begin(); out != (*e)->outputs_.end(); ++out) { if ((*out)->out_edges().empty()) root_nodes.push_back(*out); } } if (!edges_.empty() && root_nodes.empty()) *err = "could not determine root nodes of build graph"; return root_nodes; } vector State::DefaultNodes(string* err) const { return defaults_.empty() ? RootNodes(err) : defaults_; } void State::Reset() { for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) i->second->ResetState(); for (vector::iterator e = edges_.begin(); e != edges_.end(); ++e) { (*e)->outputs_ready_ = false; (*e)->deps_loaded_ = false; (*e)->mark_ = Edge::VisitNone; } } void State::Dump() { for (Paths::iterator i = paths_.begin(); i != paths_.end(); ++i) { Node* node = i->second; printf("%s %s [id:%d]\n", node->path().c_str(), node->status_known() ? (node->dirty() ? "dirty" : "clean") : "unknown", node->id()); } if (!pools_.empty()) { printf("resource_pools:\n"); for (map::const_iterator it = pools_.begin(); it != pools_.end(); ++it) { if (!it->second->name().empty()) { it->second->Dump(); } } } } ninja-1.13.2/src/state.h000066400000000000000000000112501510764045400147620ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef NINJA_STATE_H_ #define NINJA_STATE_H_ #include #include #include #include #include "eval_env.h" #include "graph.h" #include "hash_map.h" #include "util.h" struct Edge; struct Node; struct Rule; /// A pool for delayed edges. /// Pools are scoped to a State. Edges within a State will share Pools. A Pool /// will keep a count of the total 'weight' of the currently scheduled edges. If /// a Plan attempts to schedule an Edge which would cause the total weight to /// exceed the depth of the Pool, the Pool will enqueue the Edge instead of /// allowing the Plan to schedule it. The Pool will relinquish queued Edges when /// the total scheduled weight diminishes enough (i.e. when a scheduled edge /// completes). struct Pool { Pool(const std::string& name, int depth) : name_(name), current_use_(0), depth_(depth), delayed_() {} // A depth of 0 is infinite bool is_valid() const { return depth_ >= 0; } int depth() const { return depth_; } const std::string& name() const { return name_; } int current_use() const { return current_use_; } /// true if the Pool might delay this edge bool ShouldDelayEdge() const { return depth_ != 0; } /// informs this Pool that the given edge is committed to be run. /// Pool will count this edge as using resources from this pool. void EdgeScheduled(const Edge& edge); /// informs this Pool that the given edge is no longer runnable, and should /// relinquish its resources back to the pool void EdgeFinished(const Edge& edge); /// adds the given edge to this Pool to be delayed. void DelayEdge(Edge* edge); /// Pool will add zero or more edges to the ready_queue void RetrieveReadyEdges(EdgePriorityQueue* ready_queue); /// Dump the Pool and its edges (useful for debugging). void Dump() const; private: std::string name_; /// |current_use_| is the total of the weights of the edges which are /// currently scheduled in the Plan (i.e. the edges in Plan::ready_). 
int current_use_; int depth_; struct WeightedEdgeCmp { bool operator()(const Edge* a, const Edge* b) const { if (!a) return b; if (!b) return false; int weight_diff = a->weight() - b->weight(); if (weight_diff != 0) { return weight_diff < 0; } return EdgePriorityGreater()(a, b); } }; typedef std::set DelayedEdges; DelayedEdges delayed_; }; /// Global state (file status) for a single run. struct State { static Pool kDefaultPool; static Pool kConsolePool; State(); void AddPool(Pool* pool); Pool* LookupPool(const std::string& pool_name); Edge* AddEdge(const Rule* rule); Node* GetNode(StringPiece path, uint64_t slash_bits); Node* LookupNode(StringPiece path) const; Node* SpellcheckNode(const std::string& path); /// Add input / output / validation nodes to a given edge. This also /// ensures that the generated_by_dep_loader() flag for all these nodes /// is set to false, to indicate that they come from the input manifest. void AddIn(Edge* edge, StringPiece path, uint64_t slash_bits); bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits, std::string* err); void AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits); bool AddDefault(StringPiece path, std::string* error); /// Reset state. Keeps all nodes and edges, but restores them to the /// state where we haven't yet examined the disk for dirty state. void Reset(); /// Dump the nodes and Pools (useful for debugging). void Dump(); /// @return the root node(s) of the graph. (Root nodes have no output edges). /// @param error where to write the error message if somethings went wrong. std::vector RootNodes(std::string* error) const; std::vector DefaultNodes(std::string* error) const; /// Mapping of path -> Node. typedef ExternalStringHashMap::Type Paths; Paths paths_; /// All the pools used in the graph. std::map pools_; /// All the edges of the graph. 
std::vector edges_; BindingEnv bindings_; std::vector defaults_; }; #endif // NINJA_STATE_H_ ninja-1.13.2/src/state_test.cc000066400000000000000000000025321510764045400161620ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "graph.h" #include "state.h" #include "test.h" using namespace std; namespace { TEST(State, Basic) { State state; EvalString command; command.AddText("cat "); command.AddSpecial("in"); command.AddText(" > "); command.AddSpecial("out"); Rule* rule = new Rule("cat"); rule->AddBinding("command", command); state.bindings_.AddRule(std::unique_ptr(rule)); Edge* edge = state.AddEdge(rule); state.AddIn(edge, "in1", 0); state.AddIn(edge, "in2", 0); state.AddOut(edge, "out", 0, nullptr); EXPECT_EQ("cat in1 in2 > out", edge->EvaluateCommand()); EXPECT_FALSE(state.GetNode("in1", 0)->dirty()); EXPECT_FALSE(state.GetNode("in2", 0)->dirty()); EXPECT_FALSE(state.GetNode("out", 0)->dirty()); } } // namespace ninja-1.13.2/src/status.h000066400000000000000000000035601510764045400151720ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_STATUS_H_ #define NINJA_STATUS_H_ #include #include "exit_status.h" struct BuildConfig; struct Edge; struct Explanations; /// Abstract interface to object that tracks the status of a build: /// completion fraction, printing updates. struct Status { virtual void EdgeAddedToPlan(const Edge* edge) = 0; virtual void EdgeRemovedFromPlan(const Edge* edge) = 0; virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) = 0; virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis, int64_t end_time_millis, ExitStatus exit_code, const std::string& output) = 0; virtual void BuildStarted() = 0; virtual void BuildFinished() = 0; /// Set the Explanations instance to use to report explanations, /// argument can be nullptr if no explanations need to be printed /// (which is the default). virtual void SetExplanations(Explanations*) = 0; virtual void Info(const char* msg, ...) = 0; virtual void Warning(const char* msg, ...) = 0; virtual void Error(const char* msg, ...) = 0; virtual ~Status() { } /// creates the actual implementation static Status* factory(const BuildConfig&); }; #endif // NINJA_STATUS_H_ ninja-1.13.2/src/status_printer.cc000066400000000000000000000356141510764045400171000ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "status_printer.h" #ifdef _WIN32 #include "win32port.h" #else #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include #endif #include #include #ifdef _WIN32 #include #include #endif #include "build.h" #include "debug_flags.h" #include "exit_status.h" using namespace std; Status* Status::factory(const BuildConfig& config) { return new StatusPrinter(config); } StatusPrinter::StatusPrinter(const BuildConfig& config) : config_(config), started_edges_(0), finished_edges_(0), total_edges_(0), running_edges_(0), progress_status_format_(NULL), current_rate_(config.parallelism) { // Don't do anything fancy in verbose mode. if (config_.verbosity != BuildConfig::NORMAL) printer_.set_smart_terminal(false); progress_status_format_ = getenv("NINJA_STATUS"); if (!progress_status_format_) progress_status_format_ = "[%f/%t] "; } void StatusPrinter::EdgeAddedToPlan(const Edge* edge) { ++total_edges_; // Do we know how long did this edge take last time? if (edge->prev_elapsed_time_millis != -1) { ++eta_predictable_edges_total_; ++eta_predictable_edges_remaining_; eta_predictable_cpu_time_total_millis_ += edge->prev_elapsed_time_millis; eta_predictable_cpu_time_remaining_millis_ += edge->prev_elapsed_time_millis; } else ++eta_unpredictable_edges_remaining_; } void StatusPrinter::EdgeRemovedFromPlan(const Edge* edge) { --total_edges_; // Do we know how long did this edge take last time? 
if (edge->prev_elapsed_time_millis != -1) { --eta_predictable_edges_total_; --eta_predictable_edges_remaining_; eta_predictable_cpu_time_total_millis_ -= edge->prev_elapsed_time_millis; eta_predictable_cpu_time_remaining_millis_ -= edge->prev_elapsed_time_millis; } else --eta_unpredictable_edges_remaining_; } void StatusPrinter::BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) { ++started_edges_; ++running_edges_; time_millis_ = start_time_millis; if (edge->use_console() || printer_.is_smart_terminal()) PrintStatus(edge, start_time_millis); if (edge->use_console()) printer_.SetConsoleLocked(true); } void StatusPrinter::RecalculateProgressPrediction() { time_predicted_percentage_ = 0.0; // Sometimes, the previous and actual times may be wildly different. // For example, the previous build may have been fully recovered from ccache, // so it was blazing fast, while the new build no longer gets hits from ccache // for whatever reason, so it actually compiles code, which takes much longer. // We should detect such cases, and avoid using "wrong" previous times. // Note that we will only use the previous times if there are edges with // previous time knowledge remaining. bool use_previous_times = eta_predictable_edges_remaining_ && eta_predictable_cpu_time_remaining_millis_; // Iff we have sufficient statistical information for the current run, // that is, if we have took at least 15 sec AND finished at least 5% of edges, // we can check whether our performance so far matches the previous one. if (use_previous_times && total_edges_ && finished_edges_ && (time_millis_ >= 15 * 1e3) && (((double)finished_edges_ / total_edges_) >= 0.05)) { // Over the edges we've just run, how long did they take on average? double actual_average_cpu_time_millis = (double)cpu_time_millis_ / finished_edges_; // What is the previous average, for the edges with such knowledge? 
double previous_average_cpu_time_millis = (double)eta_predictable_cpu_time_total_millis_ / eta_predictable_edges_total_; double ratio = std::max(previous_average_cpu_time_millis, actual_average_cpu_time_millis) / std::min(previous_average_cpu_time_millis, actual_average_cpu_time_millis); // Let's say that the average times should differ by less than 10x use_previous_times = ratio < 10; } int edges_with_known_runtime = finished_edges_; if (use_previous_times) edges_with_known_runtime += eta_predictable_edges_remaining_; if (edges_with_known_runtime == 0) return; int edges_with_unknown_runtime = use_previous_times ? eta_unpredictable_edges_remaining_ : (total_edges_ - finished_edges_); // Given the time elapsed on the edges we've just run, // and the runtime of the edges for which we know previous runtime, // what's the edge's average runtime? int64_t edges_known_runtime_total_millis = cpu_time_millis_; if (use_previous_times) edges_known_runtime_total_millis += eta_predictable_cpu_time_remaining_millis_; double average_cpu_time_millis = (double)edges_known_runtime_total_millis / edges_with_known_runtime; // For the edges for which we do not have the previous runtime, // let's assume that their average runtime is the same as for the other edges, // and we therefore can predict their remaining runtime. double unpredictable_cpu_time_remaining_millis = average_cpu_time_millis * edges_with_unknown_runtime; // And therefore we can predict the remaining and total runtimes. double total_cpu_time_remaining_millis = unpredictable_cpu_time_remaining_millis; if (use_previous_times) total_cpu_time_remaining_millis += eta_predictable_cpu_time_remaining_millis_; double total_cpu_time_millis = cpu_time_millis_ + total_cpu_time_remaining_millis; if (total_cpu_time_millis == 0.0) return; // After that we can tell how much work we've completed, in time units. 
time_predicted_percentage_ = cpu_time_millis_ / total_cpu_time_millis; } void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t start_time_millis, int64_t end_time_millis, ExitStatus exit_code, const string& output) { time_millis_ = end_time_millis; ++finished_edges_; int64_t elapsed = end_time_millis - start_time_millis; cpu_time_millis_ += elapsed; // Do we know how long did this edge take last time? if (edge->prev_elapsed_time_millis != -1) { --eta_predictable_edges_remaining_; eta_predictable_cpu_time_remaining_millis_ -= edge->prev_elapsed_time_millis; } else --eta_unpredictable_edges_remaining_; if (edge->use_console()) printer_.SetConsoleLocked(false); if (config_.verbosity == BuildConfig::QUIET) return; if (!edge->use_console()) PrintStatus(edge, end_time_millis); --running_edges_; // Print the command that is spewing before printing its output. if (exit_code != ExitSuccess) { string outputs; for (vector::const_iterator o = edge->outputs_.begin(); o != edge->outputs_.end(); ++o) outputs += (*o)->path() + " "; string failed = "FAILED: [code=" + std::to_string(exit_code) + "] "; if (printer_.supports_color()) { printer_.PrintOnNewLine("\x1B[31m" + failed + "\x1B[0m" + outputs + "\n"); } else { printer_.PrintOnNewLine(failed + outputs + "\n"); } printer_.PrintOnNewLine(edge->EvaluateCommand() + "\n"); } if (!output.empty()) { #ifdef _WIN32 // Fix extra CR being added on Windows, writing out CR CR LF (#773) fflush(stdout); // Begin Windows extra CR fix _setmode(_fileno(stdout), _O_BINARY); #endif // ninja sets stdout and stderr of subprocesses to a pipe, to be able to // check if the output is empty. Some compilers, e.g. clang, check // isatty(stderr) to decide if they should print colored output. // To make it possible to use colored output with ninja, subprocesses should // be run with a flag that forces them to always print color escape codes. 
// To make sure these escape codes don't show up in a file if ninja's output // is piped to a file, ninja strips ansi escape codes again if it's not // writing to a |smart_terminal_|. // (Launching subprocesses in pseudo ttys doesn't work because there are // only a few hundred available on some systems, and ninja can launch // thousands of parallel compile commands.) if (printer_.supports_color() || output.find('\x1b') == std::string::npos) { printer_.PrintOnNewLine(output); } else { std::string final_output = StripAnsiEscapeCodes(output); printer_.PrintOnNewLine(final_output); } #ifdef _WIN32 fflush(stdout); _setmode(_fileno(stdout), _O_TEXT); // End Windows extra CR fix #endif } } void StatusPrinter::BuildStarted() { started_edges_ = 0; finished_edges_ = 0; running_edges_ = 0; } void StatusPrinter::BuildFinished() { printer_.SetConsoleLocked(false); printer_.PrintOnNewLine(""); } string StatusPrinter::FormatProgressStatus(const char* progress_status_format, int64_t time_millis) const { string out; char buf[32]; for (const char* s = progress_status_format; *s != '\0'; ++s) { if (*s == '%') { ++s; switch (*s) { case '%': out.push_back('%'); break; // Started edges. case 's': snprintf(buf, sizeof(buf), "%d", started_edges_); out += buf; break; // Total edges. case 't': snprintf(buf, sizeof(buf), "%d", total_edges_); out += buf; break; // Running edges. case 'r': { snprintf(buf, sizeof(buf), "%d", running_edges_); out += buf; break; } // Unstarted edges. case 'u': snprintf(buf, sizeof(buf), "%d", total_edges_ - started_edges_); out += buf; break; // Finished edges. case 'f': snprintf(buf, sizeof(buf), "%d", finished_edges_); out += buf; break; // Overall finished edges per second. case 'o': SnprintfRate(finished_edges_ / (time_millis_ / 1e3), buf, "%.1f"); out += buf; break; // Current rate, average over the last '-j' jobs. 
case 'c': current_rate_.UpdateRate(finished_edges_, time_millis_); SnprintfRate(current_rate_.rate(), buf, "%.1f"); out += buf; break; // Percentage of edges completed case 'p': { int percent = 0; if (finished_edges_ != 0 && total_edges_ != 0) percent = (100 * finished_edges_) / total_edges_; snprintf(buf, sizeof(buf), "%3i%%", percent); out += buf; break; } #define FORMAT_TIME_HMMSS(t) \ "%" PRId64 ":%02" PRId64 ":%02" PRId64 "", (t) / 3600, ((t) % 3600) / 60, \ (t) % 60 #define FORMAT_TIME_MMSS(t) "%02" PRId64 ":%02" PRId64 "", (t) / 60, (t) % 60 // Wall time case 'e': // elapsed, seconds case 'w': // elapsed, human-readable case 'E': // ETA, seconds case 'W': // ETA, human-readable { double elapsed_sec = time_millis_ / 1e3; double eta_sec = -1; // To be printed as "?". if (time_predicted_percentage_ != 0.0) { // So, we know that we've spent time_millis_ wall clock, // and that is time_predicted_percentage_ percent. // How much time will we need to complete 100%? double total_wall_time = time_millis_ / time_predicted_percentage_; // Naturally, that gives us the time remaining. eta_sec = (total_wall_time - time_millis_) / 1e3; } const bool print_with_hours = elapsed_sec >= 60 * 60 || eta_sec >= 60 * 60; double sec = -1; switch (*s) { case 'e': // elapsed, seconds case 'w': // elapsed, human-readable sec = elapsed_sec; break; case 'E': // ETA, seconds case 'W': // ETA, human-readable sec = eta_sec; break; } if (sec < 0) snprintf(buf, sizeof(buf), "?"); else { switch (*s) { case 'e': // elapsed, seconds case 'E': // ETA, seconds snprintf(buf, sizeof(buf), "%.3f", sec); break; case 'w': // elapsed, human-readable case 'W': // ETA, human-readable if (print_with_hours) snprintf(buf, sizeof(buf), FORMAT_TIME_HMMSS((int64_t)sec)); else snprintf(buf, sizeof(buf), FORMAT_TIME_MMSS((int64_t)sec)); break; } } out += buf; break; } // Percentage of time spent out of the predicted time total case 'P': { snprintf(buf, sizeof(buf), "%3i%%", (int)(100. 
* time_predicted_percentage_)); out += buf; break; } default: Fatal("unknown placeholder '%%%c' in $NINJA_STATUS", *s); return ""; } } else { out.push_back(*s); } } return out; } void StatusPrinter::PrintStatus(const Edge* edge, int64_t time_millis) { if (explanations_) { // Collect all explanations for the current edge's outputs. std::vector explanations; for (Node* output : edge->outputs_) { explanations_->LookupAndAppend(output, &explanations); } if (!explanations.empty()) { // Start a new line so that the first explanation does not append to the // status line. printer_.PrintOnNewLine(""); for (const auto& exp : explanations) { fprintf(stderr, "ninja explain: %s\n", exp.c_str()); } } } if (config_.verbosity == BuildConfig::QUIET || config_.verbosity == BuildConfig::NO_STATUS_UPDATE) return; RecalculateProgressPrediction(); bool force_full_command = config_.verbosity == BuildConfig::VERBOSE; string to_print = edge->GetBinding("description"); if (to_print.empty() || force_full_command) to_print = edge->GetBinding("command"); to_print = FormatProgressStatus(progress_status_format_, time_millis) + to_print; printer_.Print(to_print, force_full_command ? LinePrinter::FULL : LinePrinter::ELIDE); } void StatusPrinter::Warning(const char* msg, ...) { va_list ap; va_start(ap, msg); ::Warning(msg, ap); va_end(ap); } void StatusPrinter::Error(const char* msg, ...) { va_list ap; va_start(ap, msg); ::Error(msg, ap); va_end(ap); } void StatusPrinter::Info(const char* msg, ...) { va_list ap; va_start(ap, msg); ::Info(msg, ap); va_end(ap); } ninja-1.13.2/src/status_printer.h000066400000000000000000000103321510764045400167300ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include #include #include "exit_status.h" #include "explanations.h" #include "line_printer.h" #include "status.h" /// Implementation of the Status interface that prints the status as /// human-readable strings to stdout struct StatusPrinter : Status { explicit StatusPrinter(const BuildConfig& config); /// Callbacks for the Plan to notify us about adding/removing Edge's. void EdgeAddedToPlan(const Edge* edge) override; void EdgeRemovedFromPlan(const Edge* edge) override; void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) override; void BuildEdgeFinished(Edge* edge, int64_t start_time_millis, int64_t end_time_millis, ExitStatus exit_code, const std::string& output) override; void BuildStarted() override; void BuildFinished() override; void Info(const char* msg, ...) override; void Warning(const char* msg, ...) override; void Error(const char* msg, ...) override; /// Format the progress status string by replacing the placeholders. /// See the user manual for more information about the available /// placeholders. /// @param progress_status_format The format of the progress status. /// @param status The status of the edge. std::string FormatProgressStatus(const char* progress_status_format, int64_t time_millis) const; /// Set the |explanations_| pointer. Used to implement `-d explain`. 
void SetExplanations(Explanations* explanations) override { explanations_ = explanations; } private: void PrintStatus(const Edge* edge, int64_t time_millis); const BuildConfig& config_; int started_edges_, finished_edges_, total_edges_, running_edges_; /// How much wall clock elapsed so far? int64_t time_millis_ = 0; /// How much cpu clock elapsed so far? int64_t cpu_time_millis_ = 0; /// What percentage of predicted total time have elapsed already? double time_predicted_percentage_ = 0.0; /// Out of all the edges, for how many do we know previous time? int eta_predictable_edges_total_ = 0; /// And how much time did they all take? int64_t eta_predictable_cpu_time_total_millis_ = 0; /// Out of all the non-finished edges, for how many do we know previous time? int eta_predictable_edges_remaining_ = 0; /// And how much time will they all take? int64_t eta_predictable_cpu_time_remaining_millis_ = 0; /// For how many edges we don't know the previous run time? int eta_unpredictable_edges_remaining_ = 0; void RecalculateProgressPrediction(); /// Prints progress output. LinePrinter printer_; /// An optional Explanations pointer, used to implement `-d explain`. Explanations* explanations_ = nullptr; /// The custom progress status format to use. 
const char* progress_status_format_; template void SnprintfRate(double rate, char (&buf)[S], const char* format) const { if (rate == -1) snprintf(buf, S, "?"); else snprintf(buf, S, format, rate); } struct SlidingRateInfo { SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {} double rate() { return rate_; } void UpdateRate(int update_hint, int64_t time_millis) { if (update_hint == last_update_) return; last_update_ = update_hint; if (times_.size() == N) times_.pop(); times_.push(time_millis); if (times_.back() != times_.front()) rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3); } private: double rate_; const size_t N; std::queue times_; int last_update_; }; mutable SlidingRateInfo current_rate_; }; ninja-1.13.2/src/status_test.cc000066400000000000000000000023441510764045400163660ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "status.h" #include "test.h" TEST(StatusTest, StatusFormatElapsed) { BuildConfig config; StatusPrinter status(config); status.BuildStarted(); // Before any task is done, the elapsed time must be zero. EXPECT_EQ("[%/e0.000]", status.FormatProgressStatus("[%%/e%e]", 0)); // Before any task is done, the elapsed time must be zero. 
EXPECT_EQ("[%/e00:00]", status.FormatProgressStatus("[%%/e%w]", 0)); } TEST(StatusTest, StatusFormatReplacePlaceholder) { BuildConfig config; StatusPrinter status(config); EXPECT_EQ("[%/s0/t0/r0/u0/f0]", status.FormatProgressStatus("[%%/s%s/t%t/r%r/u%u/f%f]", 0)); } ninja-1.13.2/src/string_piece.h000066400000000000000000000037431510764045400163250ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_STRINGPIECE_H_ #define NINJA_STRINGPIECE_H_ #include #include /// StringPiece represents a slice of a string whose memory is managed /// externally. It is useful for reducing the number of std::strings /// we need to allocate. struct StringPiece { typedef const char* const_iterator; StringPiece() : str_(NULL), len_(0) {} /// The constructors intentionally allow for implicit conversions. StringPiece(const std::string& str) : str_(str.data()), len_(str.size()) {} StringPiece(const char* str) : str_(str), len_(strlen(str)) {} StringPiece(const char* str, size_t len) : str_(str), len_(len) {} bool operator==(const StringPiece& other) const { return len_ == other.len_ && memcmp(str_, other.str_, len_) == 0; } bool operator!=(const StringPiece& other) const { return !(*this == other); } /// Convert the slice into a full-fledged std::string, copying the /// data into a new string. std::string AsString() const { return len_ ? 
std::string(str_, len_) : std::string(); } const_iterator begin() const { return str_; } const_iterator end() const { return str_ + len_; } char operator[](size_t pos) const { return str_[pos]; } size_t size() const { return len_; } size_t empty() const { return len_ == 0; } const char* str_; size_t len_; }; #endif // NINJA_STRINGPIECE_H_ ninja-1.13.2/src/string_piece_util.cc000066400000000000000000000035601510764045400175150ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "string_piece_util.h" #include #include #include using namespace std; vector SplitStringPiece(StringPiece input, char sep) { vector elems; elems.reserve(count(input.begin(), input.end(), sep) + 1); StringPiece::const_iterator pos = input.begin(); for (;;) { const char* next_pos = find(pos, input.end(), sep); if (next_pos == input.end()) { elems.push_back(StringPiece(pos, input.end() - pos)); break; } elems.push_back(StringPiece(pos, next_pos - pos)); pos = next_pos + 1; } return elems; } string JoinStringPiece(const vector& list, char sep) { if (list.empty()) { return ""; } string ret; { size_t cap = list.size() - 1; for (size_t i = 0; i < list.size(); ++i) { cap += list[i].len_; } ret.reserve(cap); } for (size_t i = 0; i < list.size(); ++i) { if (i != 0) { ret += sep; } ret.append(list[i].str_, list[i].len_); } return ret; } bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) { if (a.len_ != b.len_) { return false; } for (size_t i = 0; i < a.len_; ++i) { if (ToLowerASCII(a.str_[i]) != ToLowerASCII(b.str_[i])) { return false; } } return true; } ninja-1.13.2/src/string_piece_util.h000066400000000000000000000021021510764045400173460ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef NINJA_STRINGPIECE_UTIL_H_ #define NINJA_STRINGPIECE_UTIL_H_ #include #include #include "string_piece.h" std::vector SplitStringPiece(StringPiece input, char sep); std::string JoinStringPiece(const std::vector& list, char sep); inline char ToLowerASCII(char c) { return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c; } bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b); #endif // NINJA_STRINGPIECE_UTIL_H_ ninja-1.13.2/src/string_piece_util_test.cc000066400000000000000000000063471510764045400205620ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "string_piece_util.h" #include "test.h" using namespace std; TEST(StringPieceUtilTest, SplitStringPiece) { { string input("a:b:c"); vector list = SplitStringPiece(input, ':'); EXPECT_EQ(list.size(), size_t(3)); EXPECT_EQ(list[0], "a"); EXPECT_EQ(list[1], "b"); EXPECT_EQ(list[2], "c"); } { string empty; vector list = SplitStringPiece(empty, ':'); EXPECT_EQ(list.size(), size_t(1)); EXPECT_EQ(list[0], ""); } { string one("a"); vector list = SplitStringPiece(one, ':'); EXPECT_EQ(list.size(), size_t(1)); EXPECT_EQ(list[0], "a"); } { string sep_only(":"); vector list = SplitStringPiece(sep_only, ':'); EXPECT_EQ(list.size(), size_t(2)); EXPECT_EQ(list[0], ""); EXPECT_EQ(list[1], ""); } { string sep(":a:b:c:"); vector list = SplitStringPiece(sep, ':'); EXPECT_EQ(list.size(), size_t(5)); EXPECT_EQ(list[0], ""); EXPECT_EQ(list[1], "a"); EXPECT_EQ(list[2], "b"); EXPECT_EQ(list[3], "c"); EXPECT_EQ(list[4], ""); } } TEST(StringPieceUtilTest, JoinStringPiece) { { string input("a:b:c"); vector list = SplitStringPiece(input, ':'); EXPECT_EQ("a:b:c", JoinStringPiece(list, ':')); EXPECT_EQ("a/b/c", JoinStringPiece(list, '/')); } { string empty; vector list = SplitStringPiece(empty, ':'); EXPECT_EQ("", JoinStringPiece(list, ':')); } { vector empty_list; EXPECT_EQ("", JoinStringPiece(empty_list, ':')); } { string one("a"); vector single_list = SplitStringPiece(one, ':'); EXPECT_EQ("a", JoinStringPiece(single_list, ':')); } { string sep(":a:b:c:"); vector list = SplitStringPiece(sep, ':'); EXPECT_EQ(":a:b:c:", JoinStringPiece(list, ':')); } } TEST(StringPieceUtilTest, ToLowerASCII) { EXPECT_EQ('a', ToLowerASCII('A')); EXPECT_EQ('z', ToLowerASCII('Z')); EXPECT_EQ('a', ToLowerASCII('a')); EXPECT_EQ('z', ToLowerASCII('z')); EXPECT_EQ('/', ToLowerASCII('/')); EXPECT_EQ('1', ToLowerASCII('1')); } TEST(StringPieceUtilTest, EqualsCaseInsensitiveASCII) { EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "abc")); EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "ABC")); 
EXPECT_TRUE(EqualsCaseInsensitiveASCII("abc", "aBc")); EXPECT_TRUE(EqualsCaseInsensitiveASCII("AbC", "aBc")); EXPECT_TRUE(EqualsCaseInsensitiveASCII("", "")); EXPECT_FALSE(EqualsCaseInsensitiveASCII("a", "ac")); EXPECT_FALSE(EqualsCaseInsensitiveASCII("/", "\\")); EXPECT_FALSE(EqualsCaseInsensitiveASCII("1", "10")); } ninja-1.13.2/src/subprocess-posix.cc000066400000000000000000000313411510764045400173330ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "exit_status.h" #include "subprocess.h" #include #include #include #include #include #include #include #include #include #if defined(USE_PPOLL) #include #else #include #endif extern char** environ; #include "util.h" using namespace std; namespace { ExitStatus ParseExitStatus(int status); } Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1), use_console_(use_console) { } Subprocess::~Subprocess() { if (fd_ >= 0) close(fd_); // Reap child if forgotten. if (pid_ != -1) Finish(); } bool Subprocess::Start(SubprocessSet* set, const string& command) { int subproc_stdout_fd = -1; if (use_console_) { fd_ = -1; } else { int output_pipe[2]; if (pipe(output_pipe) < 0) Fatal("pipe: %s", strerror(errno)); fd_ = output_pipe[0]; subproc_stdout_fd = output_pipe[1]; #if !defined(USE_PPOLL) // If available, we use ppoll in DoWork(); otherwise we use pselect // and so must avoid overly-large FDs. 
if (fd_ >= static_cast(FD_SETSIZE)) Fatal("pipe: %s", strerror(EMFILE)); #endif // !USE_PPOLL SetCloseOnExec(fd_); } posix_spawn_file_actions_t action; int err = posix_spawn_file_actions_init(&action); if (err != 0) Fatal("posix_spawn_file_actions_init: %s", strerror(err)); if (!use_console_) { err = posix_spawn_file_actions_addclose(&action, fd_); if (err != 0) Fatal("posix_spawn_file_actions_addclose: %s", strerror(err)); } posix_spawnattr_t attr; err = posix_spawnattr_init(&attr); if (err != 0) Fatal("posix_spawnattr_init: %s", strerror(err)); short flags = 0; flags |= POSIX_SPAWN_SETSIGMASK; err = posix_spawnattr_setsigmask(&attr, &set->old_mask_); if (err != 0) Fatal("posix_spawnattr_setsigmask: %s", strerror(err)); // Signals which are set to be caught in the calling process image are set to // default action in the new process image, so no explicit // POSIX_SPAWN_SETSIGDEF parameter is needed. if (!use_console_) { // Put the child in its own process group, so ctrl-c won't reach it. flags |= POSIX_SPAWN_SETPGROUP; // No need to posix_spawnattr_setpgroup(&attr, 0), it's the default. // Open /dev/null over stdin. 
err = posix_spawn_file_actions_addopen(&action, 0, "/dev/null", O_RDONLY, 0); if (err != 0) { Fatal("posix_spawn_file_actions_addopen: %s", strerror(err)); } err = posix_spawn_file_actions_adddup2(&action, subproc_stdout_fd, 1); if (err != 0) Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err)); err = posix_spawn_file_actions_adddup2(&action, subproc_stdout_fd, 2); if (err != 0) Fatal("posix_spawn_file_actions_adddup2: %s", strerror(err)); err = posix_spawn_file_actions_addclose(&action, subproc_stdout_fd); if (err != 0) Fatal("posix_spawn_file_actions_addclose: %s", strerror(err)); } #ifdef POSIX_SPAWN_USEVFORK flags |= POSIX_SPAWN_USEVFORK; #endif err = posix_spawnattr_setflags(&attr, flags); if (err != 0) Fatal("posix_spawnattr_setflags: %s", strerror(err)); const char* spawned_args[] = { "/bin/sh", "-c", command.c_str(), NULL }; err = posix_spawn(&pid_, "/bin/sh", &action, &attr, const_cast(spawned_args), environ); if (err != 0) Fatal("posix_spawn: %s", strerror(err)); err = posix_spawnattr_destroy(&attr); if (err != 0) Fatal("posix_spawnattr_destroy: %s", strerror(err)); err = posix_spawn_file_actions_destroy(&action); if (err != 0) Fatal("posix_spawn_file_actions_destroy: %s", strerror(err)); if (!use_console_) close(subproc_stdout_fd); return true; } void Subprocess::OnPipeReady() { char buf[4 << 10]; ssize_t len = read(fd_, buf, sizeof(buf)); if (len > 0) { buf_.append(buf, len); } else { if (len < 0) Fatal("read: %s", strerror(errno)); close(fd_); fd_ = -1; } } bool Subprocess::TryFinish(int waitpid_options) { assert(pid_ != -1); int status, ret; while ((ret = waitpid(pid_, &status, waitpid_options)) < 0) { if (errno != EINTR) Fatal("waitpid(%d): %s", pid_, strerror(errno)); } if (ret == 0) return false; // Subprocess is alive (WNOHANG-only). pid_ = -1; exit_status_ = ParseExitStatus(status); return true; // Subprocess has terminated. 
} ExitStatus Subprocess::Finish() { if (pid_ != -1) { TryFinish(0); assert(pid_ == -1); } return exit_status_; } namespace { ExitStatus ParseExitStatus(int status) { #ifdef _AIX if (WIFEXITED(status) && WEXITSTATUS(status) & 0x80) { // Map the shell's exit code used for signal failure (128 + signal) to the // status code expected by AIX WIFSIGNALED and WTERMSIG macros which, unlike // other systems, uses a different bit layout. int signal = WEXITSTATUS(status) & 0x7f; status = (signal << 16) | signal; } #endif if (WIFEXITED(status)) { // propagate the status transparently return static_cast(WEXITSTATUS(status)); } if (WIFSIGNALED(status)) { if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM || WTERMSIG(status) == SIGHUP) return ExitInterrupted; } // At this point, we exit with any other signal+128 return static_cast(status + 128); } } // anonymous namespace bool Subprocess::Done() const { // Console subprocesses share console with ninja, and we consider them done // when they exit. // For other processes, we consider them done when we have consumed all their // output and closed their associated pipe. return (use_console_ && pid_ == -1) || (!use_console_ && fd_ == -1); } const string& Subprocess::GetOutput() const { return buf_; } volatile sig_atomic_t SubprocessSet::interrupted_; volatile sig_atomic_t SubprocessSet::s_sigchld_received; void SubprocessSet::SetInterruptedFlag(int signum) { interrupted_ = signum; } void SubprocessSet::SigChldHandler(int signo, siginfo_t* info, void* context) { s_sigchld_received = 1; } void SubprocessSet::HandlePendingInterruption() { sigset_t pending; sigemptyset(&pending); if (sigpending(&pending) == -1) { perror("ninja: sigpending"); return; } if (sigismember(&pending, SIGINT)) interrupted_ = SIGINT; else if (sigismember(&pending, SIGTERM)) interrupted_ = SIGTERM; else if (sigismember(&pending, SIGHUP)) interrupted_ = SIGHUP; } SubprocessSet::SubprocessSet() { // Block all these signals. 
// Their handlers will only be enabled during ppoll/pselect(). sigset_t set; sigemptyset(&set); sigaddset(&set, SIGINT); sigaddset(&set, SIGTERM); sigaddset(&set, SIGHUP); sigaddset(&set, SIGCHLD); if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0) Fatal("sigprocmask: %s", strerror(errno)); struct sigaction act; memset(&act, 0, sizeof(act)); act.sa_handler = SetInterruptedFlag; if (sigaction(SIGINT, &act, &old_int_act_) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigaction(SIGTERM, &act, &old_term_act_) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigaction(SIGHUP, &act, &old_hup_act_) < 0) Fatal("sigaction: %s", strerror(errno)); memset(&act, 0, sizeof(act)); act.sa_flags = SA_SIGINFO | SA_NOCLDSTOP; act.sa_sigaction = SigChldHandler; if (sigaction(SIGCHLD, &act, &old_chld_act_) < 0) Fatal("sigaction: %s", strerror(errno)); } // Reaps console processes that have exited and moves them from the running set // to the finished set. void SubprocessSet::CheckConsoleProcessTerminated() { if (!s_sigchld_received) return; for (auto i = running_.begin(); i != running_.end(); ) { if ((*i)->use_console_ && (*i)->TryFinish(WNOHANG)) { finished_.push(*i); i = running_.erase(i); } else { ++i; } } } SubprocessSet::~SubprocessSet() { Clear(); if (sigaction(SIGINT, &old_int_act_, 0) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigaction(SIGTERM, &old_term_act_, 0) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigaction(SIGHUP, &old_hup_act_, 0) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigaction(SIGCHLD, &old_chld_act_, 0) < 0) Fatal("sigaction: %s", strerror(errno)); if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0) Fatal("sigprocmask: %s", strerror(errno)); } Subprocess *SubprocessSet::Add(const string& command, bool use_console) { Subprocess *subprocess = new Subprocess(use_console); if (!subprocess->Start(this, command)) { delete subprocess; return 0; } running_.push_back(subprocess); return subprocess; } #ifdef USE_PPOLL bool SubprocessSet::DoWork() { 
vector fds; nfds_t nfds = 0; for (vector::iterator i = running_.begin(); i != running_.end(); ++i) { int fd = (*i)->fd_; if (fd < 0) continue; pollfd pfd = { fd, POLLIN | POLLPRI, 0 }; fds.push_back(pfd); ++nfds; } if (nfds == 0) { // Add a dummy entry to prevent using an empty pollfd vector. // ppoll() allows to do this by setting fd < 0. pollfd pfd = { -1, 0, 0 }; fds.push_back(pfd); ++nfds; } interrupted_ = 0; s_sigchld_received = 0; int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_); // Note: This can remove console processes from the running set, but that is // not a problem for the pollfd set, as console processes are not part of the // pollfd set (they don't have a fd). CheckConsoleProcessTerminated(); if (ret == -1) { if (errno != EINTR) { perror("ninja: ppoll"); return false; } return IsInterrupted(); } // ppoll/pselect prioritizes file descriptor events over a signal delivery. // However, if the user is trying to quit ninja, we should react as fast as // possible. HandlePendingInterruption(); if (IsInterrupted()) return true; // Iterate through both the pollfd set and the running set. // All valid fds in the running set are in the pollfd, in the same order. nfds_t cur_nfd = 0; for (vector::iterator i = running_.begin(); i != running_.end(); ) { int fd = (*i)->fd_; if (fd < 0) { ++i; continue; } assert(fd == fds[cur_nfd].fd); if (fds[cur_nfd++].revents) { (*i)->OnPipeReady(); if ((*i)->Done()) { finished_.push(*i); i = running_.erase(i); continue; } } ++i; } return IsInterrupted(); } #else // !defined(USE_PPOLL) bool SubprocessSet::DoWork() { fd_set set; int nfds = 0; FD_ZERO(&set); for (vector::iterator i = running_.begin(); i != running_.end(); ++i) { int fd = (*i)->fd_; if (fd >= 0) { FD_SET(fd, &set); if (nfds < fd+1) nfds = fd+1; } } interrupted_ = 0; s_sigchld_received = 0; int ret = pselect(nfds, (nfds > 0 ? 
&set : nullptr), 0, 0, 0, &old_mask_); CheckConsoleProcessTerminated(); if (ret == -1) { if (errno != EINTR) { perror("ninja: pselect"); return false; } return IsInterrupted(); } // ppoll/pselect prioritizes file descriptor events over a signal delivery. // However, if the user is trying to quit ninja, we should react as fast as // possible. HandlePendingInterruption(); if (IsInterrupted()) return true; for (vector::iterator i = running_.begin(); i != running_.end(); ) { int fd = (*i)->fd_; if (fd >= 0 && FD_ISSET(fd, &set)) { (*i)->OnPipeReady(); if ((*i)->Done()) { finished_.push(*i); i = running_.erase(i); continue; } } ++i; } return IsInterrupted(); } #endif // !defined(USE_PPOLL) Subprocess* SubprocessSet::NextFinished() { if (finished_.empty()) return NULL; Subprocess* subproc = finished_.front(); finished_.pop(); return subproc; } void SubprocessSet::Clear() { for (vector::iterator i = running_.begin(); i != running_.end(); ++i) // Since the foreground process is in our process group, it will receive // the interruption signal (i.e. SIGINT or SIGTERM) at the same time as us. if (!(*i)->use_console_) kill(-(*i)->pid_, interrupted_); for (vector::iterator i = running_.begin(); i != running_.end(); ++i) delete *i; running_.clear(); } ninja-1.13.2/src/subprocess-win32.cc000066400000000000000000000223301510764045400171310ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "exit_status.h" #include "subprocess.h" #include #include #include #include "util.h" using namespace std; Subprocess::Subprocess(bool use_console) : child_(NULL) , overlapped_(), is_reading_(false), use_console_(use_console) { } Subprocess::~Subprocess() { if (pipe_) { if (!CloseHandle(pipe_)) Win32Fatal("CloseHandle"); } // Reap child if forgotten. if (child_) Finish(); } HANDLE Subprocess::SetupPipe(HANDLE ioport) { char pipe_name[100]; snprintf(pipe_name, sizeof(pipe_name), "\\\\.\\pipe\\ninja_pid%lu_sp%p", GetCurrentProcessId(), this); pipe_ = ::CreateNamedPipeA(pipe_name, PIPE_ACCESS_INBOUND | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE, PIPE_UNLIMITED_INSTANCES, 0, 0, INFINITE, NULL); if (pipe_ == INVALID_HANDLE_VALUE) Win32Fatal("CreateNamedPipe"); if (!CreateIoCompletionPort(pipe_, ioport, (ULONG_PTR)this, 0)) Win32Fatal("CreateIoCompletionPort"); memset(&overlapped_, 0, sizeof(overlapped_)); if (!ConnectNamedPipe(pipe_, &overlapped_) && GetLastError() != ERROR_IO_PENDING) { Win32Fatal("ConnectNamedPipe"); } // Get the write end of the pipe as a handle inheritable across processes. HANDLE output_write_handle = CreateFileA(pipe_name, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL); HANDLE output_write_child; if (!DuplicateHandle(GetCurrentProcess(), output_write_handle, GetCurrentProcess(), &output_write_child, 0, TRUE, DUPLICATE_SAME_ACCESS)) { Win32Fatal("DuplicateHandle"); } CloseHandle(output_write_handle); return output_write_child; } bool Subprocess::Start(SubprocessSet* set, const string& command) { HANDLE child_pipe = SetupPipe(set->ioport_); SECURITY_ATTRIBUTES security_attributes; memset(&security_attributes, 0, sizeof(SECURITY_ATTRIBUTES)); security_attributes.nLength = sizeof(SECURITY_ATTRIBUTES); security_attributes.bInheritHandle = TRUE; // Must be inheritable so subprocesses can dup to children. 
HANDLE nul = CreateFileA("NUL", GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, &security_attributes, OPEN_EXISTING, 0, NULL); if (nul == INVALID_HANDLE_VALUE) Fatal("couldn't open nul"); STARTUPINFOA startup_info; memset(&startup_info, 0, sizeof(startup_info)); startup_info.cb = sizeof(STARTUPINFO); if (!use_console_) { startup_info.dwFlags = STARTF_USESTDHANDLES; startup_info.hStdInput = nul; startup_info.hStdOutput = child_pipe; startup_info.hStdError = child_pipe; } // In the console case, child_pipe is still inherited by the child and closed // when the subprocess finishes, which then notifies ninja. PROCESS_INFORMATION process_info; memset(&process_info, 0, sizeof(process_info)); // Ninja handles ctrl-c, except for subprocesses in console pools. DWORD process_flags = use_console_ ? 0 : CREATE_NEW_PROCESS_GROUP; // Do not prepend 'cmd /c' on Windows, this breaks command // lines greater than 8,191 chars. if (!CreateProcessA(NULL, (char*)command.c_str(), NULL, NULL, /* inherit handles */ TRUE, process_flags, NULL, NULL, &startup_info, &process_info)) { DWORD error = GetLastError(); if (error == ERROR_FILE_NOT_FOUND) { // File (program) not found error is treated as a normal build // action failure. if (child_pipe) CloseHandle(child_pipe); CloseHandle(pipe_); CloseHandle(nul); pipe_ = NULL; // child_ is already NULL; buf_ = "CreateProcess failed: The system cannot find the file " "specified.\n"; return true; } else { fprintf(stderr, "\nCreateProcess failed. Command attempted:\n\"%s\"\n", command.c_str()); const char* hint = NULL; // ERROR_INVALID_PARAMETER means the command line was formatted // incorrectly. This can be caused by a command line being too long or // leading whitespace in the command. Give extra context for this case. 
if (error == ERROR_INVALID_PARAMETER) { if (command.length() > 0 && (command[0] == ' ' || command[0] == '\t')) hint = "command contains leading whitespace"; else hint = "is the command line too long?"; } Win32Fatal("CreateProcess", hint); } } // Close pipe channel only used by the child. if (child_pipe) CloseHandle(child_pipe); CloseHandle(nul); CloseHandle(process_info.hThread); child_ = process_info.hProcess; return true; } void Subprocess::OnPipeReady() { DWORD bytes; if (!GetOverlappedResult(pipe_, &overlapped_, &bytes, TRUE)) { if (GetLastError() == ERROR_BROKEN_PIPE) { CloseHandle(pipe_); pipe_ = NULL; return; } Win32Fatal("GetOverlappedResult"); } if (is_reading_ && bytes) buf_.append(overlapped_buf_, bytes); memset(&overlapped_, 0, sizeof(overlapped_)); is_reading_ = true; if (!::ReadFile(pipe_, overlapped_buf_, sizeof(overlapped_buf_), &bytes, &overlapped_)) { if (GetLastError() == ERROR_BROKEN_PIPE) { CloseHandle(pipe_); pipe_ = NULL; return; } if (GetLastError() != ERROR_IO_PENDING) Win32Fatal("ReadFile"); } // Even if we read any bytes in the readfile call, we'll enter this // function again later and get them at that point. } ExitStatus Subprocess::Finish() { if (!child_) return ExitFailure; // TODO: add error handling for all of these. WaitForSingleObject(child_, INFINITE); DWORD exit_code = 0; GetExitCodeProcess(child_, &exit_code); CloseHandle(child_); child_ = NULL; return exit_code == CONTROL_C_EXIT ? 
ExitInterrupted : static_cast(exit_code); } bool Subprocess::Done() const { return pipe_ == NULL; } const string& Subprocess::GetOutput() const { return buf_; } HANDLE SubprocessSet::ioport_; SubprocessSet::SubprocessSet() { ioport_ = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1); if (!ioport_) Win32Fatal("CreateIoCompletionPort"); if (!SetConsoleCtrlHandler(NotifyInterrupted, TRUE)) Win32Fatal("SetConsoleCtrlHandler"); } SubprocessSet::~SubprocessSet() { Clear(); SetConsoleCtrlHandler(NotifyInterrupted, FALSE); CloseHandle(ioport_); } BOOL WINAPI SubprocessSet::NotifyInterrupted(DWORD dwCtrlType) { if (dwCtrlType == CTRL_C_EVENT || dwCtrlType == CTRL_BREAK_EVENT) { if (!PostQueuedCompletionStatus(ioport_, 0, 0, NULL)) Win32Fatal("PostQueuedCompletionStatus"); return TRUE; } return FALSE; } Subprocess *SubprocessSet::Add(const string& command, bool use_console) { Subprocess *subprocess = new Subprocess(use_console); if (!subprocess->Start(this, command)) { delete subprocess; return 0; } if (subprocess->child_) running_.push_back(subprocess); else finished_.push(subprocess); return subprocess; } bool SubprocessSet::DoWork() { DWORD bytes_read; Subprocess* subproc; OVERLAPPED* overlapped; if (!GetQueuedCompletionStatus(ioport_, &bytes_read, (PULONG_PTR)&subproc, &overlapped, INFINITE)) { if (GetLastError() != ERROR_BROKEN_PIPE) Win32Fatal("GetQueuedCompletionStatus"); } if (!subproc) // A NULL subproc indicates that we were interrupted and is // delivered by NotifyInterrupted above. 
return true; subproc->OnPipeReady(); if (subproc->Done()) { vector::iterator end = remove(running_.begin(), running_.end(), subproc); if (running_.end() != end) { finished_.push(subproc); running_.resize(end - running_.begin()); } } return false; } Subprocess* SubprocessSet::NextFinished() { if (finished_.empty()) return NULL; Subprocess* subproc = finished_.front(); finished_.pop(); return subproc; } void SubprocessSet::Clear() { for (vector::iterator i = running_.begin(); i != running_.end(); ++i) { // Since the foreground process is in our process group, it will receive a // CTRL_C_EVENT or CTRL_BREAK_EVENT at the same time as us. if ((*i)->child_ && !(*i)->use_console_) { if (!GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, GetProcessId((*i)->child_))) { Win32Fatal("GenerateConsoleCtrlEvent"); } } } for (vector::iterator i = running_.begin(); i != running_.end(); ++i) delete *i; running_.clear(); } ninja-1.13.2/src/subprocess.h000066400000000000000000000107561510764045400160440ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_SUBPROCESS_H_ #define NINJA_SUBPROCESS_H_ #include #include #include #ifdef _WIN32 #include #else #include #endif // ppoll() exists on FreeBSD, but only on newer versions. #ifdef __FreeBSD__ # include # if defined USE_PPOLL && __FreeBSD_version < 1002000 # undef USE_PPOLL # endif #endif #include "exit_status.h" /// Subprocess wraps a single async subprocess. 
It is entirely /// passive: it expects the caller to notify it when its fds are ready /// for reading, as well as call Finish() to reap the child once done() /// is true. struct Subprocess { ~Subprocess(); /// Returns ExitSuccess on successful process exit, ExitInterrupted if /// the process was interrupted, ExitFailure if it otherwise failed. ExitStatus Finish(); bool Done() const; const std::string& GetOutput() const; private: Subprocess(bool use_console); bool Start(struct SubprocessSet* set, const std::string& command); void OnPipeReady(); std::string buf_; #ifdef _WIN32 /// Set up pipe_ as the parent-side pipe of the subprocess; return the /// other end of the pipe, usable in the child process. HANDLE SetupPipe(HANDLE ioport); HANDLE child_; HANDLE pipe_; OVERLAPPED overlapped_; char overlapped_buf_[4 << 10]; bool is_reading_; #else /// The file descriptor that will be used in ppoll/pselect() for this process, /// if any. Otherwise -1. /// In non-console mode, this is the read-side of a pipe that was created /// specifically for this subprocess. The write-side of the pipe is given to /// the subprocess as combined stdout and stderr. /// In console mode no pipe is created: fd_ is -1, and process termination is /// detected using the SIGCHLD signal and waitpid(WNOHANG). int fd_; /// PID of the subprocess. Set to -1 when the subprocess is reaped. pid_t pid_; /// In POSIX platforms it is necessary to use waitpid(WNOHANG) to know whether /// a certain subprocess has finished. This is done for terminal subprocesses. /// However, this also causes the subprocess to be reaped before Finish() is /// called, so we need to store the ExitStatus so that a later Finish() /// invocation can return it. ExitStatus exit_status_; /// Call waitpid() on the subprocess with the provided options and update the /// pid_ and exit_status_ fields. /// Return a boolean indicating whether the subprocess has indeed terminated. 
bool TryFinish(int waitpid_options); #endif bool use_console_; friend struct SubprocessSet; }; /// SubprocessSet runs a ppoll/pselect() loop around a set of Subprocesses. /// DoWork() waits for any state change in subprocesses; finished_ /// is a queue of subprocesses as they finish. struct SubprocessSet { SubprocessSet(); ~SubprocessSet(); Subprocess* Add(const std::string& command, bool use_console = false); bool DoWork(); Subprocess* NextFinished(); void Clear(); std::vector running_; std::queue finished_; #ifdef _WIN32 static BOOL WINAPI NotifyInterrupted(DWORD dwCtrlType); static HANDLE ioport_; #else static void SetInterruptedFlag(int signum); static void SigChldHandler(int signo, siginfo_t* info, void* context); /// Store the signal number that causes the interruption. /// 0 if not interruption. static volatile sig_atomic_t interrupted_; /// Whether ninja should quit. Set on SIGINT, SIGTERM or SIGHUP reception. static bool IsInterrupted() { return interrupted_ != 0; } static void HandlePendingInterruption(); /// Initialized to 0 before ppoll/pselect(). /// Filled to 1 by SIGCHLD handler when a child process terminates. static volatile sig_atomic_t s_sigchld_received; void CheckConsoleProcessTerminated(); struct sigaction old_int_act_; struct sigaction old_term_act_; struct sigaction old_hup_act_; struct sigaction old_chld_act_; sigset_t old_mask_; #endif }; #endif // NINJA_SUBPROCESS_H_ ninja-1.13.2/src/subprocess_test.cc000066400000000000000000000154431510764045400172370ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "subprocess.h" #include "exit_status.h" #include "test.h" #ifndef _WIN32 // SetWithLots need setrlimit. #include #include #include #include #endif using namespace std; namespace { #ifdef _WIN32 const char* kSimpleCommand = "cmd /c dir \\"; #else const char* kSimpleCommand = "ls /"; #endif struct SubprocessTest : public testing::Test { SubprocessSet subprocs_; }; } // anonymous namespace // Run a command that fails and emits to stderr. TEST_F(SubprocessTest, BadCommandStderr) { Subprocess* subproc = subprocs_.Add("cmd /c ninja_no_such_command"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { // Pretend we discovered that stderr was ready for writing. subprocs_.DoWork(); } ExitStatus exit = subproc->Finish(); EXPECT_NE(ExitSuccess, exit); EXPECT_NE("", subproc->GetOutput()); } // Run a command that does not exist TEST_F(SubprocessTest, NoSuchCommand) { Subprocess* subproc = subprocs_.Add("ninja_no_such_command"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { // Pretend we discovered that stderr was ready for writing. 
subprocs_.DoWork(); } ExitStatus exit = subproc->Finish(); EXPECT_NE(ExitSuccess, exit); EXPECT_NE("", subproc->GetOutput()); #ifdef _WIN32 ASSERT_EQ("CreateProcess failed: The system cannot find the file " "specified.\n", subproc->GetOutput()); #endif } #ifndef _WIN32 TEST_F(SubprocessTest, InterruptChild) { Subprocess* subproc = subprocs_.Add("kill -INT $$"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); } EXPECT_EQ(ExitInterrupted, subproc->Finish()); } TEST_F(SubprocessTest, InterruptParent) { Subprocess* subproc = subprocs_.Add("kill -INT $PPID ; sleep 1"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { bool interrupted = subprocs_.DoWork(); if (interrupted) return; } ASSERT_FALSE("We should have been interrupted"); } TEST_F(SubprocessTest, InterruptChildWithSigTerm) { Subprocess* subproc = subprocs_.Add("kill -TERM $$"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); } EXPECT_EQ(ExitInterrupted, subproc->Finish()); } TEST_F(SubprocessTest, InterruptParentWithSigTerm) { Subprocess* subproc = subprocs_.Add("kill -TERM $PPID ; sleep 1"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { bool interrupted = subprocs_.DoWork(); if (interrupted) return; } ASSERT_FALSE("We should have been interrupted"); } TEST_F(SubprocessTest, InterruptChildWithSigHup) { Subprocess* subproc = subprocs_.Add("kill -HUP $$"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); } EXPECT_EQ(ExitInterrupted, subproc->Finish()); } TEST_F(SubprocessTest, InterruptParentWithSigHup) { Subprocess* subproc = subprocs_.Add("kill -HUP $PPID ; sleep 1"); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { bool interrupted = subprocs_.DoWork(); if (interrupted) return; } ASSERT_FALSE("We should have been interrupted"); } TEST_F(SubprocessTest, Console) { // Skip test if we don't have the console ourselves. 
if (isatty(0) && isatty(1) && isatty(2)) { Subprocess* subproc = subprocs_.Add("test -t 0 -a -t 1 -a -t 2", /*use_console=*/true); ASSERT_NE((Subprocess*)0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); } EXPECT_EQ(ExitSuccess, subproc->Finish()); } } #endif TEST_F(SubprocessTest, SetWithSingle) { Subprocess* subproc = subprocs_.Add(kSimpleCommand); ASSERT_NE((Subprocess *) 0, subproc); while (!subproc->Done()) { subprocs_.DoWork(); } ASSERT_EQ(ExitSuccess, subproc->Finish()); ASSERT_NE("", subproc->GetOutput()); ASSERT_EQ(1u, subprocs_.finished_.size()); } TEST_F(SubprocessTest, SetWithMulti) { Subprocess* processes[3]; const char* kCommands[3] = { kSimpleCommand, #ifdef _WIN32 "cmd /c echo hi", "cmd /c time /t", #else "id -u", "pwd", #endif }; for (int i = 0; i < 3; ++i) { processes[i] = subprocs_.Add(kCommands[i]); ASSERT_NE((Subprocess *) 0, processes[i]); } ASSERT_EQ(3u, subprocs_.running_.size()); for (int i = 0; i < 3; ++i) { ASSERT_FALSE(processes[i]->Done()); ASSERT_EQ("", processes[i]->GetOutput()); } while (!processes[0]->Done() || !processes[1]->Done() || !processes[2]->Done()) { ASSERT_GT(subprocs_.running_.size(), 0u); subprocs_.DoWork(); } ASSERT_EQ(0u, subprocs_.running_.size()); ASSERT_EQ(3u, subprocs_.finished_.size()); for (int i = 0; i < 3; ++i) { ASSERT_EQ(ExitSuccess, processes[i]->Finish()); ASSERT_NE("", processes[i]->GetOutput()); delete processes[i]; } } #if defined(USE_PPOLL) TEST_F(SubprocessTest, SetWithLots) { // Arbitrary big number; needs to be over 1024 to confirm we're no longer // hostage to pselect. const unsigned kNumProcs = 1025; // Make sure [ulimit -n] isn't going to stop us from working. 
rlimit rlim; ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim)); if (rlim.rlim_cur < kNumProcs) { printf("Raise [ulimit -n] above %u (currently %lu) to make this test go\n", kNumProcs, static_cast(rlim.rlim_cur)); return; } vector procs; for (size_t i = 0; i < kNumProcs; ++i) { Subprocess* subproc = subprocs_.Add("/bin/echo"); ASSERT_NE((Subprocess *) 0, subproc); procs.push_back(subproc); } while (!subprocs_.running_.empty()) subprocs_.DoWork(); for (size_t i = 0; i < procs.size(); ++i) { ASSERT_EQ(ExitSuccess, procs[i]->Finish()); ASSERT_NE("", procs[i]->GetOutput()); } ASSERT_EQ(kNumProcs, subprocs_.finished_.size()); } #endif // !__APPLE__ && !_WIN32 // TODO: this test could work on Windows, just not sure how to simply // read stdin. #ifndef _WIN32 // Verify that a command that attempts to read stdin correctly thinks // that stdin is closed. TEST_F(SubprocessTest, ReadStdin) { Subprocess* subproc = subprocs_.Add("cat -"); while (!subproc->Done()) { subprocs_.DoWork(); } ASSERT_EQ(ExitSuccess, subproc->Finish()); ASSERT_EQ(1u, subprocs_.finished_.size()); } #endif // _WIN32 ninja-1.13.2/src/test.cc000066400000000000000000000162521510764045400147660ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef _WIN32 #include // Has to be before util.h is included. 
#endif #include "test.h" #include #include #include #ifdef _WIN32 #include #include #else #include #endif #include "build_log.h" #include "graph.h" #include "manifest_parser.h" #include "util.h" #ifdef _AIX extern "C" { // GCC "helpfully" strips the definition of mkdtemp out on AIX. // The function is still present, so if we define it ourselves // it will work perfectly fine. extern char* mkdtemp(char* name_template); } #endif using namespace std; namespace { #ifdef _WIN32 /// Windows has no mkdtemp. Implement it in terms of _mktemp_s. char* mkdtemp(char* name_template) { int err = _mktemp_s(name_template, strlen(name_template) + 1); if (err < 0) { perror("_mktemp_s"); return NULL; } err = _mkdir(name_template); if (err < 0) { perror("mkdir"); return NULL; } return name_template; } #endif // _WIN32 string GetSystemTempDir() { #ifdef _WIN32 char buf[1024]; if (!GetTempPath(sizeof(buf), buf)) return ""; return buf; #else const char* tempdir = getenv("TMPDIR"); if (tempdir) return tempdir; return "/tmp"; #endif } } // anonymous namespace StateTestWithBuiltinRules::StateTestWithBuiltinRules() { AddCatRule(&state_); } void StateTestWithBuiltinRules::AddCatRule(State* state) { AssertParse(state, "rule cat\n" " command = cat $in > $out\n"); } Node* StateTestWithBuiltinRules::GetNode(const string& path) { EXPECT_FALSE(strpbrk(path.c_str(), "/\\")); return state_.GetNode(path, 0); } void AssertParse(State* state, const char* input, ManifestParserOptions opts) { ManifestParser parser(state, NULL, opts); string err; EXPECT_TRUE(parser.ParseTest(input, &err)); ASSERT_EQ("", err); VerifyGraph(*state); } void AssertHash(const char* expected, uint64_t actual) { ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual); } void VerifyGraph(const State& state) { for (vector::const_iterator e = state.edges_.begin(); e != state.edges_.end(); ++e) { // All edges need at least one output. EXPECT_FALSE((*e)->outputs_.empty()); // Check that the edge's inputs have the edge as out-edge. 
for (vector::const_iterator in_node = (*e)->inputs_.begin(); in_node != (*e)->inputs_.end(); ++in_node) { const vector& out_edges = (*in_node)->out_edges(); EXPECT_NE(find(out_edges.begin(), out_edges.end(), *e), out_edges.end()); } // Check that the edge's outputs have the edge as in-edge. for (vector::const_iterator out_node = (*e)->outputs_.begin(); out_node != (*e)->outputs_.end(); ++out_node) { EXPECT_EQ((*out_node)->in_edge(), *e); } } // The union of all in- and out-edges of each nodes should be exactly edges_. set node_edge_set; for (State::Paths::const_iterator p = state.paths_.begin(); p != state.paths_.end(); ++p) { const Node* n = p->second; if (n->in_edge()) node_edge_set.insert(n->in_edge()); node_edge_set.insert(n->out_edges().begin(), n->out_edges().end()); } set edge_set(state.edges_.begin(), state.edges_.end()); EXPECT_EQ(node_edge_set, edge_set); } void VirtualFileSystem::Create(const string& path, const string& contents) { files_[path].mtime = now_; files_[path].contents = contents; files_created_.insert(path); } TimeStamp VirtualFileSystem::Stat(const string& path, string* err) const { FileMap::const_iterator i = files_.find(path); if (i != files_.end()) { *err = i->second.stat_error; return i->second.mtime; } return 0; } bool VirtualFileSystem::WriteFile(const string& path, const string& contents, bool /*crlf_on_windows*/) { Create(path, contents); return true; } bool VirtualFileSystem::MakeDir(const string& path) { directories_made_.push_back(path); return true; // success } FileReader::Status VirtualFileSystem::ReadFile(const string& path, string* contents, string* err) { files_read_.push_back(path); FileMap::iterator i = files_.find(path); if (i != files_.end()) { *contents = i->second.contents; return Okay; } *err = strerror(ENOENT); return NotFound; } int VirtualFileSystem::RemoveFile(const string& path) { if (find(directories_made_.begin(), directories_made_.end(), path) != directories_made_.end()) return -1; FileMap::iterator i = 
files_.find(path); if (i != files_.end()) { files_.erase(i); files_removed_.insert(path); return 0; } else { return 1; } } void ScopedTempDir::CreateAndEnter(const string& name) { // First change into the system temp dir and save it for cleanup. start_dir_ = GetSystemTempDir(); if (start_dir_.empty()) Fatal("couldn't get system temp dir"); if (chdir(start_dir_.c_str()) < 0) Fatal("chdir: %s", strerror(errno)); // Create a temporary subdirectory of that. char name_template[1024]; strcpy(name_template, name.c_str()); strcat(name_template, "-XXXXXX"); char* tempname = mkdtemp(name_template); if (!tempname) Fatal("mkdtemp: %s", strerror(errno)); temp_dir_name_ = tempname; // chdir into the new temporary directory. if (chdir(temp_dir_name_.c_str()) < 0) Fatal("chdir: %s", strerror(errno)); } void ScopedTempDir::Cleanup() { if (temp_dir_name_.empty()) return; // Something went wrong earlier. // Move out of the directory we're about to clobber. if (chdir(start_dir_.c_str()) < 0) Fatal("chdir: %s", strerror(errno)); #ifdef _WIN32 string command = "rmdir /s /q " + temp_dir_name_; #else string command = "rm -rf " + temp_dir_name_; #endif if (system(command.c_str()) < 0) Fatal("system: %s", strerror(errno)); temp_dir_name_.clear(); } ScopedFilePath::ScopedFilePath(ScopedFilePath&& other) noexcept : path_(std::move(other.path_)), released_(other.released_) { other.released_ = true; } /// It would be nice to use '= default' here instead but some old compilers /// such as GCC from Ubuntu 16.06 will not compile it with "noexcept", so just /// write it manually. 
ScopedFilePath& ScopedFilePath::operator=(ScopedFilePath&& other) noexcept { if (this != &other) { this->~ScopedFilePath(); new (this) ScopedFilePath(std::move(other)); } return *this; } ScopedFilePath::~ScopedFilePath() { if (!released_) { platformAwareUnlink(path_.c_str()); } } void ScopedFilePath::Release() { released_ = true; } ninja-1.13.2/src/test.h000066400000000000000000000101251510764045400146210ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_TEST_H_ #define NINJA_TEST_H_ #include #include "disk_interface.h" #include "manifest_parser.h" #include "state.h" // Support utilities for tests. struct Node; /// A base test fixture that includes a State object with a /// builtin "cat" rule. struct StateTestWithBuiltinRules : public testing::Test { StateTestWithBuiltinRules(); /// Add a "cat" rule to \a state. Used by some tests; it's /// otherwise done by the ctor to state_. void AddCatRule(State* state); /// Short way to get a Node by its path from state_. Node* GetNode(const std::string& path); State state_; }; void AssertParse(State* state, const char* input, ManifestParserOptions = ManifestParserOptions()); void AssertHash(const char* expected, uint64_t actual); void VerifyGraph(const State& state); /// An implementation of DiskInterface that uses an in-memory representation /// of disk state. 
It also logs file accesses and directory creations /// so it can be used by tests to verify disk access patterns. struct VirtualFileSystem : public DiskInterface { VirtualFileSystem() : now_(1) {} /// "Create" a file with contents. void Create(const std::string& path, const std::string& contents); /// Tick "time" forwards; subsequent file operations will be newer than /// previous ones. int Tick() { return ++now_; } // DiskInterface TimeStamp Stat(const std::string& path, std::string* err) const override; bool WriteFile(const std::string& path, const std::string& contents, bool /*crlf_on_windows*/) override; bool MakeDir(const std::string& path) override; Status ReadFile(const std::string& path, std::string* contents, std::string* err) override; int RemoveFile(const std::string& path) override; /// An entry for a single in-memory file. struct Entry { int mtime; std::string stat_error; // If mtime is -1. std::string contents; }; std::vector directories_made_; std::vector files_read_; typedef std::map FileMap; FileMap files_; std::set files_removed_; std::set files_created_; /// A simple fake timestamp for file operations. int now_; }; struct ScopedTempDir { /// Create a temporary directory and chdir into it. void CreateAndEnter(const std::string& name); /// Clean up the temporary directory. void Cleanup(); /// The temp directory containing our dir. std::string start_dir_; /// The subdirectory name for our dir, or empty if it hasn't been set up. std::string temp_dir_name_; }; /// A class that records a file path and ensures that it is removed /// on destruction. This ensures that tests do not keep stale files in the /// current directory where they run, even in case of assertion failure. struct ScopedFilePath { /// Constructor just records the file path. ScopedFilePath(const std::string& path) : path_(path) {} ScopedFilePath(const char* path) : path_(path) {} /// Allow move operations. 
ScopedFilePath(ScopedFilePath&&) noexcept; ScopedFilePath& operator=(ScopedFilePath&&) noexcept; /// Destructor destroys the file, unless Release() was called. ~ScopedFilePath(); /// Release the file, the destructor will not remove the file. void Release(); const char* c_str() const { return path_.c_str(); } const std::string& path() const { return path_; } bool released() const { return released_; } private: std::string path_; bool released_ = false; }; #endif // NINJA_TEST_H_ ninja-1.13.2/src/third_party/000077500000000000000000000000001510764045400160235ustar00rootroot00000000000000ninja-1.13.2/src/third_party/emhash/000077500000000000000000000000001510764045400172705ustar00rootroot00000000000000ninja-1.13.2/src/third_party/emhash/README.ninja000066400000000000000000000005521510764045400212500ustar00rootroot00000000000000Description: emhash8::HashMap for C++14/17 Version: 1.6.5 (commit bdebddbdce1b473bbc189178fd523ef4a876ea01) URL: https://github.com/ktprime/emhash Copyright: Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com SPDX-License-Identifier: MIT Local changes: - Added includes for _mm_prefetch on MinGW. - Fixed some spelling errors to appease the linter. ninja-1.13.2/src/third_party/emhash/hash_table8.hpp000066400000000000000000001621251510764045400221720ustar00rootroot00000000000000// emhash8::HashMap for C++14/17 // version 1.6.5 // https://github.com/ktprime/emhash/blob/master/hash_table8.hpp // // Licensed under the MIT License . 
// SPDX-License-Identifier: MIT // Copyright (c) 2021-2024 Huang Yuanbing & bailuzhou AT 163.com // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE #pragma once #include #include #include #include #include #include #include #include #include #include #include #undef EMH_NEW #undef EMH_EMPTY // likely/unlikely #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) # define EMH_LIKELY(condition) __builtin_expect(condition, 1) # define EMH_UNLIKELY(condition) __builtin_expect(condition, 0) #else # define EMH_LIKELY(condition) condition # define EMH_UNLIKELY(condition) condition #endif #define EMH_EMPTY(n) (0 > (int)(_index[n].next)) #define EMH_EQHASH(n, key_hash) (((size_type)(key_hash) & ~_mask) == (_index[n].slot & ~_mask)) //#define EMH_EQHASH(n, key_hash) ((size_type)(key_hash - _index[n].slot) & ~_mask) == 0 #define EMH_NEW(key, val, bucket, key_hash) \ new(_pairs + _num_filled) value_type(key, val); \ _etail = bucket; \ _index[bucket] = 
{bucket, _num_filled++ | ((size_type)(key_hash) & ~_mask)} #if _WIN32 && defined(_M_IX86) #include #endif namespace emhash8 { struct DefaultPolicy { static constexpr float load_factor = 0.80f; static constexpr float min_load_factor = 0.20f; static constexpr size_t cacheline_size = 64U; }; template, typename EqT = std::equal_to, typename Allocator = std::allocator>, //never used typename Policy = DefaultPolicy> //never used class HashMap { #ifndef EMH_DEFAULT_LOAD_FACTOR constexpr static float EMH_DEFAULT_LOAD_FACTOR = 0.80f; #endif constexpr static float EMH_MIN_LOAD_FACTOR = 0.25f; //< 0.5 constexpr static uint32_t EMH_CACHE_LINE_SIZE = 64; //debug only public: using htype = HashMap; using value_type = std::pair; using key_type = KeyT; using mapped_type = ValueT; //using dPolicy = Policy; #ifdef EMH_SMALL_TYPE using size_type = uint16_t; #elif EMH_SIZE_TYPE == 0 using size_type = uint32_t; #else using size_type = size_t; #endif using hasher = HashT; using key_equal = EqT; constexpr static size_type INACTIVE = 0-1u; //constexpr uint32_t END = 0-0x1u; constexpr static size_type EAD = 2; struct Index { size_type next; size_type slot; }; class const_iterator; class iterator { public: using iterator_category = std::bidirectional_iterator_tag; using difference_type = std::ptrdiff_t; using value_type = typename htype::value_type; using pointer = value_type*; using const_pointer = const value_type* ; using reference = value_type&; using const_reference = const value_type&; iterator() : kv_(nullptr) {} iterator(const_iterator& cit) { kv_ = cit.kv_; } iterator(const htype* hash_map, size_type bucket) { kv_ = hash_map->_pairs + (int)bucket; } iterator& operator++() { kv_ ++; return *this; } iterator operator++(int) { auto cur = *this; kv_ ++; return cur; } iterator& operator--() { kv_ --; return *this; } iterator operator--(int) { auto cur = *this; kv_ --; return cur; } reference operator*() const { return *kv_; } pointer operator->() const { return kv_; } bool operator == 
(const iterator& rhs) const { return kv_ == rhs.kv_; } bool operator != (const iterator& rhs) const { return kv_ != rhs.kv_; } bool operator == (const const_iterator& rhs) const { return kv_ == rhs.kv_; } bool operator != (const const_iterator& rhs) const { return kv_ != rhs.kv_; } public: value_type* kv_; }; class const_iterator { public: using iterator_category = std::bidirectional_iterator_tag; using value_type = typename htype::value_type; using difference_type = std::ptrdiff_t; using pointer = value_type*; using const_pointer = const value_type*; using reference = value_type&; using const_reference = const value_type&; const_iterator(const iterator& it) { kv_ = it.kv_; } const_iterator (const htype* hash_map, size_type bucket) { kv_ = hash_map->_pairs + (int)bucket; } const_iterator& operator++() { kv_ ++; return *this; } const_iterator operator++(int) { auto cur = *this; kv_ ++; return cur; } const_iterator& operator--() { kv_ --; return *this; } const_iterator operator--(int) { auto cur = *this; kv_ --; return cur; } const_reference operator*() const { return *kv_; } const_pointer operator->() const { return kv_; } bool operator == (const iterator& rhs) const { return kv_ == rhs.kv_; } bool operator != (const iterator& rhs) const { return kv_ != rhs.kv_; } bool operator == (const const_iterator& rhs) const { return kv_ == rhs.kv_; } bool operator != (const const_iterator& rhs) const { return kv_ != rhs.kv_; } public: const value_type* kv_; }; void init(size_type bucket, float mlf = EMH_DEFAULT_LOAD_FACTOR) { _pairs = nullptr; _index = nullptr; _mask = _num_buckets = 0; _num_filled = 0; _mlf = (uint32_t)((1 << 27) / EMH_DEFAULT_LOAD_FACTOR); max_load_factor(mlf); rehash(bucket); } HashMap(size_type bucket = 2, float mlf = EMH_DEFAULT_LOAD_FACTOR) { init(bucket, mlf); } HashMap(const HashMap& rhs) { if (rhs.load_factor() > EMH_MIN_LOAD_FACTOR) { _pairs = alloc_bucket((size_type)(rhs._num_buckets * rhs.max_load_factor()) + 4); _index = 
alloc_index(rhs._num_buckets); clone(rhs); } else { init(rhs._num_filled + 2, rhs.max_load_factor()); for (auto it = rhs.begin(); it != rhs.end(); ++it) insert_unique(it->first, it->second); } } HashMap(HashMap&& rhs) noexcept { init(0); *this = std::move(rhs); } HashMap(std::initializer_list ilist) { init((size_type)ilist.size()); for (auto it = ilist.begin(); it != ilist.end(); ++it) do_insert(*it); } template HashMap(InputIt first, InputIt last, size_type bucket_count=4) { init(std::distance(first, last) + bucket_count); for (; first != last; ++first) emplace(*first); } HashMap& operator=(const HashMap& rhs) { if (this == &rhs) return *this; if (rhs.load_factor() < EMH_MIN_LOAD_FACTOR) { clear(); free(_pairs); _pairs = nullptr; rehash(rhs._num_filled + 2); for (auto it = rhs.begin(); it != rhs.end(); ++it) insert_unique(it->first, it->second); return *this; } clearkv(); if (_num_buckets != rhs._num_buckets) { free(_pairs); free(_index); _index = alloc_index(rhs._num_buckets); _pairs = alloc_bucket((size_type)(rhs._num_buckets * rhs.max_load_factor()) + 4); } clone(rhs); return *this; } HashMap& operator=(HashMap&& rhs) noexcept { if (this != &rhs) { swap(rhs); rhs.clear(); } return *this; } template bool operator == (const Con& rhs) const { if (size() != rhs.size()) return false; for (auto it = begin(), last = end(); it != last; ++it) { auto oi = rhs.find(it->first); if (oi == rhs.end() || it->second != oi->second) return false; } return true; } template bool operator != (const Con& rhs) const { return !(*this == rhs); } ~HashMap() noexcept { clearkv(); free(_pairs); free(_index); _index = nullptr; _pairs = nullptr; } void clone(const HashMap& rhs) { _hasher = rhs._hasher; // _eq = rhs._eq; _num_buckets = rhs._num_buckets; _num_filled = rhs._num_filled; _mlf = rhs._mlf; _last = rhs._last; _mask = rhs._mask; #if EMH_HIGH_LOAD _ehead = rhs._ehead; #endif _etail = rhs._etail; auto opairs = rhs._pairs; memcpy((char*)_index, (char*)rhs._index, (_num_buckets + EAD) * 
sizeof(Index)); if (is_copy_trivially()) { memcpy((char*)_pairs, (char*)opairs, _num_filled * sizeof(value_type)); } else { for (size_type slot = 0; slot < _num_filled; slot++) new(_pairs + slot) value_type(opairs[slot]); } } void swap(HashMap& rhs) { // std::swap(_eq, rhs._eq); std::swap(_hasher, rhs._hasher); std::swap(_pairs, rhs._pairs); std::swap(_index, rhs._index); std::swap(_num_buckets, rhs._num_buckets); std::swap(_num_filled, rhs._num_filled); std::swap(_mask, rhs._mask); std::swap(_mlf, rhs._mlf); std::swap(_last, rhs._last); #if EMH_HIGH_LOAD std::swap(_ehead, rhs._ehead); #endif std::swap(_etail, rhs._etail); } // ------------------------------------------------------------- iterator first() const { return {this, 0}; } iterator last() const { return {this, _num_filled - 1}; } value_type& front() { return _pairs[0]; } const value_type& front() const { return _pairs[0]; } value_type& back() { return _pairs[_num_filled - 1]; } const value_type& back() const { return _pairs[_num_filled - 1]; } void pop_front() { erase(begin()); } //TODO. only erase first without move last void pop_back() { erase(last()); } iterator begin() { return first(); } const_iterator cbegin() const { return first(); } const_iterator begin() const { return first(); } iterator end() { return {this, _num_filled}; } const_iterator cend() const { return {this, _num_filled}; } const_iterator end() const { return cend(); } const value_type* values() const { return _pairs; } const Index* index() const { return _index; } size_type size() const { return _num_filled; } bool empty() const { return _num_filled == 0; } size_type bucket_count() const { return _num_buckets; } /// Returns average number of elements per bucket. 
float load_factor() const { return static_cast(_num_filled) / (_mask + 1); } HashT& hash_function() const { return _hasher; } EqT& key_eq() const { return _eq; } void max_load_factor(float mlf) { if (mlf < 0.992 && mlf > EMH_MIN_LOAD_FACTOR) { _mlf = (uint32_t)((1 << 27) / mlf); if (_num_buckets > 0) rehash(_num_buckets); } } constexpr float max_load_factor() const { return (1 << 27) / (float)_mlf; } constexpr size_type max_size() const { return (1ull << (sizeof(size_type) * 8 - 1)); } constexpr size_type max_bucket_count() const { return max_size(); } #if EMH_STATIS //Returns the bucket number where the element with key k is located. size_type bucket(const KeyT& key) const { const auto bucket = hash_bucket(key); const auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return 0; else if (bucket == next_bucket) return bucket + 1; return hash_main(bucket) + 1; } //Returns the number of elements in bucket n. size_type bucket_size(const size_type bucket) const { auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return 0; next_bucket = hash_main(bucket); size_type ibucket_size = 1; //iterator each item in current main bucket while (true) { const auto nbucket = _index[next_bucket].next; if (nbucket == next_bucket) { break; } ibucket_size ++; next_bucket = nbucket; } return ibucket_size; } size_type get_main_bucket(const size_type bucket) const { auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return INACTIVE; return hash_main(bucket); } size_type get_diss(size_type bucket, size_type next_bucket, const size_type slots) const { auto pbucket = reinterpret_cast(&_pairs[bucket]); auto pnext = reinterpret_cast(&_pairs[next_bucket]); if (pbucket / EMH_CACHE_LINE_SIZE == pnext / EMH_CACHE_LINE_SIZE) return 0; size_type diff = pbucket > pnext ? 
(pbucket - pnext) : (pnext - pbucket); if (diff / EMH_CACHE_LINE_SIZE < slots - 1) return diff / EMH_CACHE_LINE_SIZE + 1; return slots - 1; } int get_bucket_info(const size_type bucket, size_type steps[], const size_type slots) const { auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return -1; const auto main_bucket = hash_main(bucket); if (next_bucket == main_bucket) return 1; else if (main_bucket != bucket) return 0; steps[get_diss(bucket, next_bucket, slots)] ++; size_type ibucket_size = 2; //find a empty and linked it to tail while (true) { const auto nbucket = _index[next_bucket].next; if (nbucket == next_bucket) break; steps[get_diss(nbucket, next_bucket, slots)] ++; ibucket_size ++; next_bucket = nbucket; } return (int)ibucket_size; } void dump_statics() const { const size_type slots = 128; size_type buckets[slots + 1] = {0}; size_type steps[slots + 1] = {0}; for (size_type bucket = 0; bucket < _num_buckets; ++bucket) { auto bsize = get_bucket_info(bucket, steps, slots); if (bsize > 0) buckets[bsize] ++; } size_type sumb = 0, collision = 0, sumc = 0, finds = 0, sumn = 0; puts("============== buckets size ration ========="); for (size_type i = 0; i < sizeof(buckets) / sizeof(buckets[0]); i++) { const auto bucketsi = buckets[i]; if (bucketsi == 0) continue; sumb += bucketsi; sumn += bucketsi * i; collision += bucketsi * (i - 1); finds += bucketsi * i * (i + 1) / 2; printf(" %2u %8u %2.2lf| %.2lf\n", i, bucketsi, bucketsi * 100.0 * i / _num_filled, sumn * 100.0 / _num_filled); } puts("========== collision miss ration ==========="); for (size_type i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) { sumc += steps[i]; if (steps[i] <= 2) continue; printf(" %2u %8u %.2lf %.2lf\n", i, steps[i], steps[i] * 100.0 / collision, sumc * 100.0 / collision); } if (sumb == 0) return; printf(" _num_filled/bucket_size/packed collision/cache_miss/hit_find = %u/%.2lf/%zd/ %.2lf%%/%.2lf%%/%.2lf\n", _num_filled, _num_filled * 1.0 / sumb, sizeof(value_type), 
(collision * 100.0 / _num_filled), (collision - steps[0]) * 100.0 / _num_filled, finds * 1.0 / _num_filled); assert(sumn == _num_filled); assert(sumc == collision); puts("============== buckets size end ============="); } #endif void pack_zero(ValueT zero) { _pairs[_num_filled] = {KeyT(), zero}; } // ------------------------------------------------------------ template iterator find(const K& key) noexcept { return {this, find_filled_slot(key)}; } template const_iterator find(const K& key) const noexcept { return {this, find_filled_slot(key)}; } template ValueT& at(const K& key) { const auto slot = find_filled_slot(key); //throw return _pairs[slot].second; } template const ValueT& at(const K& key) const { const auto slot = find_filled_slot(key); //throw return _pairs[slot].second; } const ValueT& index(const uint32_t index) const { return _pairs[index].second; } ValueT& index(const uint32_t index) { return _pairs[index].second; } template bool contains(const K& key) const noexcept { return find_filled_slot(key) != _num_filled; } template size_type count(const K& key) const noexcept { return find_filled_slot(key) == _num_filled ? 0 : 1; //return find_sorted_bucket(key) == END ? 0 : 1; //return find_hash_bucket(key) == END ? 0 : 1; } template std::pair equal_range(const K& key) { const auto found = find(key); if (found.second == _num_filled) return { found, found }; else return { found, std::next(found) }; } void merge(HashMap& rhs) { if (empty()) { *this = std::move(rhs); return; } for (auto rit = rhs.begin(); rit != rhs.end(); ) { auto fit = find(rit->first); if (fit == end()) { insert_unique(rit->first, std::move(rit->second)); rit = rhs.erase(rit); } else { ++rit; } } } /// Returns the matching ValueT or nullptr if k isn't found. 
bool try_get(const KeyT& key, ValueT& val) const noexcept { const auto slot = find_filled_slot(key); const auto found = slot != _num_filled; if (found) { val = _pairs[slot].second; } return found; } /// Returns the matching ValueT or nullptr if k isn't found. ValueT* try_get(const KeyT& key) noexcept { const auto slot = find_filled_slot(key); return slot != _num_filled ? &_pairs[slot].second : nullptr; } /// Const version of the above ValueT* try_get(const KeyT& key) const noexcept { const auto slot = find_filled_slot(key); return slot != _num_filled ? &_pairs[slot].second : nullptr; } /// set value if key exist bool try_set(const KeyT& key, const ValueT& val) noexcept { const auto slot = find_filled_slot(key); if (slot == _num_filled) return false; _pairs[slot].second = val; return true; } /// set value if key exist bool try_set(const KeyT& key, ValueT&& val) noexcept { const auto slot = find_filled_slot(key); if (slot == _num_filled) return false; _pairs[slot].second = std::move(val); return true; } /// Convenience function. ValueT get_or_return_default(const KeyT& key) const noexcept { const auto slot = find_filled_slot(key); return slot == _num_filled ? 
ValueT() : _pairs[slot].second; } // ----------------------------------------------------- std::pair do_insert(const value_type& value) noexcept { const auto key_hash = hash_key(value.first); const auto bucket = find_or_allocate(value.first, key_hash); const auto bempty = EMH_EMPTY(bucket); if (bempty) { EMH_NEW(value.first, value.second, bucket, key_hash); } const auto slot = _index[bucket].slot & _mask; return { {this, slot}, bempty }; } std::pair do_insert(value_type&& value) noexcept { const auto key_hash = hash_key(value.first); const auto bucket = find_or_allocate(value.first, key_hash); const auto bempty = EMH_EMPTY(bucket); if (bempty) { EMH_NEW(std::move(value.first), std::move(value.second), bucket, key_hash); } const auto slot = _index[bucket].slot & _mask; return { {this, slot}, bempty }; } template std::pair do_insert(K&& key, V&& val) noexcept { const auto key_hash = hash_key(key); const auto bucket = find_or_allocate(key, key_hash); const auto bempty = EMH_EMPTY(bucket); if (bempty) { EMH_NEW(std::forward(key), std::forward(val), bucket, key_hash); } const auto slot = _index[bucket].slot & _mask; return { {this, slot}, bempty }; } template std::pair do_assign(K&& key, V&& val) noexcept { check_expand_need(); const auto key_hash = hash_key(key); const auto bucket = find_or_allocate(key, key_hash); const auto bempty = EMH_EMPTY(bucket); if (bempty) { EMH_NEW(std::forward(key), std::forward(val), bucket, key_hash); } else { _pairs[_index[bucket].slot & _mask].second = std::move(val); } const auto slot = _index[bucket].slot & _mask; return { {this, slot}, bempty }; } std::pair insert(const value_type& p) { check_expand_need(); return do_insert(p); } std::pair insert(value_type && p) { check_expand_need(); return do_insert(std::move(p)); } void insert(std::initializer_list ilist) { reserve(ilist.size() + _num_filled, false); for (auto it = ilist.begin(); it != ilist.end(); ++it) do_insert(*it); } template void insert(Iter first, Iter last) { 
reserve(std::distance(first, last) + _num_filled, false); for (; first != last; ++first) do_insert(first->first, first->second); } #if 0 template void insert_unique(Iter begin, Iter end) { reserve(std::distance(begin, end) + _num_filled, false); for (; begin != end; ++begin) { insert_unique(*begin); } } #endif template size_type insert_unique(K&& key, V&& val) { check_expand_need(); const auto key_hash = hash_key(key); auto bucket = find_unique_bucket(key_hash); EMH_NEW(std::forward(key), std::forward(val), bucket, key_hash); return bucket; } size_type insert_unique(value_type&& value) { return insert_unique(std::move(value.first), std::move(value.second)); } size_type insert_unique(const value_type& value) { return insert_unique(value.first, value.second); } template std::pair emplace(Args&&... args) noexcept { check_expand_need(); return do_insert(std::forward(args)...); } //no any optimize for position template iterator emplace_hint(const_iterator hint, Args&&... args) { (void)hint; check_expand_need(); return do_insert(std::forward(args)...).first; } template std::pair try_emplace(const KeyT& k, Args&&... args) { check_expand_need(); return do_insert(k, std::forward(args)...); } template std::pair try_emplace(KeyT&& k, Args&&... args) { check_expand_need(); return do_insert(std::move(k), std::forward(args)...); } template size_type emplace_unique(Args&&... args) { return insert_unique(std::forward(args)...); } std::pair insert_or_assign(const KeyT& key, ValueT&& val) { return do_assign(key, std::forward(val)); } std::pair insert_or_assign(KeyT&& key, ValueT&& val) { return do_assign(std::move(key), std::forward(val)); } /// Return the old value or ValueT() if it didn't exist. 
ValueT set_get(const KeyT& key, const ValueT& val) { check_expand_need(); const auto key_hash = hash_key(key); const auto bucket = find_or_allocate(key, key_hash); if (EMH_EMPTY(bucket)) { EMH_NEW(key, val, bucket, key_hash); return ValueT(); } else { const auto slot = _index[bucket].slot & _mask; ValueT old_value(val); std::swap(_pairs[slot].second, old_value); return old_value; } } /// Like std::map::operator[]. ValueT& operator[](const KeyT& key) noexcept { check_expand_need(); const auto key_hash = hash_key(key); const auto bucket = find_or_allocate(key, key_hash); if (EMH_EMPTY(bucket)) { /* Check if inserting a value rather than overwriting an old entry */ EMH_NEW(key, std::move(ValueT()), bucket, key_hash); } const auto slot = _index[bucket].slot & _mask; return _pairs[slot].second; } ValueT& operator[](KeyT&& key) noexcept { check_expand_need(); const auto key_hash = hash_key(key); const auto bucket = find_or_allocate(key, key_hash); if (EMH_EMPTY(bucket)) { EMH_NEW(std::move(key), std::move(ValueT()), bucket, key_hash); } const auto slot = _index[bucket].slot & _mask; return _pairs[slot].second; } /// Erase an element from the hash table. 
/// return 0 if element was not found size_type erase(const KeyT& key) noexcept { const auto key_hash = hash_key(key); const auto sbucket = find_filled_bucket(key, key_hash); if (sbucket == INACTIVE) return 0; const auto main_bucket = key_hash & _mask; erase_slot(sbucket, (size_type)main_bucket); return 1; } //iterator erase(const_iterator begin_it, const_iterator end_it) iterator erase(const const_iterator& cit) noexcept { const auto slot = (size_type)(cit.kv_ - _pairs); size_type main_bucket; const auto sbucket = find_slot_bucket(slot, main_bucket); //TODO erase_slot(sbucket, main_bucket); return {this, slot}; } //only last >= first iterator erase(const_iterator first, const_iterator last) noexcept { auto esize = long(last.kv_ - first.kv_); auto tsize = long((_pairs + _num_filled) - last.kv_); //last to tail size auto next = first; while (tsize -- > 0) { if (esize-- <= 0) break; next = ++erase(next); } //fast erase from last next = this->last(); while (esize -- > 0) next = --erase(next); return {this, size_type(next.kv_ - _pairs)}; } template size_type erase_if(Pred pred) { auto old_size = size(); for (auto it = begin(); it != end();) { if (pred(*it)) it = erase(it); else ++it; } return old_size - size(); } static constexpr bool is_triviall_destructable() { #if __cplusplus >= 201402L || _MSC_VER > 1600 return !(std::is_trivially_destructible::value && std::is_trivially_destructible::value); #else return !(std::is_pod::value && std::is_pod::value); #endif } static constexpr bool is_copy_trivially() { #if __cplusplus >= 201103L || _MSC_VER > 1600 return (std::is_trivially_copyable::value && std::is_trivially_copyable::value); #else return (std::is_pod::value && std::is_pod::value); #endif } void clearkv() { if (is_triviall_destructable()) { while (_num_filled --) _pairs[_num_filled].~value_type(); } } /// Remove all elements, keeping full capacity. 
void clear() noexcept { clearkv(); if (_num_filled > 0) memset((char*)_index, INACTIVE, sizeof(_index[0]) * _num_buckets); _last = _num_filled = 0; _etail = INACTIVE; #if EMH_HIGH_LOAD _ehead = 0; #endif } void shrink_to_fit(const float min_factor = EMH_DEFAULT_LOAD_FACTOR / 4) { if (load_factor() < min_factor && bucket_count() > 10) //safe guard rehash(_num_filled + 1); } #if EMH_HIGH_LOAD #define EMH_PREVET(i, n) i[n].slot void set_empty() { auto prev = 0; for (int32_t bucket = 1; bucket < _num_buckets; ++bucket) { if (EMH_EMPTY(bucket)) { if (prev != 0) { EMH_PREVET(_index, bucket) = prev; _index[_prev].next = -bucket; } else _ehead = bucket; prev = bucket; } } EMH_PREVET(_index, _ehead) = prev; _index[_prev].next = 0-_ehead; _ehead = 0-_index[_ehead].next; } void clear_empty() { auto prev = EMH_PREVET(_index, _ehead); while (prev != _ehead) { _index[_prev].next = INACTIVE; prev = EMH_PREVET(_index, prev); } _index[_ehead].next = INACTIVE; _ehead = 0; } //prev-ehead->next size_type pop_empty(const size_type bucket) { const auto prev_bucket = EMH_PREVET(_index, bucket); const int next_bucket = 0-_index[bucket].next; EMH_PREVET(_index, next_bucket) = prev_bucket; _index[prev_bucket].next = -next_bucket; _ehead = next_bucket; return bucket; } //ehead->bucket->next void push_empty(const int32_t bucket) { const int next_bucket = 0-_index[_ehead].next; assert(next_bucket > 0); EMH_PREVET(_index, bucket) = _ehead; _index[bucket].next = -next_bucket; EMH_PREVET(_index, next_bucket) = bucket; _index[_ehead].next = -bucket; // _ehead = bucket; } #endif /// Make room for this many elements bool reserve(uint64_t num_elems, bool force) { (void)force; #if EMH_HIGH_LOAD == 0 const auto required_buckets = num_elems * _mlf >> 27; if (EMH_LIKELY(required_buckets < _mask)) // && !force return false; #elif EMH_HIGH_LOAD const auto required_buckets = num_elems + num_elems * 1 / 9; if (EMH_LIKELY(required_buckets < _mask)) return false; else if (_num_buckets < 16 && _num_filled < 
_num_buckets) return false; else if (_num_buckets > EMH_HIGH_LOAD) { if (_ehead == 0) { set_empty(); return false; } else if (/*_num_filled + 100 < _num_buckets && */_index[_ehead].next != 0-_ehead) { return false; } } #endif #if EMH_STATIS if (_num_filled > EMH_STATIS) dump_statics(); #endif //assert(required_buckets < max_size()); rehash(required_buckets + 2); return true; } static value_type* alloc_bucket(size_type num_buckets) { #ifdef EMH_ALLOC auto new_pairs = aligned_alloc(32, (uint64_t)num_buckets * sizeof(value_type)); #else auto new_pairs = malloc((uint64_t)num_buckets * sizeof(value_type)); #endif return (value_type *)(new_pairs); } static Index* alloc_index(size_type num_buckets) { auto new_index = (char*)malloc((uint64_t)(EAD + num_buckets) * sizeof(Index)); return (Index *)(new_index); } bool reserve(size_type required_buckets) noexcept { if (_num_filled != required_buckets) return reserve(required_buckets, true); _last = 0; #if EMH_HIGH_LOAD _ehead = 0; #endif #if EMH_SORT std::sort(_pairs, _pairs + _num_filled, [this](const value_type & l, const value_type & r) { const auto hashl = (size_type)hash_key(l.first) & _mask, hashr = (size_type)hash_key(r.first) & _mask; return hashl < hashr; //return l.first < r.first; }); #endif memset((char*)_index, INACTIVE, sizeof(_index[0]) * _num_buckets); for (size_type slot = 0; slot < _num_filled; slot++) { const auto& key = _pairs[slot].first; const auto key_hash = hash_key(key); const auto bucket = size_type(key_hash & _mask); auto& next_bucket = _index[bucket].next; if ((int)next_bucket < 0) _index[bucket] = {1, slot | ((size_type)(key_hash) & ~_mask)}; else { _index[bucket].slot |= (size_type)(key_hash) & ~_mask; next_bucket ++; } } return true; } void rebuild(size_type num_buckets) noexcept { free(_index); auto new_pairs = (value_type*)alloc_bucket((size_type)(num_buckets * max_load_factor()) + 4); if (is_copy_trivially()) { if (_pairs) memcpy((char*)new_pairs, (char*)_pairs, _num_filled * 
sizeof(value_type)); } else { for (size_type slot = 0; slot < _num_filled; slot++) { new(new_pairs + slot) value_type(std::move(_pairs[slot])); if (is_triviall_destructable()) _pairs[slot].~value_type(); } } free(_pairs); _pairs = new_pairs; _index = (Index*)alloc_index (num_buckets); memset((char*)_index, INACTIVE, sizeof(_index[0]) * num_buckets); memset((char*)(_index + num_buckets), 0, sizeof(_index[0]) * EAD); } void rehash(uint64_t required_buckets) { if (required_buckets < _num_filled) return; assert(required_buckets < max_size()); auto num_buckets = _num_filled > (1u << 16) ? (1u << 16) : 4u; while (num_buckets < required_buckets) { num_buckets *= 2; } #if EMH_SAVE_MEM if (sizeof(KeyT) < sizeof(size_type) && num_buckets >= (1ul << (2 * 8))) num_buckets = 2ul << (sizeof(KeyT) * 8); #endif #if EMH_REHASH_LOG auto last = _last; size_type collision = 0; #endif #if EMH_HIGH_LOAD _ehead = 0; #endif _last = 0; _mask = num_buckets - 1; #if EMH_PACK_TAIL > 1 _last = _mask; num_buckets += num_buckets * EMH_PACK_TAIL / 100; //add more 5-10% #endif _num_buckets = num_buckets; rebuild(num_buckets); #ifdef EMH_SORT std::sort(_pairs, _pairs + _num_filled, [this](const value_type & l, const value_type & r) { const auto hashl = hash_key(l.first), hashr = hash_key(r.first); auto diff = int64_t((hashl & _mask) - (hashr & _mask)); if (diff != 0) return diff < 0; return hashl < hashr; // return l.first < r.first; }); #endif _etail = INACTIVE; for (size_type slot = 0; slot < _num_filled; ++slot) { const auto& key = _pairs[slot].first; const auto key_hash = hash_key(key); const auto bucket = find_unique_bucket(key_hash); _index[bucket] = { bucket, slot | ((size_type)(key_hash) & ~_mask) }; #if EMH_REHASH_LOG if (bucket != hash_main(bucket)) collision ++; #endif } #if EMH_REHASH_LOG if (_num_filled > EMH_REHASH_LOG) { auto mbucket = _num_filled - collision; char buff[255] = {0}; sprintf(buff, " _num_filled/aver_size/K.V/pack/collision|last = %u/%.2lf/%s.%s/%zd|%.2lf%%,%.2lf%%", 
_num_filled, double (_num_filled) / mbucket, typeid(KeyT).name(), typeid(ValueT).name(), sizeof(_pairs[0]), collision * 100.0 / _num_filled, last * 100.0 / _num_buckets); #ifdef EMH_LOG static uint32_t ihashs = 0; EMH_LOG() << "hash_nums = " << ihashs ++ << "|" <<__FUNCTION__ << "|" << buff << endl; #else puts(buff); #endif } #endif } private: // Can we fit another element? bool check_expand_need() { return reserve(_num_filled, false); } static void prefetch_heap_block(char* ctrl) { // Prefetch the heap-allocated memory region to resolve potential TLB // misses. This is intended to overlap with execution of calculating the hash for a key. #if __linux__ __builtin_prefetch(static_cast(ctrl)); #elif _WIN32 && defined(_M_IX86) _mm_prefetch((const char*)ctrl, _MM_HINT_T0); #endif } size_type slot_to_bucket(const size_type slot) const noexcept { size_type main_bucket; return find_slot_bucket(slot, main_bucket); //TODO } //very slow void erase_slot(const size_type sbucket, const size_type main_bucket) noexcept { const auto slot = _index[sbucket].slot & _mask; const auto ebucket = erase_bucket(sbucket, main_bucket); const auto last_slot = --_num_filled; if (EMH_LIKELY(slot != last_slot)) { const auto last_bucket = (_etail == INACTIVE || ebucket == _etail) ? slot_to_bucket(last_slot) : _etail; _pairs[slot] = std::move(_pairs[last_slot]); _index[last_bucket].slot = slot | (_index[last_bucket].slot & ~_mask); } if (is_triviall_destructable()) _pairs[last_slot].~value_type(); _etail = INACTIVE; _index[ebucket] = {INACTIVE, 0}; #if EMH_HIGH_LOAD if (_ehead) { if (10 * _num_filled < 8 * _num_buckets) clear_empty(); else if (ebucket) push_empty(ebucket); } #endif } size_type erase_bucket(const size_type bucket, const size_type main_bucket) noexcept { const auto next_bucket = _index[bucket].next; if (bucket == main_bucket) { if (main_bucket != next_bucket) { const auto nbucket = _index[next_bucket].next; _index[main_bucket] = { (nbucket == next_bucket) ? 
main_bucket : nbucket, _index[next_bucket].slot }; } return next_bucket; } const auto prev_bucket = find_prev_bucket(main_bucket, bucket); _index[prev_bucket].next = (bucket == next_bucket) ? prev_bucket : next_bucket; return bucket; } // Find the slot with this key, or return bucket size size_type find_slot_bucket(const size_type slot, size_type& main_bucket) const { const auto key_hash = hash_key(_pairs[slot].first); const auto bucket = main_bucket = size_type(key_hash & _mask); if (slot == (_index[bucket].slot & _mask)) return bucket; auto next_bucket = _index[bucket].next; while (true) { if (EMH_LIKELY(slot == (_index[next_bucket].slot & _mask))) return next_bucket; next_bucket = _index[next_bucket].next; } return INACTIVE; } // Find the slot with this key, or return bucket size size_type find_filled_bucket(const KeyT& key, uint64_t key_hash) const noexcept { const auto bucket = size_type(key_hash & _mask); auto next_bucket = _index[bucket].next; if (EMH_UNLIKELY((int)next_bucket < 0)) return INACTIVE; const auto slot = _index[bucket].slot & _mask; //prefetch_heap_block((char*)&_pairs[slot]); if (EMH_EQHASH(bucket, key_hash)) { if (EMH_LIKELY(_eq(key, _pairs[slot].first))) return bucket; } if (next_bucket == bucket) return INACTIVE; while (true) { if (EMH_EQHASH(next_bucket, key_hash)) { const auto next_slot = _index[next_bucket].slot & _mask; if (EMH_LIKELY(_eq(key, _pairs[next_slot].first))) return next_bucket; } const auto nbucket = _index[next_bucket].next; if (nbucket == next_bucket) return INACTIVE; next_bucket = nbucket; } return INACTIVE; } // Find the slot with this key, or return bucket size template size_type find_filled_slot(const K& key) const noexcept { const auto key_hash = hash_key(key); const auto bucket = size_type(key_hash & _mask); auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return _num_filled; const auto slot = _index[bucket].slot & _mask; //prefetch_heap_block((char*)&_pairs[slot]); if (EMH_EQHASH(bucket, key_hash)) { 
if (EMH_LIKELY(_eq(key, _pairs[slot].first))) return slot; } if (next_bucket == bucket) return _num_filled; while (true) { if (EMH_EQHASH(next_bucket, key_hash)) { const auto next_slot = _index[next_bucket].slot & _mask; if (EMH_LIKELY(_eq(key, _pairs[next_slot].first))) return next_slot; } const auto nbucket = _index[next_bucket].next; if (nbucket == next_bucket) return _num_filled; next_bucket = nbucket; } return _num_filled; } #if EMH_SORT size_type find_hash_bucket(const KeyT& key) const noexcept { const auto key_hash = hash_key(key); const auto bucket = size_type(key_hash & _mask); const auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) return END; auto slot = _index[bucket].slot & _mask; if (_eq(key, _pairs[slot++].first)) return slot; else if (next_bucket == bucket) return END; while (true) { const auto& okey = _pairs[slot++].first; if (_eq(key, okey)) return slot; const auto hasho = hash_key(okey); if ((hasho & _mask) != bucket) break; else if (hasho > key_hash) break; else if (EMH_UNLIKELY(slot >= _num_filled)) break; } return END; } //only for find/can not insert size_type find_sorted_bucket(const KeyT& key) const noexcept { const auto key_hash = hash_key(key); const auto bucket = size_type(key_hash & _mask); const auto slots = (int)(_index[bucket].next); //TODO if (slots < 0 /**|| key < _pairs[slot].first*/) return END; const auto slot = _index[bucket].slot & _mask; auto ormask = _index[bucket].slot & ~_mask; auto hmask = (size_type)(key_hash) & ~_mask; if ((hmask | ormask) != ormask) return END; if (_eq(key, _pairs[slot].first)) return slot; else if (slots == 1 || key < _pairs[slot].first) return END; #if EMH_SORT if (key < _pairs[slot].first || key > _pairs[slots + slot - 1].first) return END; #endif for (size_type i = 1; i < slots; ++i) { const auto& okey = _pairs[slot + i].first; if (_eq(key, okey)) return slot + i; // else if (okey > key) // return END; } return END; } #endif //kick out bucket and find empty to occpuy //it will break 
the origin link and relink again. //before: main_bucket-->prev_bucket --> bucket --> next_bucket //after : main_bucket-->prev_bucket --> (removed)--> new_bucket--> next_bucket size_type kickout_bucket(const size_type kmain, const size_type bucket) noexcept { const auto next_bucket = _index[bucket].next; const auto new_bucket = find_empty_bucket(next_bucket, 2); const auto prev_bucket = find_prev_bucket(kmain, bucket); const auto last = next_bucket == bucket ? new_bucket : next_bucket; _index[new_bucket] = {last, _index[bucket].slot}; _index[prev_bucket].next = new_bucket; _index[bucket].next = INACTIVE; return bucket; } /* ** inserts a new key into a hash table; first, check whether key's main ** bucket/position is free. If not, check whether colliding node/bucket is in its main ** position or not: if it is not, move colliding bucket to an empty place and ** put new key in its main position; otherwise (colliding bucket is in its main ** position), new key goes to an empty position. */ template size_type find_or_allocate(const K& key, uint64_t key_hash) noexcept { const auto bucket = size_type(key_hash & _mask); auto next_bucket = _index[bucket].next; prefetch_heap_block((char*)&_pairs[bucket]); if ((int)next_bucket < 0) { #if EMH_HIGH_LOAD if (next_bucket != INACTIVE) pop_empty(bucket); #endif return bucket; } const auto slot = _index[bucket].slot & _mask; if (EMH_EQHASH(bucket, key_hash)) if (EMH_LIKELY(_eq(key, _pairs[slot].first))) return bucket; //check current bucket_key is in main bucket or not const auto kmain = hash_bucket(_pairs[slot].first); if (kmain != bucket) return kickout_bucket(kmain, bucket); else if (next_bucket == bucket) return _index[next_bucket].next = find_empty_bucket(next_bucket, 1); uint32_t csize = 1; //find next linked bucket and check key while (true) { const auto eslot = _index[next_bucket].slot & _mask; if (EMH_EQHASH(next_bucket, key_hash)) { if (EMH_LIKELY(_eq(key, _pairs[eslot].first))) return next_bucket; } csize += 1; const auto 
nbucket = _index[next_bucket].next; if (nbucket == next_bucket) break; next_bucket = nbucket; } //find a empty and link it to tail const auto new_bucket = find_empty_bucket(next_bucket, csize); prefetch_heap_block((char*)&_pairs[new_bucket]); return _index[next_bucket].next = new_bucket; } size_type find_unique_bucket(uint64_t key_hash) noexcept { const auto bucket = size_type(key_hash & _mask); auto next_bucket = _index[bucket].next; if ((int)next_bucket < 0) { #if EMH_HIGH_LOAD if (next_bucket != INACTIVE) pop_empty(bucket); #endif return bucket; } //check current bucket_key is in main bucket or not const auto kmain = hash_main(bucket); if (EMH_UNLIKELY(kmain != bucket)) return kickout_bucket(kmain, bucket); else if (EMH_UNLIKELY(next_bucket != bucket)) next_bucket = find_last_bucket(next_bucket); return _index[next_bucket].next = find_empty_bucket(next_bucket, 2); } /*** Different probing techniques usually provide a trade-off between memory locality and avoidance of clustering. Since Robin Hood hashing is relatively resilient to clustering (both primary and secondary), linear probing is the most cache friendly alternativeis typically used. It's the core algorithm of this hash map with highly optimization/benchmark. normally linear probing is inefficient with high load factor, it use a new 3-way linear probing strategy to search empty slot. from benchmark even the load factor > 0.9, it's more 2-3 timer fast than one-way search strategy. 1. linear or quadratic probing a few cache line for less cache miss from input slot "bucket_from". 2. the first search slot from member variant "_last", init with 0 3. the second search slot from calculated pos "(_num_filled + _last) & _mask", it's like a rand value */ // key is not in this mavalue. Find a place to put it. 
size_type find_empty_bucket(const size_type bucket_from, uint32_t csize) noexcept { (void)csize; #if EMH_HIGH_LOAD if (_ehead) return pop_empty(_ehead); #endif auto bucket = bucket_from; if (EMH_EMPTY(++bucket) || EMH_EMPTY(++bucket)) return bucket; #ifdef EMH_QUADRATIC constexpr size_type linear_probe_length = 2 * EMH_CACHE_LINE_SIZE / sizeof(Index);//16 for (size_type offset = csize + 2, step = 4; offset <= linear_probe_length; ) { bucket = (bucket_from + offset) & _mask; if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket)) return bucket; offset += step; //7/8. 12. 16 } #else constexpr size_type quadratic_probe_length = 6u; for (size_type offset = 4u, step = 3u; step < quadratic_probe_length; ) { bucket = (bucket_from + offset) & _mask; if (EMH_EMPTY(bucket) || EMH_EMPTY(++bucket)) return bucket; offset += step++; } #endif #if EMH_PREFETCH __builtin_prefetch(static_cast(_index + _last + 1), 0, EMH_PREFETCH); #endif for (;;) { #if EMH_PACK_TAIL //find empty bucket and skip next if (EMH_EMPTY(_last++))// || EMH_EMPTY(_last++)) return _last++ - 1; if (EMH_UNLIKELY(_last >= _num_buckets)) _last = 0; auto medium = (_mask / 4 + _last++) & _mask; if (EMH_EMPTY(medium)) return medium; #else _last &= _mask; if (EMH_EMPTY(++_last))// || EMH_EMPTY(++_last)) return _last; auto medium = (_num_buckets / 2 + _last) & _mask; if (EMH_EMPTY(medium))// || EMH_EMPTY(++medium)) return medium; #endif } return 0; } size_type find_last_bucket(size_type main_bucket) const { auto next_bucket = _index[main_bucket].next; if (next_bucket == main_bucket) return main_bucket; while (true) { const auto nbucket = _index[next_bucket].next; if (nbucket == next_bucket) return next_bucket; next_bucket = nbucket; } } size_type find_prev_bucket(const size_type main_bucket, const size_type bucket) const { auto next_bucket = _index[main_bucket].next; if (next_bucket == bucket) return main_bucket; while (true) { const auto nbucket = _index[next_bucket].next; if (nbucket == bucket) return next_bucket; next_bucket 
= nbucket; } } size_type hash_bucket(const KeyT& key) const noexcept { return (size_type)hash_key(key) & _mask; } size_type hash_main(const size_type bucket) const noexcept { const auto slot = _index[bucket].slot & _mask; return (size_type)hash_key(_pairs[slot].first) & _mask; } #if EMH_INT_HASH static constexpr uint64_t KC = UINT64_C(11400714819323198485); static uint64_t hash64(uint64_t key) { #if __SIZEOF_INT128__ && EMH_INT_HASH == 1 __uint128_t r = key; r *= KC; return (uint64_t)(r >> 64) + (uint64_t)r; #elif EMH_INT_HASH == 2 //MurmurHash3Mixer uint64_t h = key; h ^= h >> 33; h *= 0xff51afd7ed558ccd; h ^= h >> 33; h *= 0xc4ceb9fe1a85ec53; h ^= h >> 33; return h; #elif _WIN64 && EMH_INT_HASH == 1 uint64_t high; return _umul128(key, KC, &high) + high; #elif EMH_INT_HASH == 3 auto ror = (key >> 32) | (key << 32); auto low = key * 0xA24BAED4963EE407ull; auto high = ror * 0x9FB21C651E98DF25ull; auto mix = low + high; return mix; #elif EMH_INT_HASH == 1 uint64_t r = key * UINT64_C(0xca4bcaa75ec3f625); return (r >> 32) + r; #elif EMH_WYHASH64 return wyhash64(key, KC); #else uint64_t x = key; x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9); x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb); x = x ^ (x >> 31); return x; #endif } #endif #if EMH_WYHASH_HASH //#define WYHASH_CONDOM 1 static uint64_t wymix(uint64_t A, uint64_t B) { #if defined(__SIZEOF_INT128__) __uint128_t r = A; r *= B; #if WYHASH_CONDOM2 A ^= (uint64_t)r; B ^= (uint64_t)(r >> 64); #else A = (uint64_t)r; B = (uint64_t)(r >> 64); #endif #elif defined(_MSC_VER) && defined(_M_X64) #if WYHASH_CONDOM2 uint64_t a, b; a = _umul128(A, B, &b); A ^= a; B ^= b; #else A = _umul128(A, B, &B); #endif #else uint64_t ha = A >> 32, hb = B >> 32, la = (uint32_t)A, lb = (uint32_t)B, hi, lo; uint64_t rh = ha * hb, rm0 = ha * lb, rm1 = hb * la, rl = la * lb, t = rl + (rm0 << 32), c = t < rl; lo = t + (rm1 << 32); c += lo < t; hi = rh + (rm0 >> 32) + (rm1 >> 32) + c; #if WYHASH_CONDOM2 A ^= lo; B ^= hi; #else A = lo; B = 
hi; #endif #endif return A ^ B; } //multiply and xor mix function, aka MUM static inline uint64_t wyr8(const uint8_t *p) { uint64_t v; memcpy(&v, p, 8); return v; } static inline uint64_t wyr4(const uint8_t *p) { uint32_t v; memcpy(&v, p, 4); return v; } static inline uint64_t wyr3(const uint8_t *p, size_t k) { return (((uint64_t)p[0]) << 16) | (((uint64_t)p[k >> 1]) << 8) | p[k - 1]; } inline static const uint64_t secret[4] = { 0x2d358dccaa6c78a5ull, 0x8bb84b93962eacc9ull, 0x4b33a62ed433d4a3ull, 0x4d5a2da51de1aa47ull}; public: //wyhash main function https://github.com/wangyi-fudan/wyhash static uint64_t wyhashstr(const char *key, const size_t len) { uint64_t a = 0, b = 0, seed = secret[0]; const uint8_t *p = (const uint8_t*)key; if (EMH_LIKELY(len <= 16)) { if (EMH_LIKELY(len >= 4)) { const auto half = (len >> 3) << 2; a = (wyr4(p) << 32U) | wyr4(p + half); p += len - 4; b = (wyr4(p) << 32U) | wyr4(p - half); } else if (len) { a = wyr3(p, len); } } else { size_t i = len; if (EMH_UNLIKELY(i > 48)) { uint64_t see1 = seed, see2 = seed; do { seed = wymix(wyr8(p + 0) ^ secret[1], wyr8(p + 8) ^ seed); see1 = wymix(wyr8(p + 16) ^ secret[2], wyr8(p + 24) ^ see1); see2 = wymix(wyr8(p + 32) ^ secret[3], wyr8(p + 40) ^ see2); p += 48; i -= 48; } while (EMH_LIKELY(i > 48)); seed ^= see1 ^ see2; } while (i > 16) { seed = wymix(wyr8(p) ^ secret[1], wyr8(p + 8) ^ seed); i -= 16; p += 16; } a = wyr8(p + i - 16); b = wyr8(p + i - 8); } return wymix(secret[1] ^ len, wymix(a ^ secret[1], b ^ seed)); } #endif private: template::value, uint32_t>::type = 0> inline uint64_t hash_key(const UType key) const { #if EMH_INT_HASH return hash64(key); #elif EMH_IDENTITY_HASH return key + (key >> 24); #else return _hasher(key); #endif } template::value, uint32_t>::type = 0> inline uint64_t hash_key(const UType& key) const { #if EMH_WYHASH_HASH return wyhashstr(key.data(), key.size()); #else return _hasher(key); #endif } template::value && !std::is_same::value, uint32_t>::type = 0> inline 
uint64_t hash_key(const UType& key) const { return _hasher(key); } private: Index* _index; value_type*_pairs; HashT _hasher; EqT _eq; uint32_t _mlf; size_type _mask; size_type _num_buckets; size_type _num_filled; size_type _last; #if EMH_HIGH_LOAD size_type _ehead; #endif size_type _etail; }; } // namespace emhash ninja-1.13.2/src/third_party/rapidhash/000077500000000000000000000000001510764045400177665ustar00rootroot00000000000000ninja-1.13.2/src/third_party/rapidhash/README.ninja000066400000000000000000000005571510764045400217530ustar00rootroot00000000000000Description: Very fast, high quality, platform-independent hashing algorithm. Version: commit 4a6b2570e868536be84800353efd92c699f37d2c URL: https://github.com/Nicoshev/rapidhash Copyright: Copyright (C) 2024 Nicolas De Carli, Based on 'wyhash', by Wang Yi SPDX-License-Identifier: BSD-2-Clause Local changes: - Changed to UNIX line endings ninja-1.13.2/src/third_party/rapidhash/rapidhash.h000077500000000000000000000263751510764045400221220ustar00rootroot00000000000000/* * rapidhash - Very fast, high quality, platform-independent hashing algorithm. * Copyright (C) 2024 Nicolas De Carli * * Based on 'wyhash', by Wang Yi * * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You can contact the author at: * - rapidhash source repository: https://github.com/Nicoshev/rapidhash */ /* * Includes. */ #include #include #if defined(_MSC_VER) #include #if defined(_M_X64) && !defined(_M_ARM64EC) #pragma intrinsic(_umul128) #endif #endif /* * C++ macros. * * RAPIDHASH_INLINE can be overridden to be stronger than a hint, i.e. by adding __attribute__((always_inline)). */ #ifdef __cplusplus #define RAPIDHASH_NOEXCEPT noexcept #define RAPIDHASH_CONSTEXPR constexpr #ifndef RAPIDHASH_INLINE #define RAPIDHASH_INLINE inline #endif #else #define RAPIDHASH_NOEXCEPT #define RAPIDHASH_CONSTEXPR static const #ifndef RAPIDHASH_INLINE #define RAPIDHASH_INLINE static inline #endif #endif /* * Protection macro, alters behaviour of rapid_mum multiplication function. * * RAPIDHASH_FAST: Normal behavior, max speed. * RAPIDHASH_PROTECTED: Extra protection against entropy loss. */ #ifndef RAPIDHASH_PROTECTED #define RAPIDHASH_FAST #elif defined(RAPIDHASH_FAST) #error "cannot define RAPIDHASH_PROTECTED and RAPIDHASH_FAST simultaneously." #endif /* * Unrolling macros, changes code definition for main hash function. * * RAPIDHASH_COMPACT: Legacy variant, each loop process 48 bytes. 
* RAPIDHASH_UNROLLED: Unrolled variant, each loop process 96 bytes. * * Most modern CPUs should benefit from having RAPIDHASH_UNROLLED. * * These macros do not alter the output hash. */ #ifndef RAPIDHASH_COMPACT #define RAPIDHASH_UNROLLED #elif defined(RAPIDHASH_UNROLLED) #error "cannot define RAPIDHASH_COMPACT and RAPIDHASH_UNROLLED simultaneously." #endif /* * Likely and unlikely macros. */ #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) #define _likely_(x) __builtin_expect(x,1) #define _unlikely_(x) __builtin_expect(x,0) #else #define _likely_(x) (x) #define _unlikely_(x) (x) #endif /* * Endianness macros. */ #ifndef RAPIDHASH_LITTLE_ENDIAN #if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define RAPIDHASH_LITTLE_ENDIAN #elif defined(__BIG_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) #define RAPIDHASH_BIG_ENDIAN #else #warning "could not determine endianness! Falling back to little endian." #define RAPIDHASH_LITTLE_ENDIAN #endif #endif /* * Default seed. */ #define RAPID_SEED (0xbdd89aa982704029ull) /* * Default secret parameters. */ RAPIDHASH_CONSTEXPR uint64_t rapid_secret[3] = {0x2d358dccaa6c78a5ull, 0x8bb84b93962eacc9ull, 0x4b33a62ed433d4a3ull}; /* * 64*64 -> 128bit multiply function. * * @param A Address of 64-bit number. * @param B Address of 64-bit number. * * Calculates 128-bit C = *A * *B. * * When RAPIDHASH_FAST is defined: * Overwrites A contents with C's low 64 bits. * Overwrites B contents with C's high 64 bits. * * When RAPIDHASH_PROTECTED is defined: * Xors and overwrites A contents with C's low 64 bits. * Xors and overwrites B contents with C's high 64 bits. 
*/ RAPIDHASH_INLINE void rapid_mum(uint64_t *A, uint64_t *B) RAPIDHASH_NOEXCEPT { #if defined(__SIZEOF_INT128__) __uint128_t r=*A; r*=*B; #ifdef RAPIDHASH_PROTECTED *A^=(uint64_t)r; *B^=(uint64_t)(r>>64); #else *A=(uint64_t)r; *B=(uint64_t)(r>>64); #endif #elif defined(_MSC_VER) && (defined(_WIN64) || defined(_M_HYBRID_CHPE_ARM64)) #if defined(_M_X64) #ifdef RAPIDHASH_PROTECTED uint64_t a, b; a=_umul128(*A,*B,&b); *A^=a; *B^=b; #else *A=_umul128(*A,*B,B); #endif #else #ifdef RAPIDHASH_PROTECTED uint64_t a, b; b = __umulh(*A, *B); a = *A * *B; *A^=a; *B^=b; #else uint64_t c = __umulh(*A, *B); *A = *A * *B; *B = c; #endif #endif #else uint64_t ha=*A>>32, hb=*B>>32, la=(uint32_t)*A, lb=(uint32_t)*B, hi, lo; uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t>32)+(rm1>>32)+c; #ifdef RAPIDHASH_PROTECTED *A^=lo; *B^=hi; #else *A=lo; *B=hi; #endif #endif } /* * Multiply and xor mix function. * * @param A 64-bit number. * @param B 64-bit number. * * Calculates 128-bit C = A * B. * Returns 64-bit xor between high and low 64 bits of C. */ RAPIDHASH_INLINE uint64_t rapid_mix(uint64_t A, uint64_t B) RAPIDHASH_NOEXCEPT { rapid_mum(&A,&B); return A^B; } /* * Read functions. 
*/ #ifdef RAPIDHASH_LITTLE_ENDIAN RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return v;} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return v;} #elif defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return __builtin_bswap64(v);} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return __builtin_bswap32(v);} #elif defined(_MSC_VER) RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, sizeof(uint64_t)); return _byteswap_uint64(v);} RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, sizeof(uint32_t)); return _byteswap_ulong(v);} #else RAPIDHASH_INLINE uint64_t rapid_read64(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint64_t v; memcpy(&v, p, 8); return (((v >> 56) & 0xff)| ((v >> 40) & 0xff00)| ((v >> 24) & 0xff0000)| ((v >> 8) & 0xff000000)| ((v << 8) & 0xff00000000)| ((v << 24) & 0xff0000000000)| ((v << 40) & 0xff000000000000)| ((v << 56) & 0xff00000000000000)); } RAPIDHASH_INLINE uint64_t rapid_read32(const uint8_t *p) RAPIDHASH_NOEXCEPT { uint32_t v; memcpy(&v, p, 4); return (((v >> 24) & 0xff)| ((v >> 8) & 0xff00)| ((v << 8) & 0xff0000)| ((v << 24) & 0xff000000)); } #endif /* * Reads and combines 3 bytes of input. * * @param p Buffer to read from. * @param k Length of @p, in bytes. * * Always reads and combines 3 bytes from memory. * Guarantees to read each buffer position at least once. * * Returns a 64-bit value containing all three bytes read. 
*/
/* Bytes p[0], p[k>>1] and p[k-1] cover every position of a 1..3-byte buffer. */
RAPIDHASH_INLINE uint64_t rapid_readSmall(const uint8_t *p, size_t k) RAPIDHASH_NOEXCEPT { return (((uint64_t)p[0])<<56)|(((uint64_t)p[k>>1])<<32)|p[k-1];}

/*
 * rapidhash main function.
 *
 * @param key     Buffer to be hashed.
 * @param len     @key length, in bytes.
 * @param seed    64-bit seed used to alter the hash result predictably.
 * @param secret  Triplet of 64-bit secrets used to alter hash result predictably.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE uint64_t rapidhash_internal(const void *key, size_t len, uint64_t seed, const uint64_t* secret) RAPIDHASH_NOEXCEPT {
  const uint8_t *p=(const uint8_t *)key; seed^=rapid_mix(seed^secret[0],secret[1])^len;  uint64_t  a,  b;
  if(_likely_(len<=16)){
    if(_likely_(len>=4)){
      /* 4..16 bytes: two possibly-overlapping 32-bit reads from each end. */
      const uint8_t * plast = p + len - 4;
      a = (rapid_read32(p) << 32) | rapid_read32(plast);
      /* delta is 0 for len<8 and 4 for len>=8, selecting the middle reads. */
      const uint64_t delta = ((len&24)>>(len>>3));
      b = ((rapid_read32(p + delta) << 32) | rapid_read32(plast - delta)); }
    else if(_likely_(len>0)){ a=rapid_readSmall(p,len); b=0;}
    else a=b=0;
  }
  else{
    size_t i=len;
    if(_unlikely_(i>48)){
      /* Bulk loop: three independent 16-byte lanes to exploit ILP. */
      uint64_t see1=seed, see2=seed;
#ifdef RAPIDHASH_UNROLLED
      while(_likely_(i>=96)){
        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
        seed=rapid_mix(rapid_read64(p+48)^secret[0],rapid_read64(p+56)^seed);
        see1=rapid_mix(rapid_read64(p+64)^secret[1],rapid_read64(p+72)^see1);
        see2=rapid_mix(rapid_read64(p+80)^secret[2],rapid_read64(p+88)^see2);
        p+=96; i-=96;
      }
      if(_unlikely_(i>=48)){
        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
        p+=48; i-=48;
      }
#else
      do {
        seed=rapid_mix(rapid_read64(p)^secret[0],rapid_read64(p+8)^seed);
        see1=rapid_mix(rapid_read64(p+16)^secret[1],rapid_read64(p+24)^see1);
        see2=rapid_mix(rapid_read64(p+32)^secret[2],rapid_read64(p+40)^see2);
        p+=48; i-=48;
      } while (_likely_(i>=48));
#endif
      /* Fold the three lanes back into one seed. */
      seed^=see1^see2;
    }
    /* 17..48 remaining bytes: up to two extra 16-byte mixes. */
    if(i>16){
      seed=rapid_mix(rapid_read64(p)^secret[2],rapid_read64(p+8)^seed^secret[1]);
      if(i>32)
        seed=rapid_mix(rapid_read64(p+16)^secret[2],rapid_read64(p+24)^seed);
    }
    /* Final 16 bytes, read from the end (may overlap previous reads). */
    a=rapid_read64(p+i-16);  b=rapid_read64(p+i-8);
  }
  a^=secret[1]; b^=seed;
  rapid_mum(&a,&b);
  return  rapid_mix(a^secret[0]^len,b^secret[1]);
}

/*
 * rapidhash default seeded hash function.
 *
 * @param key   Buffer to be hashed.
 * @param len   @key length, in bytes.
 * @param seed  64-bit seed used to alter the hash result predictably.
 *
 * Calls rapidhash_internal using provided parameters and default secrets.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE uint64_t rapidhash_withSeed(const void *key, size_t len, uint64_t seed) RAPIDHASH_NOEXCEPT {
  return rapidhash_internal(key, len, seed, rapid_secret);
}

/*
 * rapidhash default hash function.
 *
 * @param key  Buffer to be hashed.
 * @param len  @key length, in bytes.
 *
 * Calls rapidhash_withSeed using provided parameters and the default seed.
 *
 * Returns a 64-bit hash.
 */
RAPIDHASH_INLINE uint64_t rapidhash(const void *key, size_t len) RAPIDHASH_NOEXCEPT {
  return rapidhash_withSeed(key, len, RAPID_SEED);
}
ninja-1.13.2/src/timestamp.h000066400000000000000000000021621510764045400156470ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_TIMESTAMP_H_
#define NINJA_TIMESTAMP_H_

#ifdef _WIN32
#include "win32port.h"
#else
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
// NOTE(review): the bracketed header name on the #include below was lost in
// extraction (presumably <inttypes.h>, needed for int64_t) -- restore from
// upstream before building.
#include
#endif

// When considering file modification times we only care to compare
// them against one another -- we never convert them to an absolute
// real time. On POSIX we use timespec (seconds&nanoseconds since epoch)
// and on Windows we use a different value. Both fit in an int64.
typedef int64_t TimeStamp;

#endif // NINJA_TIMESTAMP_H_
ninja-1.13.2/src/util.cc000066400000000000000000000666041510764045400147700ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "util.h"

// NOTE(review): every bare `#include` below lost its <header> name during
// extraction -- the platform groupings are intact, but the individual header
// names must be restored from the upstream file before this compiles.
#ifdef __CYGWIN__
#include
#include
#elif defined( _WIN32)
#include
#include
#include
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifndef _WIN32
#include
#include
#endif

#include
#include

#if defined(__APPLE__) || defined(__FreeBSD__)
#include
#elif defined(__SVR4) && defined(__sun)
#include
#include
#elif defined(_AIX) && !defined(__PASE__)
#include
#elif defined(__linux__) || defined(__GLIBC__)
#include
#include
#include
#include "string_piece_util.h"
#endif

#if defined(__FreeBSD__)
#include
#endif

#include "edit_distance.h"

using namespace std;

// Prints "ninja: fatal: <formatted msg>" to stderr and terminates the process
// with exit code 1. Never returns.
void Fatal(const char* msg, ...)
{
  va_list ap;
  fprintf(stderr, "ninja: fatal: ");
  va_start(ap, msg);
  vfprintf(stderr, msg, ap);
  va_end(ap);
  fprintf(stderr, "\n");
#ifdef _WIN32
  // On Windows, some tools may inject extra threads.
  // exit() may block on locks held by those threads, so forcibly exit.
  fflush(stderr);
  fflush(stdout);
  ExitProcess(1);
#else
  exit(1);
#endif
}

// Prints "ninja: warning: <formatted msg>" to stderr using a caller-provided
// va_list.
void Warning(const char* msg, va_list ap) {
  fprintf(stderr, "ninja: warning: ");
  vfprintf(stderr, msg, ap);
  fprintf(stderr, "\n");
}

// Variadic convenience wrapper around Warning(msg, ap).
void Warning(const char* msg, ...) {
  va_list ap;
  va_start(ap, msg);
  Warning(msg, ap);
  va_end(ap);
}

// Prints "ninja: error: <formatted msg>" to stderr using a caller-provided
// va_list.
void Error(const char* msg, va_list ap) {
  fprintf(stderr, "ninja: error: ");
  vfprintf(stderr, msg, ap);
  fprintf(stderr, "\n");
}

// Variadic convenience wrapper around Error(msg, ap).
void Error(const char* msg, ...) {
  va_list ap;
  va_start(ap, msg);
  Error(msg, ap);
  va_end(ap);
}

// Prints "ninja: <formatted msg>" to stdout using a caller-provided va_list.
void Info(const char* msg, va_list ap) {
  fprintf(stdout, "ninja: ");
  vfprintf(stdout, msg, ap);
  fprintf(stdout, "\n");
}

// Variadic convenience wrapper around Info(msg, ap).
void Info(const char* msg, ...) {
  va_list ap;
  va_start(ap, msg);
  Info(msg, ap);
  va_end(ap);
}

// std::string front-end: canonicalizes in place and shrinks the string to the
// canonicalized length reported by the char* overload.
void CanonicalizePath(string* path, uint64_t* slash_bits) {
  size_t len = path->size();
  char* str = 0;
  if (len > 0)
    str = &(*path)[0];
  CanonicalizePath(str, &len, slash_bits);
  path->resize(len);
}

// True for '/' everywhere, and additionally '\\' on Windows.
static bool IsPathSeparator(char c) {
#ifdef _WIN32
  return c == '/' || c == '\\';
#else
  return c == '/';
#endif
}

void CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits) {
  // WARNING: this function is performance-critical; please benchmark
  // any changes you make to it.
  if (*len == 0) {
    return;
  }

  char* start = path;
  char* dst = start;
  char* dst_start = dst;
  const char* src = start;
  const char* end = start + *len;
  const char* src_next;

  // For absolute paths, skip the leading directory separator
  // as this one should never be removed from the result.
if (IsPathSeparator(*src)) { #ifdef _WIN32 // Windows network path starts with // if (src + 2 <= end && IsPathSeparator(src[1])) { src += 2; dst += 2; } else { ++src; ++dst; } #else ++src; ++dst; #endif dst_start = dst; } else { // For relative paths, skip any leading ../ as these are quite common // to reference source files in build plans, and doing this here makes // the loop work below faster in general. while (src + 3 <= end && src[0] == '.' && src[1] == '.' && IsPathSeparator(src[2])) { src += 3; dst += 3; } } // Loop over all components of the paths _except_ the last one, in // order to simplify the loop's code and make it faster. int component_count = 0; char* dst0 = dst; for (; src < end; src = src_next) { #ifndef _WIN32 // Use memchr() for faster lookups thanks to optimized C library // implementation. `hyperfine canon_perftest` shows a significant // difference (e,g, 484ms vs 437ms). const char* next_sep = static_cast(::memchr(src, '/', end - src)); if (!next_sep) { // This is the last component, will be handled out of the loop. break; } #else // Need to check for both '/' and '\\' so do not use memchr(). // Cannot use strpbrk() because end[0] can be \0 or something else! const char* next_sep = src; while (next_sep != end && !IsPathSeparator(*next_sep)) ++next_sep; if (next_sep == end) { // This is the last component, will be handled out of the loop. break; } #endif // Position for next loop iteration. src_next = next_sep + 1; // Length of the component, excluding trailing directory. size_t component_len = next_sep - src; if (component_len <= 2) { if (component_len == 0) { continue; // Ignore empty component, e.g. 'foo//bar' -> 'foo/bar'. } if (src[0] == '.') { if (component_len == 1) { continue; // Ignore '.' component, e.g. './foo' -> 'foo'. } else if (src[1] == '.') { // Process the '..' component if found. Back up if possible. if (component_count > 0) { // Move back to start of previous component. 
--component_count; while (--dst > dst0 && !IsPathSeparator(dst[-1])) { // nothing to do here, decrement happens before condition check. } } else { dst[0] = '.'; dst[1] = '.'; dst[2] = src[2]; dst += 3; } continue; } } } ++component_count; // Copy or skip component, including trailing directory separator. if (dst != src) { ::memmove(dst, src, src_next - src); } dst += src_next - src; } // Handling the last component that does not have a trailing separator. // The logic here is _slightly_ different since there is no trailing // directory separator. size_t component_len = end - src; do { if (component_len == 0) break; // Ignore empty component (e.g. 'foo//' -> 'foo/') if (src[0] == '.') { if (component_len == 1) break; // Ignore trailing '.' (e.g. 'foo/.' -> 'foo/') if (component_len == 2 && src[1] == '.') { // Handle '..'. Back up if possible. if (component_count > 0) { while (--dst > dst0 && !IsPathSeparator(dst[-1])) { // nothing to do here, decrement happens before condition check. } } else { dst[0] = '.'; dst[1] = '.'; dst += 2; // No separator to add here. } break; } } // Skip or copy last component, no trailing separator. if (dst != src) { ::memmove(dst, src, component_len); } dst += component_len; } while (0); // Remove trailing path separator if any, but keep the initial // path separator(s) if there was one (or two on Windows). if (dst > dst_start && IsPathSeparator(dst[-1])) dst--; if (dst == start) { // Handle special cases like "aa/.." -> "." *dst++ = '.'; } *len = dst - start; // dst points after the trailing char here. 
#ifdef _WIN32 uint64_t bits = 0; uint64_t bits_mask = 1; for (char* c = start; c < start + *len; ++c) { switch (*c) { case '\\': bits |= bits_mask; *c = '/'; NINJA_FALLTHROUGH; case '/': bits_mask <<= 1; } } *slash_bits = bits; #else *slash_bits = 0; #endif } static inline bool IsKnownShellSafeCharacter(char ch) { if ('A' <= ch && ch <= 'Z') return true; if ('a' <= ch && ch <= 'z') return true; if ('0' <= ch && ch <= '9') return true; switch (ch) { case '_': case '+': case '-': case '.': case '/': return true; default: return false; } } static inline bool IsKnownWin32SafeCharacter(char ch) { switch (ch) { case ' ': case '"': return false; default: return true; } } static inline bool StringNeedsShellEscaping(const string& input) { for (size_t i = 0; i < input.size(); ++i) { if (!IsKnownShellSafeCharacter(input[i])) return true; } return false; } static inline bool StringNeedsWin32Escaping(const string& input) { for (size_t i = 0; i < input.size(); ++i) { if (!IsKnownWin32SafeCharacter(input[i])) return true; } return false; } void GetShellEscapedString(const string& input, string* result) { assert(result); if (!StringNeedsShellEscaping(input)) { result->append(input); return; } const char kQuote = '\''; const char kEscapeSequence[] = "'\\'"; result->push_back(kQuote); string::const_iterator span_begin = input.begin(); for (string::const_iterator it = input.begin(), end = input.end(); it != end; ++it) { if (*it == kQuote) { result->append(span_begin, it); result->append(kEscapeSequence); span_begin = it; } } result->append(span_begin, input.end()); result->push_back(kQuote); } void GetWin32EscapedString(const string& input, string* result) { assert(result); if (!StringNeedsWin32Escaping(input)) { result->append(input); return; } const char kQuote = '"'; const char kBackslash = '\\'; result->push_back(kQuote); size_t consecutive_backslash_count = 0; string::const_iterator span_begin = input.begin(); for (string::const_iterator it = input.begin(), end = input.end(); it 
!= end; ++it) { switch (*it) { case kBackslash: ++consecutive_backslash_count; break; case kQuote: result->append(span_begin, it); result->append(consecutive_backslash_count + 1, kBackslash); span_begin = it; consecutive_backslash_count = 0; break; default: consecutive_backslash_count = 0; break; } } result->append(span_begin, input.end()); result->append(consecutive_backslash_count, kBackslash); result->push_back(kQuote); } int ReadFile(const string& path, string* contents, string* err) { #ifdef _WIN32 // This makes a ninja run on a set of 1500 manifest files about 4% faster // than using the generic fopen code below. err->clear(); HANDLE f = ::CreateFileA(path.c_str(), GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_SEQUENTIAL_SCAN, NULL); if (f == INVALID_HANDLE_VALUE) { err->assign(GetLastErrorString()); return -ENOENT; } for (;;) { DWORD len; char buf[64 << 10]; if (!::ReadFile(f, buf, sizeof(buf), &len, NULL)) { err->assign(GetLastErrorString()); contents->clear(); ::CloseHandle(f); return -EIO; } if (len == 0) break; contents->append(buf, len); } ::CloseHandle(f); return 0; #else FILE* f = fopen(path.c_str(), "rb"); if (!f) { err->assign(strerror(errno)); return -errno; } #ifdef __USE_LARGEFILE64 struct stat64 st; if (fstat64(fileno(f), &st) < 0) { #else struct stat st; if (fstat(fileno(f), &st) < 0) { #endif err->assign(strerror(errno)); fclose(f); return -errno; } // +1 is for the resize in ManifestParser::Load contents->reserve(st.st_size + 1); char buf[64 << 10]; size_t len; while (!feof(f) && (len = fread(buf, 1, sizeof(buf), f)) > 0) { contents->append(buf, len); } if (ferror(f)) { err->assign(strerror(errno)); // XXX errno? 
contents->clear(); fclose(f); return -errno; } fclose(f); return 0; #endif } void SetCloseOnExec(int fd) { #ifndef _WIN32 int flags = fcntl(fd, F_GETFD); if (flags < 0) { perror("fcntl(F_GETFD)"); } else { if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) perror("fcntl(F_SETFD)"); } #else HANDLE hd = (HANDLE) _get_osfhandle(fd); if (! SetHandleInformation(hd, HANDLE_FLAG_INHERIT, 0)) { fprintf(stderr, "SetHandleInformation(): %s", GetLastErrorString().c_str()); } #endif // ! _WIN32 } const char* SpellcheckStringV(const string& text, const vector& words) { const bool kAllowReplacements = true; const int kMaxValidEditDistance = 3; int min_distance = kMaxValidEditDistance + 1; const char* result = NULL; for (vector::const_iterator i = words.begin(); i != words.end(); ++i) { int distance = EditDistance(*i, text, kAllowReplacements, kMaxValidEditDistance); if (distance < min_distance) { min_distance = distance; result = *i; } } return result; } const char* SpellcheckString(const char* text, ...) { // Note: This takes a const char* instead of a string& because using // va_start() with a reference parameter is undefined behavior. 
va_list ap; va_start(ap, text); vector words; const char* word; while ((word = va_arg(ap, const char*))) words.push_back(word); va_end(ap); return SpellcheckStringV(text, words); } #ifdef _WIN32 string GetLastErrorString() { DWORD err = GetLastError(); char* msg_buf; FormatMessageA( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT), (char*)&msg_buf, 0, NULL); if (msg_buf == nullptr) { char fallback_msg[128] = {0}; snprintf(fallback_msg, sizeof(fallback_msg), "GetLastError() = %lu", err); return fallback_msg; } string msg = msg_buf; LocalFree(msg_buf); return msg; } void Win32Fatal(const char* function, const char* hint) { if (hint) { Fatal("%s: %s (%s)", function, GetLastErrorString().c_str(), hint); } else { Fatal("%s: %s", function, GetLastErrorString().c_str()); } } #endif bool islatinalpha(int c) { // isalpha() is locale-dependent. return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } string StripAnsiEscapeCodes(const string& in) { string stripped; stripped.reserve(in.size()); for (size_t i = 0; i < in.size(); ++i) { if (in[i] != '\33') { // Not an escape code. stripped.push_back(in[i]); continue; } // Only strip CSIs for now. if (i + 1 >= in.size()) break; if (in[i + 1] != '[') continue; // Not a CSI. i += 2; // Skip everything up to and including the next [a-zA-Z]. 
while (i < in.size() && !islatinalpha(in[i])) ++i; } return stripped; } #if defined(__linux__) || defined(__GLIBC__) std::pair readCount(const std::string& path) { std::ifstream file(path.c_str()); if (!file.is_open()) return std::make_pair(0, false); int64_t n = 0; file >> n; if (file.good()) return std::make_pair(n, true); return std::make_pair(0, false); } struct MountPoint { int mountId; int parentId; StringPiece deviceId; StringPiece root; StringPiece mountPoint; vector options; vector optionalFields; StringPiece fsType; StringPiece mountSource; vector superOptions; bool parse(const string& line) { vector pieces = SplitStringPiece(line, ' '); if (pieces.size() < 10) return false; size_t optionalStart = 0; for (size_t i = 6; i < pieces.size(); i++) { if (pieces[i] == "-") { optionalStart = i + 1; break; } } if (optionalStart == 0) return false; if (optionalStart + 3 != pieces.size()) return false; mountId = atoi(pieces[0].AsString().c_str()); parentId = atoi(pieces[1].AsString().c_str()); deviceId = pieces[2]; root = pieces[3]; mountPoint = pieces[4]; options = SplitStringPiece(pieces[5], ','); optionalFields = vector(&pieces[6], &pieces[optionalStart - 1]); fsType = pieces[optionalStart]; mountSource = pieces[optionalStart + 1]; superOptions = SplitStringPiece(pieces[optionalStart + 2], ','); return true; } string translate(string& path) const { // path must be sub dir of root if (path.compare(0, root.len_, root.str_, root.len_) != 0) { return string(); } path.erase(0, root.len_); if (path == ".." 
|| (path.length() > 2 && path.compare(0, 3, "../") == 0)) { return string(); } return mountPoint.AsString() + "/" + path; } }; struct CGroupSubSys { int id; string name; vector subsystems; bool parse(string& line) { size_t first = line.find(':'); if (first == string::npos) return false; line[first] = '\0'; size_t second = line.find(':', first + 1); if (second == string::npos) return false; line[second] = '\0'; id = atoi(line.c_str()); name = line.substr(second + 1); vector pieces = SplitStringPiece(StringPiece(line.c_str() + first + 1), ','); for (size_t i = 0; i < pieces.size(); i++) { subsystems.push_back(pieces[i].AsString()); } return true; } }; map ParseMountInfo(map& subsystems) { map cgroups; ifstream mountinfo("/proc/self/mountinfo"); if (!mountinfo.is_open()) return cgroups; while (!mountinfo.eof()) { string line; getline(mountinfo, line); MountPoint mp; if (!mp.parse(line)) continue; if (mp.fsType == "cgroup") { for (size_t i = 0; i < mp.superOptions.size(); i++) { std::string opt = mp.superOptions[i].AsString(); auto subsys = subsystems.find(opt); if (subsys == subsystems.end()) { continue; } std::string newPath = mp.translate(subsys->second.name); if (!newPath.empty()) { cgroups.emplace(opt, newPath); } } } else if (mp.fsType == "cgroup2") { // Find cgroup2 entry in format "0::/path/to/cgroup" auto subsys = std::find_if(subsystems.begin(), subsystems.end(), [](const auto& sys) { return sys.first == "" && sys.second.id == 0; }); if (subsys == subsystems.end()) { continue; } std::string path = mp.mountPoint.AsString(); if (subsys->second.name != "/") { // Append the relative path for the cgroup to the mount point path.append(subsys->second.name); } cgroups.emplace("cgroup2", path); } } return cgroups; } map ParseSelfCGroup() { map cgroups; ifstream cgroup("/proc/self/cgroup"); if (!cgroup.is_open()) return cgroups; string line; while (!cgroup.eof()) { getline(cgroup, line); CGroupSubSys subsys; if (!subsys.parse(line)) continue; for (size_t i = 0; i < 
subsys.subsystems.size(); i++) { cgroups.insert(make_pair(subsys.subsystems[i], subsys)); } } return cgroups; } int ParseCgroupV1(std::string& path) { std::pair quota = readCount(path + "/cpu.cfs_quota_us"); if (!quota.second || quota.first == -1) return -1; std::pair period = readCount(path + "/cpu.cfs_period_us"); if (!period.second) return -1; if (period.first == 0) return -1; return quota.first / period.first; } int ParseCgroupV2(std::string& path) { // Read CPU quota from cgroup v2 std::ifstream cpu_max(path + "/cpu.max"); if (!cpu_max.is_open()) { return -1; } std::string max_line; if (!std::getline(cpu_max, max_line) || max_line.empty()) { return -1; } // Format is "quota period" or "max period" size_t space_pos = max_line.find(' '); if (space_pos == string::npos) { return -1; } std::string quota_str = max_line.substr(0, space_pos); std::string period_str = max_line.substr(space_pos + 1); if (quota_str == "max") { return -1; // No CPU limit set } // Convert quota string to integer char* quota_end = nullptr; errno = 0; int64_t quota = strtoll(quota_str.c_str(), "a_end, 10); // Check for conversion errors if (errno == ERANGE || quota_end == quota_str.c_str() || *quota_end != '\0' || quota <= 0) { return -1; } // Convert period string to integer char* period_end = nullptr; errno = 0; int64_t period = strtoll(period_str.c_str(), &period_end, 10); // Check for conversion errors if (errno == ERANGE || period_end == period_str.c_str() || *period_end != '\0' || period <= 0) { return -1; } return quota / period; } int ParseCPUFromCGroup() { auto subsystems = ParseSelfCGroup(); auto cgroups = ParseMountInfo(subsystems); // Prefer cgroup v2 if both v1 and v2 should be present const auto cgroup2 = cgroups.find("cgroup2"); if (cgroup2 != cgroups.end()) { return ParseCgroupV2(cgroup2->second); } const auto cpu = cgroups.find("cpu"); if (cpu != cgroups.end()) { return ParseCgroupV1(cpu->second); } return -1; } #endif int GetProcessorCount() { #ifdef _WIN32 DWORD cpuCount = 
0; #ifndef _WIN64 // Need to use GetLogicalProcessorInformationEx to get real core count on // machines with >64 cores. See https://stackoverflow.com/a/31209344/21475 DWORD len = 0; if (!GetLogicalProcessorInformationEx(RelationProcessorCore, nullptr, &len) && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { std::vector buf(len); int cores = 0; if (GetLogicalProcessorInformationEx(RelationProcessorCore, reinterpret_cast( buf.data()), &len)) { for (DWORD i = 0; i < len; ) { auto info = reinterpret_cast( buf.data() + i); if (info->Relationship == RelationProcessorCore && info->Processor.GroupCount == 1) { for (KAFFINITY core_mask = info->Processor.GroupMask[0].Mask; core_mask; core_mask >>= 1) { cores += (core_mask & 1); } } i += info->Size; } if (cores != 0) { cpuCount = cores; } } } #endif if (cpuCount == 0) { cpuCount = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); } JOBOBJECT_CPU_RATE_CONTROL_INFORMATION info; // reference: // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_cpu_rate_control_information if (QueryInformationJobObject(NULL, JobObjectCpuRateControlInformation, &info, sizeof(info), NULL)) { if (info.ControlFlags & (JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP)) { return cpuCount * info.CpuRate / 10000; } } return cpuCount; #else int cgroupCount = -1; int schedCount = -1; #if defined(__linux__) || defined(__GLIBC__) cgroupCount = ParseCPUFromCGroup(); #endif // The number of exposed processors might not represent the actual number of // processors threads can run on. 
This happens when a CPU set limitation is // active, see https://github.com/ninja-build/ninja/issues/1278 #if defined(__FreeBSD__) cpuset_t mask; CPU_ZERO(&mask); if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(mask), &mask) == 0) { return CPU_COUNT(&mask); } #elif defined(CPU_COUNT) cpu_set_t set; if (sched_getaffinity(getpid(), sizeof(set), &set) == 0) { schedCount = CPU_COUNT(&set); } #endif if (cgroupCount >= 0 && schedCount >= 0) return std::min(cgroupCount, schedCount); if (cgroupCount < 0 && schedCount < 0) return static_cast(sysconf(_SC_NPROCESSORS_ONLN)); return std::max(cgroupCount, schedCount); #endif } #if defined(_WIN32) || defined(__CYGWIN__) static double CalculateProcessorLoad(uint64_t idle_ticks, uint64_t total_ticks) { static uint64_t previous_idle_ticks = 0; static uint64_t previous_total_ticks = 0; static double previous_load = -0.0; uint64_t idle_ticks_since_last_time = idle_ticks - previous_idle_ticks; uint64_t total_ticks_since_last_time = total_ticks - previous_total_ticks; bool first_call = (previous_total_ticks == 0); bool ticks_not_updated_since_last_call = (total_ticks_since_last_time == 0); double load; if (first_call || ticks_not_updated_since_last_call) { load = previous_load; } else { // Calculate load. double idle_to_total_ratio = ((double)idle_ticks_since_last_time) / total_ticks_since_last_time; double load_since_last_call = 1.0 - idle_to_total_ratio; // Filter/smooth result when possible. 
if(previous_load > 0) { load = 0.9 * previous_load + 0.1 * load_since_last_call; } else { load = load_since_last_call; } } previous_load = load; previous_total_ticks = total_ticks; previous_idle_ticks = idle_ticks; return load; } static uint64_t FileTimeToTickCount(const FILETIME & ft) { uint64_t high = (((uint64_t)(ft.dwHighDateTime)) << 32); uint64_t low = ft.dwLowDateTime; return (high | low); } double GetLoadAverage() { FILETIME idle_time, kernel_time, user_time; BOOL get_system_time_succeeded = GetSystemTimes(&idle_time, &kernel_time, &user_time); double posix_compatible_load; if (get_system_time_succeeded) { uint64_t idle_ticks = FileTimeToTickCount(idle_time); // kernel_time from GetSystemTimes already includes idle_time. uint64_t total_ticks = FileTimeToTickCount(kernel_time) + FileTimeToTickCount(user_time); double processor_load = CalculateProcessorLoad(idle_ticks, total_ticks); posix_compatible_load = processor_load * GetProcessorCount(); } else { posix_compatible_load = -0.0; } return posix_compatible_load; } #elif defined(__PASE__) double GetLoadAverage() { return -0.0f; } #elif defined(_AIX) double GetLoadAverage() { perfstat_cpu_total_t cpu_stats; if (perfstat_cpu_total(NULL, &cpu_stats, sizeof(cpu_stats), 1) < 0) { return -0.0f; } // Calculation taken from comment in libperfstats.h return double(cpu_stats.loadavg[0]) / double(1 << SBITS); } #elif defined(__UCLIBC__) || (defined(__BIONIC__) && __ANDROID_API__ < 29) double GetLoadAverage() { struct sysinfo si; if (sysinfo(&si) != 0) return -0.0f; return 1.0 / (1 << SI_LOAD_SHIFT) * si.loads[0]; } #elif defined(__HAIKU__) double GetLoadAverage() { return -0.0f; } #else double GetLoadAverage() { double loadavg[3] = { 0.0f, 0.0f, 0.0f }; if (getloadavg(loadavg, 3) < 0) { // Maybe we should return an error here or the availability of // getloadavg(3) should be checked when ninja is configured. 
    return -0.0f;
  }
  return loadavg[0];
}
#endif // _WIN32

// getcwd() wrapper: grows the buffer until getcwd() succeeds (ERANGE means
// the buffer was too small), then trims the string to the NUL terminator.
// Calls Fatal() -- and so does not return -- on any other getcwd() failure.
std::string GetWorkingDirectory() {
  std::string ret;
  char* success = NULL;
  do {
    ret.resize(ret.size() + 1024);
    errno = 0;
    success = getcwd(&ret[0], ret.size());
  } while (!success && errno == ERANGE);
  if (!success) {
    Fatal("cannot determine working directory: %s", strerror(errno));
  }
  ret.resize(strlen(&ret[0]));
  return ret;
}

// Truncates (or extends) |path| to |size| bytes. On Windows this opens the
// file with _O_CREAT, so a missing file is created; on POSIX truncate() fails
// for a missing file. Returns false and fills |err| with strerror(errno) on
// failure.
bool Truncate(const string& path, size_t size, string* err) {
#ifdef _WIN32
  int fh = _sopen(path.c_str(), _O_RDWR | _O_CREAT, _SH_DENYNO,
                  _S_IREAD | _S_IWRITE);
  int success = _chsize(fh, size);
  _close(fh);
#else
  int success = truncate(path.c_str(), size);
#endif
  // Both truncate() and _chsize() return 0 on success and set errno and return
  // -1 on failure.
  if (success < 0) {
    *err = strerror(errno);
    return false;
  }
  return true;
}

// unlink()/_unlink() shim; returns the underlying call's result (0 on
// success, -1 with errno set on failure).
int platformAwareUnlink(const char* filename) {
#ifdef _WIN32
  return _unlink(filename);
#else
  return unlink(filename);
#endif
}
ninja-1.13.2/src/util.h000066400000000000000000000106701510764045400146240ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_UTIL_H_
#define NINJA_UTIL_H_

#ifdef _WIN32
#include "win32port.h"
#else
// NOTE(review): the bracketed header names on the bare #include lines in this
// header were lost in extraction -- restore from upstream before building.
#include
#endif

#include
#include
#include

#if !defined(__has_cpp_attribute)
#  define __has_cpp_attribute(x) 0
#endif

// NORETURN expands to [[noreturn]] where the compiler supports it, and to
// nothing otherwise.
#if __has_cpp_attribute(noreturn)
#  define NORETURN [[noreturn]]
#else
#  define NORETURN  // nothing for old compilers
#endif

/// Log a fatal message and exit.
NORETURN void Fatal(const char* msg, ...);

// Have a generic fall-through for different versions of C/C++.
#if __has_cpp_attribute(fallthrough)
#  define NINJA_FALLTHROUGH [[fallthrough]]
#elif defined(__clang__)
#  define NINJA_FALLTHROUGH [[clang::fallthrough]]
#else
#  define NINJA_FALLTHROUGH  // nothing
#endif

/// Log a warning message.
void Warning(const char* msg, ...);
void Warning(const char* msg, va_list ap);

/// Log an error message.
void Error(const char* msg, ...);
void Error(const char* msg, va_list ap);

/// Log an informational message.
void Info(const char* msg, ...);
void Info(const char* msg, va_list ap);

/// Canonicalize a path like "foo/../bar.h" into just "bar.h".
/// |slash_bits| has bits set starting from lowest for a backslash that was
/// normalized to a forward slash. (only used on Windows)
void CanonicalizePath(std::string* path, uint64_t* slash_bits);
void CanonicalizePath(char* path, size_t* len, uint64_t* slash_bits);

/// Appends |input| to |*result|, escaping according to the whims of either
/// Bash, or Win32's CommandLineToArgvW().
/// Appends the string directly to |result| without modification if we can
/// determine that it contains no problematic characters.
void GetShellEscapedString(const std::string& input, std::string* result);
void GetWin32EscapedString(const std::string& input, std::string* result);

/// Read a file to a string (in text mode: with CRLF conversion
/// on Windows).
/// Returns -errno and fills in \a err on error.
int ReadFile(const std::string& path, std::string* contents, std::string* err);

/// Mark a file descriptor to not be inherited on exec()s.
void SetCloseOnExec(int fd);

/// Given a misspelled string and a list of correct spellings, returns
/// the closest match or NULL if there is no close enough match.
// NOTE(review): the vector's element type (presumably <const char*>) was lost
// in extraction.
const char* SpellcheckStringV(const std::string& text,
                              const std::vector& words);

/// Like SpellcheckStringV, but takes a NULL-terminated list.
const char* SpellcheckString(const char* text, ...);

bool islatinalpha(int c);

/// Removes all Ansi escape codes (http://www.termsys.demon.co.uk/vtansi.htm).
std::string StripAnsiEscapeCodes(const std::string& in);

/// @return the number of processors on the machine.  Useful for an initial
/// guess for how many jobs to run in parallel.  @return 0 on error.
int GetProcessorCount();

/// @return the load average of the machine. A negative value is returned
/// on error.
double GetLoadAverage();

/// a wrapper for getcwd()
std::string GetWorkingDirectory();

/// Truncates a file to the given size.
bool Truncate(const std::string& path, size_t size, std::string* err);

#ifdef _MSC_VER
#define snprintf _snprintf
#define fileno _fileno
#define chdir _chdir
#define strtoull _strtoui64
#define getcwd _getcwd
#define PATH_MAX _MAX_PATH
#endif

#ifdef _WIN32
/// Convert the value returned by GetLastError() into a string.
std::string GetLastErrorString();

/// Calls Fatal() with a function name and GetLastErrorString.
NORETURN void Win32Fatal(const char* function, const char* hint = NULL);

/// Naive implementation of C++ 20 std::bit_cast(), used to fix Clang and GCC
/// [-Wcast-function-type] warning on casting result of GetProcAddress().
// NOTE(review): the template parameter list (presumably <typename To,
// typename From>) was lost in extraction.
template
inline To FunctionCast(From from) {
  static_assert(sizeof(To) == sizeof(From), "");
  To result;
  memcpy(&result, &from, sizeof(To));
  return result;
}
#endif

int platformAwareUnlink(const char* filename);

#endif  // NINJA_UTIL_H_
ninja-1.13.2/src/util_test.cc000066400000000000000000000323031510764045400160160ustar00rootroot00000000000000// Copyright 2011 Google Inc. All Rights Reserved.
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "util.h" #include "test.h" using namespace std; namespace { void CanonicalizePath(string* path) { uint64_t unused; ::CanonicalizePath(path, &unused); } } // namespace TEST(CanonicalizePath, PathSamples) { string path; CanonicalizePath(&path); EXPECT_EQ("", path); path = "foo.h"; CanonicalizePath(&path); EXPECT_EQ("foo.h", path); path = "./foo.h"; CanonicalizePath(&path); EXPECT_EQ("foo.h", path); path = "./foo/./bar.h"; CanonicalizePath(&path); EXPECT_EQ("foo/bar.h", path); path = "./x/foo/../bar.h"; CanonicalizePath(&path); EXPECT_EQ("x/bar.h", path); path = "./x/foo/../../bar.h"; CanonicalizePath(&path); EXPECT_EQ("bar.h", path); path = "foo//bar"; CanonicalizePath(&path); EXPECT_EQ("foo/bar", path); path = "foo//.//..///bar"; CanonicalizePath(&path); EXPECT_EQ("bar", path); path = "./x/../foo/../../bar.h"; CanonicalizePath(&path); EXPECT_EQ("../bar.h", path); path = "foo/./."; CanonicalizePath(&path); EXPECT_EQ("foo", path); path = "foo/bar/.."; CanonicalizePath(&path); EXPECT_EQ("foo", path); path = "foo/.hidden_bar"; CanonicalizePath(&path); EXPECT_EQ("foo/.hidden_bar", path); path = "/foo"; CanonicalizePath(&path); EXPECT_EQ("/foo", path); path = "//foo"; CanonicalizePath(&path); #ifdef _WIN32 EXPECT_EQ("//foo", path); #else EXPECT_EQ("/foo", path); #endif path = ".."; CanonicalizePath(&path); EXPECT_EQ("..", path); path = "../"; CanonicalizePath(&path); EXPECT_EQ("..", path); path = "../foo"; 
CanonicalizePath(&path); EXPECT_EQ("../foo", path); path = "../foo/"; CanonicalizePath(&path); EXPECT_EQ("../foo", path); path = "../.."; CanonicalizePath(&path); EXPECT_EQ("../..", path); path = "../../"; CanonicalizePath(&path); EXPECT_EQ("../..", path); path = "./../"; CanonicalizePath(&path); EXPECT_EQ("..", path); path = "/.."; CanonicalizePath(&path); EXPECT_EQ("/..", path); path = "/../"; CanonicalizePath(&path); EXPECT_EQ("/..", path); path = "/../.."; CanonicalizePath(&path); EXPECT_EQ("/../..", path); path = "/../../"; CanonicalizePath(&path); EXPECT_EQ("/../..", path); path = "/"; CanonicalizePath(&path); EXPECT_EQ("/", path); path = "/foo/.."; CanonicalizePath(&path); EXPECT_EQ("/", path); path = "."; CanonicalizePath(&path); EXPECT_EQ(".", path); path = "./."; CanonicalizePath(&path); EXPECT_EQ(".", path); path = "foo/.."; CanonicalizePath(&path); EXPECT_EQ(".", path); path = "foo/.._bar"; CanonicalizePath(&path); EXPECT_EQ("foo/.._bar", path); } #ifdef _WIN32 TEST(CanonicalizePath, PathSamplesWindows) { string path; CanonicalizePath(&path); EXPECT_EQ("", path); path = "foo.h"; CanonicalizePath(&path); EXPECT_EQ("foo.h", path); path = ".\\foo.h"; CanonicalizePath(&path); EXPECT_EQ("foo.h", path); path = ".\\foo\\.\\bar.h"; CanonicalizePath(&path); EXPECT_EQ("foo/bar.h", path); path = ".\\x\\foo\\..\\bar.h"; CanonicalizePath(&path); EXPECT_EQ("x/bar.h", path); path = ".\\x\\foo\\..\\..\\bar.h"; CanonicalizePath(&path); EXPECT_EQ("bar.h", path); path = "foo\\\\bar"; CanonicalizePath(&path); EXPECT_EQ("foo/bar", path); path = "foo\\\\.\\\\..\\\\\\bar"; CanonicalizePath(&path); EXPECT_EQ("bar", path); path = ".\\x\\..\\foo\\..\\..\\bar.h"; CanonicalizePath(&path); EXPECT_EQ("../bar.h", path); path = "foo\\.\\."; CanonicalizePath(&path); EXPECT_EQ("foo", path); path = "foo\\bar\\.."; CanonicalizePath(&path); EXPECT_EQ("foo", path); path = "foo\\.hidden_bar"; CanonicalizePath(&path); EXPECT_EQ("foo/.hidden_bar", path); path = "\\foo"; 
CanonicalizePath(&path); EXPECT_EQ("/foo", path); path = "\\\\foo"; CanonicalizePath(&path); EXPECT_EQ("//foo", path); path = "\\"; CanonicalizePath(&path); EXPECT_EQ("/", path); } TEST(CanonicalizePath, SlashTracking) { string path; uint64_t slash_bits; path = "foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("foo.h", path); EXPECT_EQ(0, slash_bits); path = "a\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/foo.h", path); EXPECT_EQ(1, slash_bits); path = "a/bcd/efh\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/bcd/efh/foo.h", path); EXPECT_EQ(4, slash_bits); path = "a\\bcd/efh\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/bcd/efh/foo.h", path); EXPECT_EQ(5, slash_bits); path = "a\\bcd\\efh\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/bcd/efh/foo.h", path); EXPECT_EQ(7, slash_bits); path = "a/bcd/efh/foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/bcd/efh/foo.h", path); EXPECT_EQ(0, slash_bits); path = "a\\./efh\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/efh/foo.h", path); EXPECT_EQ(3, slash_bits); path = "a\\../efh\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("efh/foo.h", path); EXPECT_EQ(1, slash_bits); path = "a\\b\\c\\d\\e\\f\\g\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/b/c/d/e/f/g/foo.h", path); EXPECT_EQ(127, slash_bits); path = "a\\b\\c\\..\\..\\..\\g\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("g/foo.h", path); EXPECT_EQ(1, slash_bits); path = "a\\b/c\\../../..\\g\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("g/foo.h", path); EXPECT_EQ(1, slash_bits); path = "a\\b/c\\./../..\\g\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/g/foo.h", path); EXPECT_EQ(3, slash_bits); path = "a\\b/c\\./../..\\g/foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/g/foo.h", path); EXPECT_EQ(1, slash_bits); path = "a\\\\\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/foo.h", path); EXPECT_EQ(1, 
slash_bits); path = "a/\\\\foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/foo.h", path); EXPECT_EQ(0, slash_bits); path = "a\\//foo.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ("a/foo.h", path); EXPECT_EQ(1, slash_bits); } TEST(CanonicalizePath, CanonicalizeNotExceedingLen) { // Make sure searching \/ doesn't go past supplied len. char buf[] = "foo/bar\\baz.h\\"; // Last \ past end. uint64_t slash_bits; size_t size = 13; ::CanonicalizePath(buf, &size, &slash_bits); EXPECT_EQ(0, strncmp("foo/bar/baz.h", buf, size)); EXPECT_EQ(2, slash_bits); // Not including the trailing one. } TEST(CanonicalizePath, TooManyComponents) { string path; uint64_t slash_bits; // 64 is OK. path = "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./" "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./x.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0x0); // Backslashes version. path = "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\x.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0xffffffff); // 65 is OK if #component is less than 60 after path canonicalization. path = "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./" "a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./a/./x/y.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0x0); // Backslashes version. path = "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\" "a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\a\\.\\x\\y.h"; CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, uint64_t(0x1ffffffff)); // 59 after canonicalization is OK. 
path = "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/x/y.h"; EXPECT_EQ(58, std::count(path.begin(), path.end(), '/')); CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0x0); // Backslashes version. path = "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h"; EXPECT_EQ(58, std::count(path.begin(), path.end(), '\\')); CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, uint64_t(0x3ffffffffffffff)); // More than 60 components is now completely ok too. path = "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\" "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h"; EXPECT_EQ(218, std::count(path.begin(), path.end(), '\\')); CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0xffffffffffffffff); } #else // !_WIN32 TEST(CanonicalizePath, TooManyComponents) { string path; uint64_t slash_bits; // More than 60 components is now completely ok. 
path = "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/" "a/a/a/a/a/a/a/a/a/x/y.h"; EXPECT_EQ(218, std::count(path.begin(), path.end(), '/')); CanonicalizePath(&path, &slash_bits); EXPECT_EQ(slash_bits, 0x0); } #endif // !_WIN32 TEST(CanonicalizePath, UpDir) { string path, err; path = "../../foo/bar.h"; CanonicalizePath(&path); EXPECT_EQ("../../foo/bar.h", path); path = "test/../../foo/bar.h"; CanonicalizePath(&path); EXPECT_EQ("../foo/bar.h", path); } TEST(CanonicalizePath, AbsolutePath) { string path = "/usr/include/stdio.h"; string err; CanonicalizePath(&path); EXPECT_EQ("/usr/include/stdio.h", path); } TEST(CanonicalizePath, NotNullTerminated) { string path; size_t len; uint64_t unused; path = "foo/. bar/."; len = strlen("foo/."); // Canonicalize only the part before the space. CanonicalizePath(&path[0], &len, &unused); EXPECT_EQ(strlen("foo"), len); EXPECT_EQ("foo/. bar/.", string(path)); // Verify that foo/..file gets canonicalized to 'file' without // touching the rest of the string. 
path = "foo/../file bar/."; len = strlen("foo/../file"); CanonicalizePath(&path[0], &len, &unused); EXPECT_EQ(strlen("file"), len); EXPECT_EQ("file../file bar/.", string(path)); } TEST(PathEscaping, TortureTest) { string result; GetWin32EscapedString("foo bar\\\"'$@d!st!c'\\path'\\", &result); EXPECT_EQ("\"foo bar\\\\\\\"'$@d!st!c'\\path'\\\\\"", result); result.clear(); GetShellEscapedString("foo bar\"/'$@d!st!c'/path'", &result); EXPECT_EQ("'foo bar\"/'\\''$@d!st!c'\\''/path'\\'''", result); } TEST(PathEscaping, SensiblePathsAreNotNeedlesslyEscaped) { const char* path = "some/sensible/path/without/crazy/characters.c++"; string result; GetWin32EscapedString(path, &result); EXPECT_EQ(path, result); result.clear(); GetShellEscapedString(path, &result); EXPECT_EQ(path, result); } TEST(PathEscaping, SensibleWin32PathsAreNotNeedlesslyEscaped) { const char* path = "some\\sensible\\path\\without\\crazy\\characters.c++"; string result; GetWin32EscapedString(path, &result); EXPECT_EQ(path, result); } TEST(StripAnsiEscapeCodes, EscapeAtEnd) { string stripped = StripAnsiEscapeCodes("foo\33"); EXPECT_EQ("foo", stripped); stripped = StripAnsiEscapeCodes("foo\33["); EXPECT_EQ("foo", stripped); } TEST(StripAnsiEscapeCodes, StripColors) { // An actual clang warning. string input = "\33[1maffixmgr.cxx:286:15: \33[0m\33[0;1;35mwarning: " "\33[0m\33[1musing the result... [-Wparentheses]\33[0m"; string stripped = StripAnsiEscapeCodes(input); EXPECT_EQ("affixmgr.cxx:286:15: warning: using the result... [-Wparentheses]", stripped); } ninja-1.13.2/src/version.cc000066400000000000000000000033331510764045400154700ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "version.h" #include #include "util.h" using namespace std; const char* kNinjaVersion = "1.13.2"; void ParseVersion(const string& version, int* major, int* minor) { size_t end = version.find('.'); *major = atoi(version.substr(0, end).c_str()); *minor = 0; if (end != string::npos) { size_t start = end + 1; end = version.find('.', start); *minor = atoi(version.substr(start, end).c_str()); } } void CheckNinjaVersion(const string& version) { int bin_major, bin_minor; ParseVersion(kNinjaVersion, &bin_major, &bin_minor); int file_major, file_minor; ParseVersion(version, &file_major, &file_minor); if (bin_major > file_major) { Warning("ninja executable version (%s) greater than build file " "ninja_required_version (%s); versions may be incompatible.", kNinjaVersion, version.c_str()); return; } if ((bin_major == file_major && bin_minor < file_minor) || bin_major < file_major) { Fatal("ninja version (%s) incompatible with build file " "ninja_required_version version (%s).", kNinjaVersion, version.c_str()); } } ninja-1.13.2/src/version.h000066400000000000000000000021451510764045400153320ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef NINJA_VERSION_H_ #define NINJA_VERSION_H_ #include /// The version number of the current Ninja release. This will always /// be "git" on trunk. extern const char* kNinjaVersion; /// Parse the major/minor components of a version string. void ParseVersion(const std::string& version, int* major, int* minor); /// Check whether \a version is compatible with the current Ninja version, /// aborting if not. void CheckNinjaVersion(const std::string& required_version); #endif // NINJA_VERSION_H_ ninja-1.13.2/src/win32port.h000066400000000000000000000021641510764045400155150ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef NINJA_WIN32PORT_H_ #define NINJA_WIN32PORT_H_ #if defined(__MINGW32__) || defined(__MINGW64__) #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include #endif typedef signed short int16_t; typedef unsigned short uint16_t; /// A 64-bit integer type typedef signed long long int64_t; typedef unsigned long long uint64_t; // printf format specifier for uint64_t, from C99. #ifndef PRIu64 #define PRId64 "I64d" #define PRIu64 "I64u" #define PRIx64 "I64x" #endif #endif // NINJA_WIN32PORT_H_ ninja-1.13.2/windows/000077500000000000000000000000001510764045400143755ustar00rootroot00000000000000ninja-1.13.2/windows/ninja.manifest000066400000000000000000000006601510764045400172260ustar00rootroot00000000000000 UTF-8 true