salsa-0.23.0/.cargo_vcs_info.json

{
  "git": {
    "sha1": "572d144b33c766c792239c98b470265aaab3fef0"
  },
  "path_in_vcs": ""
}

salsa-0.23.0/.devcontainer/devcontainer.json

// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
{
    "name": "Rust",
    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    "image": "mcr.microsoft.com/devcontainers/rust:1-1-bullseye",
    "features": {
        "ghcr.io/devcontainers-contrib/features/ripgrep:1": {}
    }

    // Use 'mounts' to make the cargo cache persistent in a Docker Volume.
    // "mounts": [
    //     {
    //         "source": "devcontainer-cargo-cache-${devcontainerId}",
    //         "target": "/usr/local/cargo",
    //         "type": "volume"
    //     }
    // ]

    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],

    // Use 'postCreateCommand' to run commands after the container is created.
    // "postCreateCommand": "rustc --version",

    // Configure tool-specific properties.
    // "customizations": {},

    // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "root"
}

salsa-0.23.0/.dir-locals.el

((rust-mode (rust-format-on-save . t)))

salsa-0.23.0/.github/dependabot.yml

# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for more information:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
# https://containers.dev/guide/dependabot

version: 2
updates:
  - package-ecosystem: "devcontainers"
    directory: "/"
    schedule:
      interval: weekly

salsa-0.23.0/.github/workflows/book.yml

name: Book

on:
  push:
    branches:
      - master
  pull_request:
    paths:
      - "book/**"
      - ".github/workflows/book.yml"

jobs:
  book:
    name: Book
    runs-on: ubuntu-latest
    env:
      MDBOOK_VERSION: "0.4.40"
      MDBOOK_LINKCHECK_VERSION: "0.7.7"
      MDBOOK_MERMAID_VERSION: "0.13.0"
    steps:
      - uses: actions/checkout@v4
      - name: Install mdbook
        run: |
          curl -L https://github.com/rust-lang/mdBook/releases/download/v$MDBOOK_VERSION/mdbook-v$MDBOOK_VERSION-x86_64-unknown-linux-gnu.tar.gz | tar xz -C ~/.cargo/bin
          curl -L https://github.com/badboy/mdbook-mermaid/releases/download/v$MDBOOK_MERMAID_VERSION/mdbook-mermaid-v$MDBOOK_MERMAID_VERSION-x86_64-unknown-linux-gnu.tar.gz | tar xz -C ~/.cargo/bin
          curl -L https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v$MDBOOK_LINKCHECK_VERSION/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip -O
          unzip mdbook-linkcheck.x86_64-unknown-linux-gnu.zip -d ~/.cargo/bin
          chmod +x ~/.cargo/bin/mdbook-linkcheck
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v5
      - name: Build
        run: mdbook build
        working-directory: book
      - name: Upload static files as artifact
        id: deployment
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./book/book/html

  deploy:
    name: Deploy
    runs-on: ubuntu-latest
    needs: book
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    concurrency:
      group: github-pages
      cancel-in-progress: true
    permissions:
      contents: read
      pages: write
      id-token: write
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

salsa-0.23.0/.github/workflows/release.yml

name: Release-plz

permissions:
  pull-requests: write
  contents: write

on:
  push:
    branches:
      - master

jobs:
  # Release unpublished packages.
  release-plz-release:
    if: ${{ github.repository_owner == 'salsa-rs' }}
    name: Release-plz release
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
      - name: Run release-plz
        uses: release-plz/action@v0.5
        with:
          command: release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}

  # Create a PR with the new versions and changelog, preparing the next release.
  release-plz-pr:
    if: ${{ github.repository_owner == 'salsa-rs' }}
    name: Release-plz PR
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    concurrency:
      group: release-plz-${{ github.ref }}
      cancel-in-progress: false
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
      - name: Run release-plz
        uses: release-plz/action@v0.5
        with:
          command: release-pr
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}

salsa-0.23.0/.github/workflows/test.yml

name: Test

on:
  pull_request:
  merge_group:
  push:
    branches:
      - master

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  test:
    name: Test
    strategy:
      matrix:
        rust:
          - 1.85.0
          - stable
          - beta
        experimental:
          - false
        include:
          - rust: nightly
            experimental: true
    continue-on-error: ${{ matrix.experimental }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Rust toolchain
        uses: dtolnay/rust-toolchain@master
        id: rust-toolchain
        with:
          toolchain: ${{ matrix.rust }}
          components: rustfmt, clippy
      - uses: taiki-e/install-action@nextest
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-
            ${{ runner.os }}-cargo-
      - name: Format
        run: cargo fmt -- --check
      - name: Clippy
        run: cargo clippy --workspace --all-targets -- -D warnings
      - name: Test
        run: cargo nextest run --workspace --all-targets --no-fail-fast
      - name: Test docs
        run: cargo test --workspace --doc
      - name: Check (without default features)
        run: cargo check --workspace --no-default-features

  miri:
    name: Miri
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install Miri
        uses: dtolnay/rust-toolchain@miri
        id: rust-toolchain
      - uses: taiki-e/install-action@nextest
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-miri-${{ hashFiles('**/Cargo.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-miri-
            ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-
            ${{ runner.os }}-cargo-
      - name: Setup Miri
        run: cargo miri setup
      - name: Test with Miri
        run: cargo miri nextest run --no-fail-fast --tests
        env:
          MIRIFLAGS: -Zmiri-disable-isolation -Zmiri-retag-fields
      - name: Run examples with Miri
        run: cargo miri run --example calc

  shuttle:
    name: Shuttle
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Rust toolchain
        uses: dtolnay/rust-toolchain@master
        id: rust-toolchain
        with:
          toolchain: stable
      - uses: taiki-e/install-action@nextest
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-
            ${{ runner.os }}-cargo-
      - name: Test with Shuttle
        run: cargo nextest run --features shuttle --test parallel
  benchmarks:
    # https://github.com/CodSpeedHQ/action/issues/126
    if: github.event_name != 'merge_group'
    name: Benchmarks
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Rust toolchain
        uses: dtolnay/rust-toolchain@master
        id: rust-toolchain
        with:
          toolchain: stable
      - name: "Setup codspeed"
        uses: taiki-e/install-action@v2
        with:
          tool: cargo-codspeed
      - uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-${{ hashFiles('**/Cargo.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-${{ steps.rust-toolchain.outputs.cachekey }}-
            ${{ runner.os }}-cargo-
      - name: "Build benchmarks"
        run: cargo codspeed build
      - name: "Run benchmarks"
        uses: CodSpeedHQ/action@v3
        with:
          run: cargo codspeed run
          token: ${{ secrets.CODSPEED_TOKEN }}

salsa-0.23.0/.gitignore

/target
**/*.rs.bk
Cargo.lock
TAGS
nikom
.idea

salsa-0.23.0/CHANGELOG.md

# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [0.23.0](https://github.com/salsa-rs/salsa/compare/salsa-v0.22.0...salsa-v0.23.0) - 2025-06-27

### Added

- `Update` derive field overwrite support ([#747](https://github.com/salsa-rs/salsa/pull/747))

### Fixed

- fix race in `MemoTableTypes` ([#912](https://github.com/salsa-rs/salsa/pull/912))
- multithreaded nested fixpoint iteration ([#882](https://github.com/salsa-rs/salsa/pull/882))
- `#[doc(hidden)]` auto-generated tracked-fn structs ([#917](https://github.com/salsa-rs/salsa/pull/917))

### Other

- Emit self ty for query debug name of assoc function queries ([#927](https://github.com/salsa-rs/salsa/pull/927))
- Replace ingredient cache with faster ingredient map ([#921](https://github.com/salsa-rs/salsa/pull/921))
- add option to track heap memory usage of memos ([#925](https://github.com/salsa-rs/salsa/pull/925))
- Hide generated structs of tracked functions from docs via `#[doc(hidden)]` ([#917](https://github.com/salsa-rs/salsa/pull/917))
- Add API to dump memory usage ([#916](https://github.com/salsa-rs/salsa/pull/916))
- Revert "Assert size for interned Value" & Mark `Slot` trait as unsafe ([#915](https://github.com/salsa-rs/salsa/pull/915))
- add an option to tune interned garbage collection ([#911](https://github.com/salsa-rs/salsa/pull/911))
- Use explicit discriminants for `QueryOriginKind` for better comparisons ([#913](https://github.com/salsa-rs/salsa/pull/913))
- update boxcar ([#910](https://github.com/salsa-rs/salsa/pull/910))
- use latest revision for dependencies on interned values ([#908](https://github.com/salsa-rs/salsa/pull/908))
- remove high-durability values from interned LRU ([#907](https://github.com/salsa-rs/salsa/pull/907))
- Preserve attributes on interned/tracked struct fields ([#905](https://github.com/salsa-rs/salsa/pull/905))
- Assert size for interned `Value` ([#901](https://github.com/salsa-rs/salsa/pull/901))
- reduce size of interned value metadata ([#903](https://github.com/salsa-rs/salsa/pull/903))
- panic with string message again for cycle panics ([#898](https://github.com/salsa-rs/salsa/pull/898))
- Use `Revision` and `Durability` directly in input `Value` ([#902](https://github.com/salsa-rs/salsa/pull/902))
- Fix flaky parallel_join test ([#900](https://github.com/salsa-rs/salsa/pull/900))
- Bump MSRV to 1.85 ([#899](https://github.com/salsa-rs/salsa/pull/899))
- Simple LRU garbage collection for interned values ([#839](https://github.com/salsa-rs/salsa/pull/839))
- Capture execution backtrace when throwing `UnexpectedCycle` ([#883](https://github.com/salsa-rs/salsa/pull/883))
- Store tracked struct ids as ThinVec on Revisions ([#892](https://github.com/salsa-rs/salsa/pull/892))
- Update dependencies, remove unused `heck` dependency ([#894](https://github.com/salsa-rs/salsa/pull/894))
- Set `validate_final` in `execute` after removing the last cycle head ([#890](https://github.com/salsa-rs/salsa/pull/890))
- Pack `QueryEdge` memory layout ([#886](https://github.com/salsa-rs/salsa/pull/886))
- Lazily allocate extra memo state ([#888](https://github.com/salsa-rs/salsa/pull/888))
- Pack `QueryOrigin` memory layout ([#885](https://github.com/salsa-rs/salsa/pull/885))
- Restrict memo size assertion to 64bit platforms ([#884](https://github.com/salsa-rs/salsa/pull/884))
- Don't report stale outputs if there is newer generation in new_outputs ([#879](https://github.com/salsa-rs/salsa/pull/879))
- Fix hang in nested fixpoint iteration ([#871](https://github.com/salsa-rs/salsa/pull/871))
- Add debug spans for `new_revision` and `evict_lru` ([#881](https://github.com/salsa-rs/salsa/pull/881))
- Add fetch span ([#875](https://github.com/salsa-rs/salsa/pull/875))
- shrink_to_fit `IdentityMap` before storing it ([#816](https://github.com/salsa-rs/salsa/pull/816))
- Allow lifetimes in arguments in tracked fns with >1 parameters ([#880](https://github.com/salsa-rs/salsa/pull/880))
- Replace loom with shuttle ([#876](https://github.com/salsa-rs/salsa/pull/876))
- Use generational identifiers for tracked structs ([#864](https://github.com/salsa-rs/salsa/pull/864))

## [0.22.0](https://github.com/salsa-rs/salsa/compare/salsa-v0.21.1...salsa-v0.22.0) - 2025-05-23

### Fixed

- fix memo table growth condition ([#850](https://github.com/salsa-rs/salsa/pull/850))
- incorrect caching for queries participating in fixpoint ([#843](https://github.com/salsa-rs/salsa/pull/843))
- change detection for fixpoint queries ([#836](https://github.com/salsa-rs/salsa/pull/836))

### Other

- Allow creation of tracked associated functions (without `self`) ([#859](https://github.com/salsa-rs/salsa/pull/859))
- Short-circuit `block-on` if same thread ([#862](https://github.com/salsa-rs/salsa/pull/862))
- Skip release-plz jobs on forks ([#873](https://github.com/salsa-rs/salsa/pull/873))
- Unwind with specific type when encountering an unexpected cycle ([#856](https://github.com/salsa-rs/salsa/pull/856))
- Remove jar mentions from book ([#775](https://github.com/salsa-rs/salsa/pull/775))
- Implement an `!Update` bound escape hatch for tracked fn ([#867](https://github.com/salsa-rs/salsa/pull/867))
- Only enable `boxcar/loom` when `loom` feature is enabled ([#869](https://github.com/salsa-rs/salsa/pull/869))
- Remove default `PartialOrd` and `Ord` derives for salsa-structs ([#868](https://github.com/salsa-rs/salsa/pull/868))
- update boxcar ([#865](https://github.com/salsa-rs/salsa/pull/865))
- speed-up cycle-retry logic ([#861](https://github.com/salsa-rs/salsa/pull/861))
- Fix returns(deref | as_ref | as_deref) in tracked methods ([#857](https://github.com/salsa-rs/salsa/pull/857))
- Changed `return_ref` syntax to `returns(as_ref)` and `returns(cloned)` ([#772](https://github.com/salsa-rs/salsa/pull/772))
- Work around a rust-analyzer bug ([#855](https://github.com/salsa-rs/salsa/pull/855))
- Lazy finalization of cycle participants in `maybe_changed_after` ([#854](https://github.com/salsa-rs/salsa/pull/854))
- Do not re-verify already verified memoized value in cycle verification ([#851](https://github.com/salsa-rs/salsa/pull/851))
- Pass cycle heads as out parameter for `maybe_changed_after` ([#852](https://github.com/salsa-rs/salsa/pull/852))
- Move salsa event system into `Zalsa` ([#849](https://github.com/salsa-rs/salsa/pull/849))
- gate loom dependency under feature flag ([#844](https://github.com/salsa-rs/salsa/pull/844))
- Add loom support ([#842](https://github.com/salsa-rs/salsa/pull/842))
- Clean up some unsafety ([#830](https://github.com/salsa-rs/salsa/pull/830))
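
A rough sketch of the attribute rename from [#772](https://github.com/salsa-rs/salsa/pull/772): the `SourceFile`/`line_lengths` names are invented for illustration, and the full set of `returns(...)` modes should be checked against the release docs (the entries above name `as_ref`, `as_deref`, `deref`, and `cloned`).

```rust
#[salsa::input]
struct SourceFile {
    #[returns(ref)] // formerly: #[return_ref]
    text: String,
}

// `returns(ref)` hands out a reference to the memoized value;
// `returns(cloned)` returns an owned clone instead.
#[salsa::tracked(returns(ref))]
fn line_lengths(db: &dyn salsa::Database, file: SourceFile) -> Vec<usize> {
    file.text(db).lines().map(str::len).collect()
}
```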
## [0.21.1](https://github.com/salsa-rs/salsa/compare/salsa-v0.21.0...salsa-v0.21.1) - 2025-04-30

### Added

- Make `attach` pub ([#832](https://github.com/salsa-rs/salsa/pull/832))

### Other

- better debug name for interned query arguments ([#837](https://github.com/salsa-rs/salsa/pull/837))
- Avoid panic in `Backtrace::capture` if `query_stack` is already borrowed ([#835](https://github.com/salsa-rs/salsa/pull/835))
- Clean up `function::execute` ([#833](https://github.com/salsa-rs/salsa/pull/833))
- Change an `assert!` to `assert_eq!` ([#828](https://github.com/salsa-rs/salsa/pull/828))

## [0.21.0](https://github.com/salsa-rs/salsa/compare/salsa-v0.20.0...salsa-v0.21.0) - 2025-04-29

### Fixed

- Access to tracked-struct that was freed during fixpoint ([#817](https://github.com/salsa-rs/salsa/pull/817))
- correct debug output for tracked fields ([#826](https://github.com/salsa-rs/salsa/pull/826))
- Fix incorrect `values_equal` signature ([#825](https://github.com/salsa-rs/salsa/pull/825))
- allow unused lifetimes in tracked_struct expansion ([#824](https://github.com/salsa-rs/salsa/pull/824))

### Other

- Implement a query stack `Backtrace` analog ([#827](https://github.com/salsa-rs/salsa/pull/827))
- Simplify ID conversions ([#822](https://github.com/salsa-rs/salsa/pull/822))
- Attempt to fix codspeed ([#823](https://github.com/salsa-rs/salsa/pull/823))
- Remove unnecessary `Array` abstraction ([#821](https://github.com/salsa-rs/salsa/pull/821))
- Add a compile-fail test for a `'static` `!Update` struct ([#820](https://github.com/salsa-rs/salsa/pull/820))
- squelch most clippy warnings in generated code ([#809](https://github.com/salsa-rs/salsa/pull/809))
- Include struct name in formatted input-field index ([#819](https://github.com/salsa-rs/salsa/pull/819))
- Force inline `fetch_hot` ([#818](https://github.com/salsa-rs/salsa/pull/818))
- Per ingredient sync table ([#650](https://github.com/salsa-rs/salsa/pull/650))
- Use `DatabaseKey` for interned events ([#813](https://github.com/salsa-rs/salsa/pull/813))
- [refactor] More `fetch_hot` simplification ([#793](https://github.com/salsa-rs/salsa/pull/793))
- Don't store the fields in the interned map ([#812](https://github.com/salsa-rs/salsa/pull/812))
- Fix ci not always running ([#810](https://github.com/salsa-rs/salsa/pull/810))
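
A minimal sketch of the query-stack backtrace from [#827](https://github.com/salsa-rs/salsa/pull/827): `Backtrace::capture` is named in the entries above, but the `Option` return value and the `Display` formatting assumed here may differ from the released API.

```rust
fn dump_query_stack() {
    // Assumed shape: captures the salsa query stack of the current thread
    // (analogous to std::backtrace::Backtrace), returning None when called
    // outside of any active query.
    if let Some(trace) = salsa::Backtrace::capture() {
        eprintln!("query stack:\n{trace}");
    }
}
```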
## [0.20.0](https://github.com/salsa-rs/salsa/compare/salsa-v0.19.0...salsa-v0.20.0) - 2025-04-22

### Added

- Drop `Debug` requirements and flip implementation defaults ([#756](https://github.com/salsa-rs/salsa/pull/756))

### Fixed

- Dereferencing freed memos when verifying provisional memos ([#788](https://github.com/salsa-rs/salsa/pull/788))
- `#[doc(hidden)]` `plumbing` module ([#781](https://github.com/salsa-rs/salsa/pull/781))
- Use `changed_at` revision when updating fields ([#778](https://github.com/salsa-rs/salsa/pull/778))

### Other

- Reduce memory usage by deduplicating type information ([#803](https://github.com/salsa-rs/salsa/pull/803))
- Make interned's `last_interned_at` equal `Revision::MAX` if they are interned outside a query ([#804](https://github.com/salsa-rs/salsa/pull/804))
- Add a third cycle mode, equivalent to old Salsa cycle behavior ([#801](https://github.com/salsa-rs/salsa/pull/801))
- Update compact_str from 0.8 to 0.9 ([#794](https://github.com/salsa-rs/salsa/pull/794))
- Implement `Update` for `ThinVec` ([#807](https://github.com/salsa-rs/salsa/pull/807))
- Don't push an unnecessary active query for `deep_verify_memo` ([#806](https://github.com/salsa-rs/salsa/pull/806))
- Inline/Outline more cold and slow paths ([#805](https://github.com/salsa-rs/salsa/pull/805))
- `#[inline]` some things ([#799](https://github.com/salsa-rs/salsa/pull/799))
- Discard unnecessary atomic load ([#780](https://github.com/salsa-rs/salsa/pull/780))
- Print query stack when encountering unexpected cycle ([#796](https://github.com/salsa-rs/salsa/pull/796))
- Remove incorrect `parallel_scope` API ([#797](https://github.com/salsa-rs/salsa/pull/797))
- [refactor] Simplify `fetch_hot` ([#792](https://github.com/salsa-rs/salsa/pull/792))
- [refactor] Reuse the same stack for all cycles heads in `validate_same_iteration` ([#791](https://github.com/salsa-rs/salsa/pull/791))
- add WillIterateCycle event ([#790](https://github.com/salsa-rs/salsa/pull/790))
- [fix] Use `validate_maybe_provisional` instead of `validate_provisional` ([#789](https://github.com/salsa-rs/salsa/pull/789))
- Use `ThinVec` for `CycleHeads` ([#787](https://github.com/salsa-rs/salsa/pull/787))
- Keep edge condvar on stack instead of allocating it in an `Arc` ([#773](https://github.com/salsa-rs/salsa/pull/773))
- allow reuse of cached provisional memos within the same cycle iteration ([#786](https://github.com/salsa-rs/salsa/pull/786))
- Implement `Lookup`/`HashEqLike` for `Arc` ([#784](https://github.com/salsa-rs/salsa/pull/784))
- Normalize imports style ([#779](https://github.com/salsa-rs/salsa/pull/779))
- Clean up `par_map` a bit ([#742](https://github.com/salsa-rs/salsa/pull/742))
- Fix typo in comment ([#777](https://github.com/salsa-rs/salsa/pull/777))
- Document most safety blocks ([#776](https://github.com/salsa-rs/salsa/pull/776))
- Use html directory for mdbook artifact ([#774](https://github.com/salsa-rs/salsa/pull/774))
- Move `verified_final` from `Memo` into `QueryRevisions` ([#769](https://github.com/salsa-rs/salsa/pull/769))
- Use `ThinVec` for `MemoTable`, halving its size ([#770](https://github.com/salsa-rs/salsa/pull/770))
- Remove unnecessary query stack access in `block_on` ([#771](https://github.com/salsa-rs/salsa/pull/771))
- Replace memo queue with append-only vector ([#767](https://github.com/salsa-rs/salsa/pull/767))
- update boxcar ([#696](https://github.com/salsa-rs/salsa/pull/696))
- Remove extra page indirection in `Table` ([#710](https://github.com/salsa-rs/salsa/pull/710))
- update release steps ([#705](https://github.com/salsa-rs/salsa/pull/705))
- Remove some unnecessary panicking paths in cycle execution ([#765](https://github.com/salsa-rs/salsa/pull/765))
- *(perf)* Pool `ActiveQuerys` in the query stack ([#629](https://github.com/salsa-rs/salsa/pull/629))
- Resolve unwind safety fixme ([#761](https://github.com/salsa-rs/salsa/pull/761))
- Enable Garbage Collection for Interned Values ([#602](https://github.com/salsa-rs/salsa/pull/602))
- bug [salsa-macros]: Improve debug name of tracked methods ([#755](https://github.com/salsa-rs/salsa/pull/755))
- Remove dead code ([#764](https://github.com/salsa-rs/salsa/pull/764))
- Reduce unnecessary conditional work in `deep_verify_memo` ([#759](https://github.com/salsa-rs/salsa/pull/759))
- Use a `Vec` for `CycleHeads` ([#760](https://github.com/salsa-rs/salsa/pull/760))
- Use nextest for miri test runs ([#758](https://github.com/salsa-rs/salsa/pull/758))
- Pin `half` version to prevent CI failure ([#757](https://github.com/salsa-rs/salsa/pull/757))
- rewrite cycle handling to support fixed-point iteration ([#603](https://github.com/salsa-rs/salsa/pull/603))

## [0.19.0](https://github.com/salsa-rs/salsa/compare/salsa-v0.18.0...salsa-v0.19.0) - 2025-03-10

### Fixed

- fix typo
- fix enums bug

### Other

- Have salsa not depend on salsa-macros ([#750](https://github.com/salsa-rs/salsa/pull/750))
- Group versions of packages together for releases ([#751](https://github.com/salsa-rs/salsa/pull/751))
- use `portable-atomic` in `IngredientCache` to compile on `powerpc-unknown-linux-gnu` ([#749](https://github.com/salsa-rs/salsa/pull/749))
- Store view downcaster in function ingredients directly ([#720](https://github.com/salsa-rs/salsa/pull/720))
- Some small perf things ([#744](https://github.com/salsa-rs/salsa/pull/744))
- :replace instead of std::mem::replace ([#746](https://github.com/salsa-rs/salsa/pull/746))
- Cleanup `Cargo.toml`s ([#745](https://github.com/salsa-rs/salsa/pull/745))
- Drop clone requirement for accumulated values
- implement `Update` trait for `IndexMap`, and `IndexSet`
- more correct bounds on `Send` and `Sync` implementation `DeletedEntries`
- replace `arc-swap` with manual `AtomicPtr`
- Remove unnecessary `current_revision` call from `setup_interned_struct`
- Merge pull request #731 from Veykril/veykril/push-nzkwqzxxkxou
- Remove some dynamically dispatched `Database::event` calls
- Lazy fetching
- Add small supertype input benchmark
- Replace a `DashMap` with `RwLock` as writing is rare for it
- address review comments
- Skip memo ingredient index mapping for non enum tracked functions
- Trade off a bit of memory for more speed in `MemoIngredientIndices`
- Introduce Salsa enums
- Cancel duplicate test workflow runs
- implement `Update` trait for `hashbrown::HashMap`
- Move `unwind_if_revision_cancelled` from `ZalsaLocal` to `Zalsa`
- Don't clone strings in benchmarks
- Merge pull request #714 from Veykril/veykril/push-synxntlkqqsq
- Merge pull request #711 from Veykril/veykril/push-stmmwmtprovt
- Merge pull request #715 from Veykril/veykril/push-plwpsqknwulq
- Enforce `unsafe_op_in_unsafe_fn`
- Remove some `ZalsaDatabase::zalsa` calls
- Remove outdated FIXME
- Replace `IngredientCache` lock with atomic primitive
- Reduce method delegation duplication
- Automatically clear the cancellation flag when cancellation completes
- Allow trigger LRU eviction without increasing the current revision
- Simplify `Ingredient::reset_for_new_revision` setup
- Require mut Zalsa access for setting the lru limit
- Split off revision bumping from `zalsa_mut` access
- Update `hashbrown` (0.15) and `hashlink` (0.10)
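
A rough sketch of the fixed-point iteration from [#603](https://github.com/salsa-rs/salsa/pull/603) (0.20.0 above), which lets a tracked query participate in cycles by iterating from an initial value. The `cycle_fn`/`cycle_initial` argument names, the `recover` signature, and the `Node`/`cost` types are assumptions for illustration and may differ from the released API.

```rust
use salsa::CycleRecoveryAction;

#[salsa::input]
struct Node {
    weight: u32,
}

// On a cycle, salsa starts from `initial` and re-executes `cost`
// until the result stops changing (or `recover` opts out).
#[salsa::tracked(cycle_fn = recover, cycle_initial = initial)]
fn cost(db: &dyn salsa::Database, node: Node) -> u32 {
    node.weight(db) // real code would recurse into successor nodes here
}

fn initial(_db: &dyn salsa::Database, _node: Node) -> u32 {
    0
}

fn recover(
    _db: &dyn salsa::Database,
    _value: &u32,
    _count: u32,
    _node: Node,
) -> CycleRecoveryAction<u32> {
    CycleRecoveryAction::Iterate
}
```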
salsa-0.23.0/Cargo.lock

# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
 "memchr",
]

[[package]]
name = "allocator-api2"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"

[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"

[[package]]
name = "annotate-snippets"
version = "0.11.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "710e8eae58854cdc1790fcb56cca04d712a17be849eeb81da2a724bf4bae2bc4"
dependencies = [
 "anstyle",
 "unicode-width",
]

[[package]]
name = "anstream"
version = "0.6.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933"
dependencies = [
 "anstyle",
 "anstyle-parse",
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
 "is_terminal_polyfill",
 "utf8parse",
]

[[package]]
name = "anstyle"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"

[[package]]
name = "anstyle-parse"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
dependencies = [
 "utf8parse",
]

[[package]]
name = "anstyle-query"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9"
dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "anstyle-wincon"
version = "3.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882"
dependencies = [
 "anstyle",
 "once_cell_polyfill",
 "windows-sys 0.59.0",
]

[[package]]
name = "assoc"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfdc70193dadb9d7287fa4b633f15f90c876915b31f6af17da307fc59c9859a8"

[[package]]
name = "autocfg"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitflags"
version = "2.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967"

[[package]]
name = "bitvec"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
dependencies = [
 "funty",
 "radium",
 "tap",
 "wyz",
]

[[package]]
name = "boxcar"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26c4925bc979b677330a8c7fe7a8c94af2dbb4a2d37b4a20a80d884400f46baa"

[[package]]
name = "bumpalo"
version = "3.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"

[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "castaway"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5"
dependencies = [
 "rustversion",
]

[[package]]
name = "cc"
version = "1.2.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc"
dependencies = [
 "shlex",
]

[[package]]
name = "cfg-if"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"

[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
 "ciborium-io",
 "ciborium-ll",
 "serde",
]

[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"

[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
 "ciborium-io",
 "half",
]

[[package]]
name = "clap"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f"
dependencies = [
 "clap_builder",
]

[[package]]
name = "clap_builder"
version = "4.5.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
dependencies = [
 "anstyle",
 "clap_lex",
]

[[package]]
name = "clap_lex"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"

[[package]]
name = "codspeed"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93f4cce9c27c49c4f101fffeebb1826f41a9df2e7498b7cd4d95c0658b796c6c"
dependencies = [
 "colored",
 "libc",
 "serde",
 "serde_json",
 "uuid",
]

[[package]]
name = "codspeed-criterion-compat"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c23d880a28a2aab52d38ca8481dd7a3187157d0a952196b6db1db3c8499725"
dependencies = [
 "codspeed",
 "codspeed-criterion-compat-walltime",
 "colored",
]

[[package]]
name = "codspeed-criterion-compat-walltime"
version = "2.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b0a2f7365e347f4f22a67e9ea689bf7bc89900a354e22e26cf8a531a42c8fbb"
dependencies = [
 "anes",
 "cast",
 "ciborium",
 "clap",
 "codspeed",
 "criterion-plot",
 "is-terminal",
 "itertools",
 "num-traits",
 "once_cell",
 "oorandom",
 "regex",
 "serde",
 "serde_derive",
 "serde_json",
 "tinytemplate",
 "walkdir",
]

[[package]]
name = "colorchoice"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"

[[package]]
name = "colored"
version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
dependencies = [
 "lazy_static",
 "windows-sys 0.59.0",
]

[[package]]
name = "compact_str"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a"
dependencies = [
 "castaway",
 "cfg-if",
 "itoa",
 "rustversion",
 "ryu",
 "static_assertions",
]

[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
 "cast",
 "itertools",
]

[[package]]
name = "crossbeam-channel"
version = "0.5.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-deque"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
dependencies = [
 "crossbeam-epoch",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-queue"
version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"

[[package]]
name = "crunchy"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"

[[package]]
name = "dashmap"
version = "6.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
dependencies = [
 "cfg-if",
 "crossbeam-utils",
 "hashbrown 0.14.5",
 "lock_api",
 "once_cell",
 "parking_lot_core",
]

[[package]]
name = "dissimilar"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8975ffdaa0ef3661bfe02dbdcc06c9f829dfafe6a3c474de366a8d5e44276921"

[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"

[[package]]
name = "env_filter"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
dependencies = [
 "log",
]

[[package]]
name = "env_logger"
version = "0.11.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f"
dependencies = [
 "anstream",
 "anstyle",
 "env_filter",
 "log",
]

[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"

[[package]]
name = "expect-test"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63af43ff4431e848fb47472a920f14fa71c24de13255a5692e93d4e90302acb0"
dependencies = [
 "dissimilar",
 "once_cell",
]

[[package]]
name = "eyre"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec"
dependencies = [
 "indenter",
 "once_cell",
]

[[package]]
name = "filetime"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586"
dependencies = [
 "cfg-if",
 "libc",
 "libredox",
 "windows-sys 0.59.0",
]

[[package]]
name = "foldhash"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"

[[package]]
name = "fsevent-sys"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
dependencies = [
 "libc",
]

[[package]]
name = "funty"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"

[[package]]
name = "generator"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d18470a76cb7f8ff746cf1f7470914f900252ec36bbc40b569d74b1258446827"
dependencies = [
 "cc",
 "cfg-if",
 "libc",
 "log",
 "rustversion",
 "windows",
]

[[package]]
name = "getrandom"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [
 "cfg-if",
 "libc",
 "wasi 0.11.1+wasi-snapshot-preview1",
]

[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
 "cfg-if",
 "libc",
 "r-efi",
 "wasi 0.14.2+wasi-0.2.4",
]

[[package]]
name = "glob"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"

[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
 "cfg-if",
 "crunchy",
]

[[package]]
name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"

[[package]]
name = "hashbrown"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
dependencies = [
 "allocator-api2",
 "equivalent",
 "foldhash",
]

[[package]]
name = "hashlink"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
dependencies = [
 "hashbrown 0.15.4",
]

[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"

[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "indenter"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"

[[package]]
name = "indexmap"
version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661"
dependencies = [
 "equivalent",
 "hashbrown 0.15.4",
]

[[package]]
name = "inotify"
version = "0.9.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff"
dependencies = [
 "bitflags 1.3.2",
 "inotify-sys",
 "libc",
]

[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
 "libc",
]

[[package]]
name = "intrusive-collections"
version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "189d0897e4cbe8c75efedf3502c18c887b05046e59d28404d4d8e46cbc4d1e86"
dependencies = [
 "memoffset",
]

[[package]]
name = "is-terminal"
version = "0.4.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
dependencies = [
 "hermit-abi",
 "libc",
 "windows-sys 0.59.0",
]

[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"

[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
 "either",
]

[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
 "once_cell",
 "wasm-bindgen",
]

[[package]]
name = "kqueue"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a"
dependencies = [
 "kqueue-sys",
 "libc",
]

[[package]]
name = "kqueue-sys"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
dependencies = [
 "bitflags 1.3.2",
 "libc",
]

[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"

[[package]]
name = "libc"
version = "0.2.174"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"

[[package]]
name = "libredox"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638"
dependencies = [
 "bitflags 2.9.1",
 "libc",
 "redox_syscall",
]

[[package]]
name = "lock_api"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"

[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
 "regex-automata 0.1.10",
]

[[package]]
name = "memchr"
version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"

[[package]]
name = "memoffset"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
dependencies = [
 "autocfg",
]

[[package]]
name = "mio"
version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [
 "libc",
 "log",
 "wasi 0.11.1+wasi-snapshot-preview1",
 "windows-sys 0.48.0",
]

[[package]]
name = "notify"
version = "6.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d"
dependencies = [
 "bitflags 2.9.1",
 "crossbeam-channel",
 "filetime",
 "fsevent-sys",
 "inotify",
 "kqueue",
 "libc",
 "log",
 "mio",
 "walkdir",
 "windows-sys 0.48.0",
]

[[package]]
name = "notify-debouncer-mini"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d40b221972a1fc5ef4d858a2f671fb34c75983eb385463dff3780eeff6a9d43"
dependencies = [
 "crossbeam-channel",
 "log",
 "notify",
]

[[package]]
name = "nu-ansi-term"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
dependencies = [
 "overload",
 "winapi",
]

[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
 "autocfg",
]

[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"

[[package]]
name = "once_cell_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"

[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"

[[package]]
name = "ordered-float"
version = "4.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951"
dependencies = [
 "num-traits",
]

[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

[[package]]
name = "owo-colors"
version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"

[[package]]
name = "papaya"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af228bb1296c9b044ee75e2a2325409c2d899bcfcc6150e5e41f148e0a87dd20"
dependencies = [
 "equivalent",
 "seize",
]

[[package]]
name = "parking_lot"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13"
dependencies = [
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.9.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5"
dependencies = [
 "cfg-if",
 "libc",
 "redox_syscall",
 "smallvec",
 "windows-targets 0.52.6",
]

[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"

[[package]]
name = "portable-atomic"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"

[[package]]
name = "ppv-lite86"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9"
dependencies = [
 "zerocopy",
]

[[package]]
name = "proc-macro2"
version = "1.0.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "quote"
version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"

[[package]]
name = "radium"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"

[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
 "libc",
 "rand_chacha",
 "rand_core",
]

[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
 "ppv-lite86",
 "rand_core",
]

[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
 "getrandom 0.2.16",
]

[[package]]
name = "rand_pcg"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
dependencies = [
 "rand_core",
]

[[package]]
name = "rayon"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa"
dependencies = [
 "either",
 "rayon-core",
]

[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
 "crossbeam-deque",
 "crossbeam-utils",
]

[[package]]
name = "redox_syscall"
version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6"
dependencies = [
 "bitflags 2.9.1",
]

[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
 "aho-corasick",
 "memchr",
 "regex-automata 0.4.9",
 "regex-syntax 0.8.5",
]

[[package]]
name = "regex-automata"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
dependencies = [
 "regex-syntax 0.6.29",
]

[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
 "aho-corasick",
 "memchr",
 "regex-syntax 0.8.5",
]

[[package]]
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"

[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

[[package]]
name = "rustc-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"

[[package]]
name = "rustversion"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d"

[[package]]
name = "ryu"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"

[[package]]
name = "salsa"
version = "0.23.0"
dependencies = [
 "annotate-snippets",
 "boxcar",
 "codspeed-criterion-compat",
 "compact_str",
 "crossbeam-channel",
 "crossbeam-queue",
 "crossbeam-utils",
 "dashmap",
 "expect-test",
 "eyre",
 "hashbrown 0.15.4",
 "hashlink",
 "indexmap",
 "intrusive-collections",
 "notify-debouncer-mini",
 "ordered-float",
 "papaya",
 "parking_lot",
 "portable-atomic",
 "rayon",
 "rustc-hash",
 "rustversion",
 "salsa-macro-rules",
 "salsa-macros",
 "shuttle",
 "smallvec",
 "test-log",
 "thin-vec",
 "tikv-jemallocator",
 "tracing",
 "trybuild",
]

[[package]]
name = "salsa-macro-rules"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2edb86a7e9c91f6d30c9ce054312721dbe773a162db27bbfae834d16177b30ce"

[[package]]
name = "salsa-macros"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0778d6e209051bc4e75acfe83bcd7848601ec3dbe9c3dbb982829020e9128af"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "synstructure",
]

[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
 "winapi-util",
]

[[package]]
name = "scoped-tls"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "seize"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4b8d813387d566f627f3ea1b914c068aac94c40ae27ec43f5f33bde65abefe7"
dependencies = [
 "libc",
 "windows-sys 0.52.0",
]

[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "serde_json"
version = "1.0.140"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
dependencies = [
 "itoa",
 "memchr",
 "ryu",
 "serde",
]

[[package]]
name = "serde_spanned"
version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
dependencies = [
 "serde",
]

[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
 "lazy_static",
]

[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "shuttle"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ab17edba38d63047f46780cf7360acf7467fec2c048928689a5c1dd1c2b4e31"
dependencies = [
 "assoc",
 "bitvec",
 "cfg-if",
 "generator",
 "hex",
 "owo-colors",
 "rand",
 "rand_core",
 "rand_pcg",
 "scoped-tls",
 "smallvec",
 "tracing",
]

[[package]]
name = "smallvec"
version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"

[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"

[[package]]
name = "syn"
version = "2.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "synstructure"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"

[[package]]
name = "target-triple"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790"

[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
 "winapi-util",
]

[[package]]
name = "test-log"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7f46083d221181166e5b6f6b1e5f1d499f3a76888826e6cb1d057554157cd0f"
dependencies = [
 "env_logger",
 "test-log-macros",
 "tracing-subscriber",
]

[[package]]
name = "test-log-macros"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "888d0c3c6db53c0fdab160d2ed5e12ba745383d3e85813f2ea0f2b1475ab553f"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "thin-vec"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d"

[[package]]
name = "thread_local"
version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185"
dependencies = [
 "cfg-if",
]

[[package]]
name = "tikv-jemalloc-sys"
version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d"
dependencies = [
 "cc",
 "libc",
]

[[package]]
name = "tikv-jemallocator"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865"
dependencies = [
 "libc",
 "tikv-jemalloc-sys",
]

[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
 "serde",
 "serde_json",
]

[[package]]
name = "toml"
version = "0.8.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
dependencies = [
 "serde",
 "serde_spanned",
 "toml_datetime",
 "toml_edit",
]

[[package]]
name = "toml_datetime"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
dependencies = [
 "serde",
]

[[package]]
name = "toml_edit"
version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
 "indexmap",
 "serde",
 "serde_spanned",
 "toml_datetime",
 "toml_write",
 "winnow",
]

[[package]]
name = "toml_write"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"

[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
 "pin-project-lite",
 "tracing-core",
]

[[package]]
name = "tracing-core"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
dependencies = [
 "once_cell",
 "valuable",
]

[[package]]
name = "tracing-log"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3"
dependencies = [
 "log",
 "once_cell",
 "tracing-core",
]

[[package]]
name = "tracing-subscriber"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
dependencies = [
 "matchers",
 "nu-ansi-term",
 "once_cell",
 "regex",
 "sharded-slab",
 "thread_local",
 "tracing",
 "tracing-core",
 "tracing-log",
]

[[package]]
name = "trybuild"
version = "1.0.105"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c9bf9513a2f4aeef5fdac8677d7d349c79fdbcc03b9c86da6e9d254f1e43be2"
dependencies = [
 "glob",
 "serde",
 "serde_derive",
 "serde_json",
 "target-triple",
 "termcolor",
 "toml",
]

[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"

[[package]]
name = "unicode-width"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c"

[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"

[[package]]
name = "uuid"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d"
dependencies = [
 "getrandom 0.3.3",
 "js-sys",
 "wasm-bindgen",
]

[[package]]
name = "valuable"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"

[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
 "same-file",
 "winapi-util",
]

[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"

[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
 "wit-bindgen-rt",
]

[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
 "cfg-if",
 "once_cell",
 "rustversion",
 "wasm-bindgen-macro",
]

[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
 "bumpalo",
 "log",
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
 "quote",
 "wasm-bindgen-macro-support",
]

[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-backend",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name =
"winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" dependencies = [ "windows-collections", "windows-core", "windows-future", "windows-link", "windows-numerics", ] [[package]] name = "windows-collections" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" dependencies = [ "windows-core", ] [[package]] name = "windows-core" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", "windows-strings", ] [[package]] name = "windows-future" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" dependencies = [ "windows-core", "windows-link", "windows-threading", ] [[package]] name = "windows-implement" version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-interface" version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-link" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-numerics" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" dependencies = [ "windows-core", "windows-link", ] [[package]] name = "windows-result" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-sys" version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows-threading" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" dependencies = [ "windows-link", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" version = 
"0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] [[package]] name = "wit-bindgen-rt" version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.1", ] [[package]] name = "wyz" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" dependencies = [ "tap", ] [[package]] name = "zerocopy" version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", "syn", ] salsa-0.23.0/Cargo.toml0000644000000267200000000000100102260ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.85" name = "salsa" version = "0.23.0" authors = ["Salsa developers"] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A generic framework for on-demand, incrementalized computation (experimental)" readme = "README.md" license = "Apache-2.0 OR MIT" repository = "https://github.com/salsa-rs/salsa" [features] default = [ "salsa_unstable", "rayon", "macros", ] macros = ["dep:salsa-macros"] salsa_unstable = [] shuttle = ["dep:shuttle"] [lib] name = "salsa" path = "src/lib.rs" [[example]] name = "calc" path = "examples/calc/main.rs" [[example]] name = "lazy-input" path = "examples/lazy-input/main.rs" [[test]] name = "accumulate" path = "tests/accumulate.rs" [[test]] name = "accumulate-chain" path = "tests/accumulate-chain.rs" [[test]] name = "accumulate-custom-debug" path = "tests/accumulate-custom-debug.rs" [[test]] name = "accumulate-dag" path = "tests/accumulate-dag.rs" [[test]] name = "accumulate-execution-order" path = "tests/accumulate-execution-order.rs" [[test]] name = "accumulate-from-tracked-fn" path = "tests/accumulate-from-tracked-fn.rs" [[test]] name = "accumulate-no-duplicates" path = "tests/accumulate-no-duplicates.rs" [[test]] name = "accumulate-reuse" path = "tests/accumulate-reuse.rs" [[test]] name = "accumulate-reuse-workaround" path = "tests/accumulate-reuse-workaround.rs" [[test]] name = "accumulated_backdate" path = "tests/accumulated_backdate.rs" [[test]] name = "backtrace" path = "tests/backtrace.rs" [[test]] name = "check_auto_traits" path = "tests/check_auto_traits.rs" [[test]] name = "compile_fail" path = "tests/compile_fail.rs" [[test]] name = "cycle" path = "tests/cycle.rs" [[test]] name = "cycle_accumulate" path = "tests/cycle_accumulate.rs" [[test]] name = "cycle_fallback_immediate" path = "tests/cycle_fallback_immediate.rs" [[test]] name = "cycle_initial_call_back_into_cycle" path = "tests/cycle_initial_call_back_into_cycle.rs" [[test]] name = "cycle_initial_call_query" path = "tests/cycle_initial_call_query.rs" [[test]] name = "cycle_maybe_changed_after" path = "tests/cycle_maybe_changed_after.rs" [[test]] name = "cycle_output" path = "tests/cycle_output.rs" [[test]] name = "cycle_recovery_call_back_into_cycle" path = "tests/cycle_recovery_call_back_into_cycle.rs" [[test]] name = "cycle_recovery_call_query" path = "tests/cycle_recovery_call_query.rs" [[test]] name = "cycle_regression_455" path = "tests/cycle_regression_455.rs" [[test]] name = "cycle_result_dependencies" path = "tests/cycle_result_dependencies.rs" [[test]] name = "cycle_tracked" path = "tests/cycle_tracked.rs" [[test]] name = "cycle_tracked_own_input" path = "tests/cycle_tracked_own_input.rs" [[test]] name = "dataflow" path = "tests/dataflow.rs" [[test]] name = "debug" path = "tests/debug.rs" [[test]] name = "debug_db_contents" path = "tests/debug_db_contents.rs" [[test]] name = "deletion" path = "tests/deletion.rs" [[test]] name = "deletion-cascade" path = "tests/deletion-cascade.rs" [[test]] name = "deletion-drops" path = "tests/deletion-drops.rs" [[test]] name = "derive_update" path = "tests/derive_update.rs" [[test]] name = "durability" path = "tests/durability.rs" [[test]] name = "elided-lifetime-in-tracked-fn" path = "tests/elided-lifetime-in-tracked-fn.rs" [[test]] name = "expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y" path = "tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs" [[test]] name = 
"expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y" path = "tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs" [[test]] name = "hash_collision" path = "tests/hash_collision.rs" [[test]] name = "hello_world" path = "tests/hello_world.rs" [[test]] name = "input_default" path = "tests/input_default.rs" [[test]] name = "input_field_durability" path = "tests/input_field_durability.rs" [[test]] name = "input_setter_preserves_durability" path = "tests/input_setter_preserves_durability.rs" [[test]] name = "intern_access_in_different_revision" path = "tests/intern_access_in_different_revision.rs" [[test]] name = "interned-revisions" path = "tests/interned-revisions.rs" [[test]] name = "interned-structs" path = "tests/interned-structs.rs" [[test]] name = "interned-structs_self_ref" path = "tests/interned-structs_self_ref.rs" [[test]] name = "lru" path = "tests/lru.rs" [[test]] name = "memory-usage" path = "tests/memory-usage.rs" [[test]] name = "mutate_in_place" path = "tests/mutate_in_place.rs" [[test]] name = "override_new_get_set" path = "tests/override_new_get_set.rs" [[test]] name = "panic-when-creating-tracked-struct-outside-of-tracked-fn" path = "tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs" [[test]] name = "parallel" path = "tests/parallel/main.rs" [[test]] name = "preverify-struct-with-leaked-data" path = "tests/preverify-struct-with-leaked-data.rs" [[test]] name = "preverify-struct-with-leaked-data-2" path = "tests/preverify-struct-with-leaked-data-2.rs" [[test]] name = "return_mode" path = "tests/return_mode.rs" [[test]] name = "singleton" path = "tests/singleton.rs" [[test]] name = "specify-only-works-if-the-key-is-created-in-the-current-query" path = "tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs" [[test]] name = "synthetic_write" path = "tests/synthetic_write.rs" [[test]] name = "tracked-struct-id-field-bad-eq" path = "tests/tracked-struct-id-field-bad-eq.rs" [[test]] name = "tracked-struct-id-field-bad-hash" path = "tests/tracked-struct-id-field-bad-hash.rs" [[test]] name = "tracked-struct-unchanged-in-new-rev" path = "tests/tracked-struct-unchanged-in-new-rev.rs" [[test]] name = "tracked-struct-value-field-bad-eq" path = "tests/tracked-struct-value-field-bad-eq.rs" [[test]] name = "tracked-struct-value-field-not-eq" path = "tests/tracked-struct-value-field-not-eq.rs" [[test]] name = "tracked_assoc_fn" path = "tests/tracked_assoc_fn.rs" [[test]] name = "tracked_fn_constant" path = "tests/tracked_fn_constant.rs" [[test]] name = "tracked_fn_high_durability_dependency" path = "tests/tracked_fn_high_durability_dependency.rs" [[test]] name = "tracked_fn_interned_lifetime" path = "tests/tracked_fn_interned_lifetime.rs" [[test]] name = "tracked_fn_multiple_args" path = "tests/tracked_fn_multiple_args.rs" [[test]] name = "tracked_fn_no_eq" path = "tests/tracked_fn_no_eq.rs" [[test]] name = "tracked_fn_on_input" path = "tests/tracked_fn_on_input.rs" [[test]] name = "tracked_fn_on_input_with_high_durability" path = "tests/tracked_fn_on_input_with_high_durability.rs" [[test]] name = "tracked_fn_on_interned" path = "tests/tracked_fn_on_interned.rs" [[test]] name = "tracked_fn_on_interned_enum" path = "tests/tracked_fn_on_interned_enum.rs" [[test]] name = "tracked_fn_on_tracked" path = "tests/tracked_fn_on_tracked.rs" [[test]] name = "tracked_fn_on_tracked_specify" path = "tests/tracked_fn_on_tracked_specify.rs" [[test]] name = "tracked_fn_orphan_escape_hatch" path = "tests/tracked_fn_orphan_escape_hatch.rs" 
[[test]] name = "tracked_fn_read_own_entity" path = "tests/tracked_fn_read_own_entity.rs" [[test]] name = "tracked_fn_read_own_specify" path = "tests/tracked_fn_read_own_specify.rs" [[test]] name = "tracked_fn_return_ref" path = "tests/tracked_fn_return_ref.rs" [[test]] name = "tracked_method" path = "tests/tracked_method.rs" [[test]] name = "tracked_method_inherent_return_deref" path = "tests/tracked_method_inherent_return_deref.rs" [[test]] name = "tracked_method_inherent_return_ref" path = "tests/tracked_method_inherent_return_ref.rs" [[test]] name = "tracked_method_on_tracked_struct" path = "tests/tracked_method_on_tracked_struct.rs" [[test]] name = "tracked_method_trait_return_ref" path = "tests/tracked_method_trait_return_ref.rs" [[test]] name = "tracked_method_with_self_ty" path = "tests/tracked_method_with_self_ty.rs" [[test]] name = "tracked_struct" path = "tests/tracked_struct.rs" [[test]] name = "tracked_struct_db1_lt" path = "tests/tracked_struct_db1_lt.rs" [[test]] name = "tracked_struct_disambiguates" path = "tests/tracked_struct_disambiguates.rs" [[test]] name = "tracked_struct_durability" path = "tests/tracked_struct_durability.rs" [[test]] name = "tracked_struct_manual_update" path = "tests/tracked_struct_manual_update.rs" [[test]] name = "tracked_struct_mixed_tracked_fields" path = "tests/tracked_struct_mixed_tracked_fields.rs" [[test]] name = "tracked_struct_recreate_new_revision" path = "tests/tracked_struct_recreate_new_revision.rs" [[test]] name = "tracked_struct_with_interned_query" path = "tests/tracked_struct_with_interned_query.rs" [[test]] name = "tracked_with_intern" path = "tests/tracked_with_intern.rs" [[test]] name = "tracked_with_struct_db" path = "tests/tracked_with_struct_db.rs" [[test]] name = "tracked_with_struct_ord" path = "tests/tracked_with_struct_ord.rs" [[test]] name = "warnings" path = "tests/warnings/main.rs" [[bench]] name = "accumulator" path = "benches/accumulator.rs" harness = false [[bench]] name = "compare" path = "benches/compare.rs" harness = false [[bench]] name = "dataflow" path = "benches/dataflow.rs" harness = false [[bench]] name = "incremental" path = "benches/incremental.rs" harness = false [dependencies.boxcar] version = "0.2.13" [dependencies.compact_str] version = "0.9" optional = true [dependencies.crossbeam-queue] version = "0.3.11" [dependencies.crossbeam-utils] version = "0.8.21" [dependencies.hashbrown] version = "0.15" [dependencies.hashlink] version = "0.10" [dependencies.indexmap] version = "2" [dependencies.intrusive-collections] version = "0.9.7" [dependencies.papaya] version = "0.2.2" [dependencies.parking_lot] version = "0.12" [dependencies.portable-atomic] version = "1" [dependencies.rayon] version = "1.10.0" optional = true [dependencies.rustc-hash] version = "2" [dependencies.salsa-macro-rules] version = "0.23.0" [dependencies.salsa-macros] version = "0.23.0" optional = true [dependencies.shuttle] version = "0.8.0" optional = true [dependencies.smallvec] version = "1" [dependencies.thin-vec] version = "0.2.13" [dependencies.tracing] version = "0.1" features = ["std"] default-features = false [dev-dependencies.annotate-snippets] version = "0.11.5" [dev-dependencies.codspeed-criterion-compat] version = "2.6.0" default-features = false [dev-dependencies.crossbeam-channel] version = "0.5.14" [dev-dependencies.dashmap] version = "6" features = ["raw-api"] [dev-dependencies.expect-test] version = "1.5.0" [dev-dependencies.eyre] version = "0.6.8" [dev-dependencies.notify-debouncer-mini] version = "0.4.1" 
[dev-dependencies.ordered-float] version = "4.2.1" [dev-dependencies.rustversion] version = "1.0" [dev-dependencies.test-log] version = "0.2.11" features = ["trace"] [dev-dependencies.trybuild] version = "1.0" [target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))'.dev-dependencies.tikv-jemallocator] version = "0.6.0" [target."cfg(any())".dependencies.salsa-macros] version = "=0.23.0" salsa-0.23.0/Cargo.toml.orig0000644000000050730000000000100111630ustar [package] name = "salsa" version = "0.23.0" authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true rust-version.workspace = true description = "A generic framework for on-demand, incrementalized computation (experimental)" [dependencies] salsa-macro-rules = { version = "0.23.0", path = "components/salsa-macro-rules" } salsa-macros = { version = "0.23.0", path = "components/salsa-macros", optional = true } boxcar = "0.2.13" crossbeam-queue = "0.3.11" crossbeam-utils = "0.8.21" hashbrown = "0.15" hashlink = "0.10" indexmap = "2" intrusive-collections = "0.9.7" papaya = "0.2.2" parking_lot = "0.12" portable-atomic = "1" rustc-hash = "2" smallvec = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } # parallel map rayon = { version = "1.10.0", optional = true } # Stuff we want Update impls for by default compact_str = { version = "0.9", optional = true } thin-vec = "0.2.13" shuttle = { version = "0.8.0", optional = true } [features] default = ["salsa_unstable", "rayon", "macros"] shuttle = ["dep:shuttle"] # FIXME: remove `salsa_unstable` before 1.0. salsa_unstable = [] macros = ["dep:salsa-macros"] # This interlocks the `salsa-macros` and `salsa` versions together # preventing scenarios where they could diverge in a given project # which may ultimately result in odd issues due to the proc-macro # output mismatching with the declarative macro inputs [target.'cfg(any())'.dependencies] salsa-macros = { version = "=0.23.0", path = "components/salsa-macros" } [dev-dependencies] # examples crossbeam-channel = "0.5.14" dashmap = { version = "6", features = ["raw-api"] } eyre = "0.6.8" notify-debouncer-mini = "0.4.1" ordered-float = "4.2.1" # tests/benches annotate-snippets = "0.11.5" codspeed-criterion-compat = { version = "2.6.0", default-features = false } expect-test = "1.5.0" rustversion = "1.0" test-log = { version = "0.2.11", features = ["trace"] } trybuild = "1.0" [target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))'.dev-dependencies] tikv-jemallocator = "0.6.0" [[bench]] name = "compare" harness = false [[bench]] name = "incremental" harness = false [[bench]] name = "accumulator" harness = false [[bench]] name = "dataflow" harness = false [workspace] members = ["components/salsa-macro-rules", "components/salsa-macros"] [workspace.package] authors = ["Salsa developers"] edition = "2021" license = "Apache-2.0 OR MIT" repository = "https://github.com/salsa-rs/salsa" rust-version = "1.85" salsa-0.23.0/Cargo.toml.orig000064400000000000000000000050731046102023000137050ustar 00000000000000[package] name = "salsa" version = "0.23.0" authors.workspace = true edition.workspace = true license.workspace = true repository.workspace = true rust-version.workspace = true description = "A generic framework for on-demand, incrementalized computation (experimental)" [dependencies] 
salsa-macro-rules = { version = "0.23.0", path = "components/salsa-macro-rules" } salsa-macros = { version = "0.23.0", path = "components/salsa-macros", optional = true } boxcar = "0.2.13" crossbeam-queue = "0.3.11" crossbeam-utils = "0.8.21" hashbrown = "0.15" hashlink = "0.10" indexmap = "2" intrusive-collections = "0.9.7" papaya = "0.2.2" parking_lot = "0.12" portable-atomic = "1" rustc-hash = "2" smallvec = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } # parallel map rayon = { version = "1.10.0", optional = true } # Stuff we want Update impls for by default compact_str = { version = "0.9", optional = true } thin-vec = "0.2.13" shuttle = { version = "0.8.0", optional = true } [features] default = ["salsa_unstable", "rayon", "macros"] shuttle = ["dep:shuttle"] # FIXME: remove `salsa_unstable` before 1.0. salsa_unstable = [] macros = ["dep:salsa-macros"] # This interlocks the `salsa-macros` and `salsa` versions together # preventing scenarios where they could diverge in a given project # which may ultimately result in odd issues due to the proc-macro # output mismatching with the declarative macro inputs [target.'cfg(any())'.dependencies] salsa-macros = { version = "=0.23.0", path = "components/salsa-macros" } [dev-dependencies] # examples crossbeam-channel = "0.5.14" dashmap = { version = "6", features = ["raw-api"] } eyre = "0.6.8" notify-debouncer-mini = "0.4.1" ordered-float = "4.2.1" # tests/benches annotate-snippets = "0.11.5" codspeed-criterion-compat = { version = "2.6.0", default-features = false } expect-test = "1.5.0" rustversion = "1.0" test-log = { version = "0.2.11", features = ["trace"] } trybuild = "1.0" [target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))'.dev-dependencies] tikv-jemallocator = "0.6.0" [[bench]] name = "compare" harness = false [[bench]] name = "incremental" harness = false [[bench]] name = "accumulator" harness = false [[bench]] name = "dataflow" harness = false [workspace] members = ["components/salsa-macro-rules", "components/salsa-macros"] [workspace.package] authors = ["Salsa developers"] edition = "2021" license = "Apache-2.0 OR MIT" repository = "https://github.com/salsa-rs/salsa" rust-version = "1.85" salsa-0.23.0/FAQ.md000064400000000000000000000030621046102023000117430ustar 00000000000000# Frequently asked questions ## Why is it called salsa? I like salsa! Don't you?! Well, ok, there's a bit more to it. The underlying algorithm for figuring out which bits of code need to be re-executed after any given change is based on the algorithm used in rustc. Michael Woerister and I first described the rustc algorithm in terms of two colors, red and green, and hence we called it the "red-green algorithm". This made me think of the New Mexico State Question --- ["Red or green?"][nm] --- which refers to chile (salsa). Although this version no longer uses colors (we borrowed revision counters from Glimmer, instead), I still like the name. [nm]: https://www.sos.state.nm.us/about-new-mexico/state-question/ ## What is the relationship between salsa and an Entity-Component System (ECS)? You may have noticed that Salsa "feels" a lot like an ECS in some ways. That's true -- Salsa's queries are a bit like *components* (and the keys to the queries are a bit like *entities*). But there is one big difference: **ECS is -- at its heart -- a mutable system**. You can get or set a component of some entity whenever you like. 
In contrast, salsa's queries **define "derived values" via pure computations**. Partly as a consequence, ECS doesn't handle incremental updates for you. When you update some component of some entity, you have to ensure that other entities' components are updated appropriately. Finally, ECS offers interesting metadata and "aspect-like" facilities, such as iterating over all entities that share certain components. Salsa has no analogue to that. salsa-0.23.0/LICENSE-APACHE000064400000000000000000000251371046102023000127450ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. salsa-0.23.0/LICENSE-MIT000064400000000000000000000017771046102023000124560ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. salsa-0.23.0/README.md000064400000000000000000000045641046102023000122760ustar 00000000000000# salsa [![Test](https://github.com/salsa-rs/salsa/workflows/Test/badge.svg)](https://github.com/salsa-rs/salsa/actions?query=workflow%3ATest) [![Book](https://github.com/salsa-rs/salsa/workflows/Book/badge.svg)](https://github.com/salsa-rs/salsa/actions?query=workflow%3ABook) [![Released API docs](https://docs.rs/salsa/badge.svg)](https://docs.rs/salsa) [![Crates.io](https://img.shields.io/crates/v/salsa.svg)](https://crates.io/crates/salsa) *A generic framework for on-demand, incrementalized computation.* Salsa Logo ## Obligatory warning Very much a WORK IN PROGRESS at this point. ## Credits This system is heavily inspired by [adapton](http://adapton.org/), [glimmer](https://github.com/glimmerjs/glimmer-vm), and rustc's query system. So credit goes to Eduard-Mihai Burtescu, Matthew Hammer, Yehuda Katz, and Michael Woerister. ## Key idea The key idea of `salsa` is that you define your program as a set of **queries**. Every query is used like a function `K -> V` that maps from some key of type `K` to a value of type `V`. Queries come in two basic varieties: - **Inputs**: the base inputs to your system. You can change these whenever you like. - **Functions**: pure functions (no side effects) that transform your inputs into other values. The results of queries are memoized to avoid recomputing them a lot. When you make changes to the inputs, we'll figure out (fairly intelligently) when we can re-use these memoized values and when we have to recompute them. A minimal code example appears below. ## Want to learn more? To learn more about Salsa, try one of the following: - read the [heavily commented examples](https://github.com/salsa-rs/salsa/tree/master/examples); - check out the [Salsa book](https://salsa-rs.github.io/salsa); - [中文版](https://rust-chinese-translation.github.io/salsa-book) - watch one of our [videos](https://salsa-rs.github.io/salsa/videos.html).
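Or, for a first taste in code, here is a minimal sketch of the "key idea" above, written against the same `#[salsa::input]`/`#[salsa::tracked]` API that this crate's own benchmarks use; the `File` and `line_count` names are illustrative only, not part of the library:

```rust
use salsa::Setter;

// An *input*: base data that can be changed at any time.
#[salsa::input]
struct File {
    #[returns(ref)]
    text: String,
}

// A *function*: a pure computation over inputs. Its result is memoized,
// and it is re-executed only when data it actually read has changed.
#[salsa::tracked]
fn line_count(db: &dyn salsa::Database, file: File) -> usize {
    file.text(db).lines().count()
}

fn main() {
    let mut db = salsa::DatabaseImpl::new();
    let file = File::new(&db, "hello\nworld".to_string());
    assert_eq!(line_count(&db, file), 2);

    // Changing an input invalidates dependent memoized results;
    // the next call recomputes on demand.
    file.set_text(&mut db).to("hello".to_string());
    assert_eq!(line_count(&db, file), 1);
}
```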
## Getting in touch The bulk of the discussion happens in the [issues](https://github.com/salsa-rs/salsa/issues) and [pull requests](https://github.com/salsa-rs/salsa/pulls), but we have a [zulip chat](https://salsa.zulipchat.com/) as well. ## Contributing To create a release and publish to crates.io, update the `version` field in Cargo.toml. Once the change is pushed, GitHub Actions will publish the crates to crates.io automatically. salsa-0.23.0/RELEASES.md000064400000000000000000000012131046102023000125360ustar 00000000000000# 0.13.0 - **Breaking change:** adopt the new `Durability` API proposed in [RFC #6] - this replaces and generalizes the existing concepts of constants - **Breaking change:** remove "volatile" queries - instead, create a normal query which invokes the `report_untracked_read` method on the salsa runtime - introduce "slots", an optimization to salsa's internal workings - document `#[salsa::requires]` attribute, which permits private dependencies - Adopt `AtomicU64` for `runtimeId` (#182) - use `ptr::eq` and `ptr::hash` for readability - upgrade parking lot, rand dependencies [RFC #6]: https://github.com/salsa-rs/salsa-rfcs/pull/6 salsa-0.23.0/benches/accumulator.rs000064400000000000000000000036721046102023000153140ustar 00000000000000use std::hint::black_box; use codspeed_criterion_compat::{criterion_group, criterion_main, BatchSize, Criterion}; use salsa::Accumulator; include!("shims/global_alloc_overwrite.rs"); #[salsa::input] struct Input { expressions: usize, } #[allow(dead_code)] #[salsa::accumulator] struct Diagnostic(String); #[salsa::interned] struct Expression<'db> { number: usize, } #[salsa::tracked] #[inline(never)] fn root<'db>(db: &'db dyn salsa::Database, input: Input) -> Vec<usize> { (0..input.expressions(db)) .map(|i| infer_expression(db, Expression::new(db, i))) .collect() } #[salsa::tracked] #[inline(never)] fn infer_expression<'db>(db: &'db dyn salsa::Database, expression: Expression<'db>) -> usize { let number = expression.number(db); if number % 10 == 0 { Diagnostic(format!("Number is {number}")).accumulate(db); } if number != 0 && number % 2 == 0 { let sub_expression = Expression::new(db, number / 2); let _ = infer_expression(db, sub_expression); } number } fn accumulator(criterion: &mut Criterion) { criterion.bench_function("accumulator", |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::new(); let input = Input::new(black_box(&db), black_box(10_000)); // Pre-warm let result = root(black_box(&db), black_box(input)); assert!(!black_box(result).is_empty()); (db, input) }, |(db, input)| { // Measure the cost of collecting accumulators ignoring the cost of running the // query itself.
let diagnostics = root::accumulated::<Diagnostic>(black_box(db), *black_box(input)); assert_eq!(black_box(diagnostics).len(), 1000); }, BatchSize::SmallInput, ); }); } criterion_group!(benches, accumulator); criterion_main!(benches); salsa-0.23.0/benches/compare.rs000064400000000000000000000213561046102023000144230ustar 00000000000000use std::hint::black_box; use std::mem::transmute; use codspeed_criterion_compat::{ criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, }; use salsa::Setter; include!("shims/global_alloc_overwrite.rs"); #[salsa::input] pub struct Input { #[returns(ref)] pub text: String, } #[salsa::tracked] #[inline(never)] pub fn length(db: &dyn salsa::Database, input: Input) -> usize { input.text(db).len() } #[salsa::interned] pub struct InternedInput<'db> { #[returns(ref)] pub text: String, } #[derive(Clone, Copy, PartialEq, Eq, Hash, salsa::Supertype)] enum SupertypeInput<'db> { InternedInput(InternedInput<'db>), Input(Input), } #[salsa::tracked] #[inline(never)] pub fn interned_length<'db>(db: &'db dyn salsa::Database, input: InternedInput<'db>) -> usize { input.text(db).len() } #[salsa::tracked] #[inline(never)] pub fn either_length<'db>(db: &'db dyn salsa::Database, input: SupertypeInput<'db>) -> usize { match input { SupertypeInput::InternedInput(input) => interned_length(db, input), SupertypeInput::Input(input) => length(db, input), } } fn mutating_inputs(c: &mut Criterion) { let mut group: codspeed_criterion_compat::BenchmarkGroup< codspeed_criterion_compat::measurement::WallTime, > = c.benchmark_group("Mutating Inputs"); for n in &[10, 20, 30] { group.bench_function(BenchmarkId::new("mutating", n), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); let base_string = "hello, world!".to_owned(); let base_len = base_string.len(); let string = base_string.clone().repeat(*n); let new_len = string.len(); let input = Input::new(black_box(&db), black_box(base_string.clone())); let actual_len = length(&db, input); assert_eq!(black_box(actual_len), base_len); (db, input, string, new_len) }, |&mut (ref mut db, input, ref string, new_len)| { input.set_text(black_box(db)).to(black_box(string).clone()); let actual_len = length(db, input); assert_eq!(black_box(actual_len), new_len); }, BatchSize::SmallInput, ) }); } group.finish(); } fn inputs(c: &mut Criterion) { let mut group: codspeed_criterion_compat::BenchmarkGroup< codspeed_criterion_compat::measurement::WallTime, > = c.benchmark_group("Mutating Inputs"); group.bench_function(BenchmarkId::new("new", "InternedInput"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); // Prepopulate ingredients.
let input = InternedInput::new(black_box(&db), black_box("hello, world!".to_owned())); let interned_len = interned_length(black_box(&db), black_box(input)); assert_eq!(black_box(interned_len), 13); db }, |db| { let input = InternedInput::new(black_box(db), black_box("hello, world!".to_owned())); let interned_len = interned_length(black_box(db), black_box(input)); assert_eq!(black_box(interned_len), 13); }, BatchSize::SmallInput, ) }); group.bench_function(BenchmarkId::new("amortized", "InternedInput"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); // we can't pass this along otherwise, and the lifetime is generally informational let input: InternedInput<'static> = unsafe { transmute(InternedInput::new(&db, "hello, world!".to_owned())) }; let interned_len = interned_length(black_box(&db), black_box(input)); assert_eq!(black_box(interned_len), 13); (db, input) }, |&mut (ref db, input)| { let interned_len = interned_length(black_box(db), black_box(input)); assert_eq!(black_box(interned_len), 13); }, BatchSize::SmallInput, ) }); group.bench_function(BenchmarkId::new("new", "Input"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); // Prepopulate ingredients. let input = Input::new(black_box(&db), black_box("hello, world!".to_owned())); let len = length(black_box(&db), black_box(input)); assert_eq!(black_box(len), 13); db }, |db| { let input = Input::new(black_box(db), black_box("hello, world!".to_owned())); let len = length(black_box(db), black_box(input)); assert_eq!(black_box(len), 13); }, BatchSize::SmallInput, ) }); group.bench_function(BenchmarkId::new("amortized", "Input"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); let input = Input::new(black_box(&db), black_box("hello, world!".to_owned())); let len = length(black_box(&db), black_box(input)); assert_eq!(black_box(len), 13); (db, input) }, |&mut (ref db, input)| { let len = length(black_box(db), black_box(input)); assert_eq!(black_box(len), 13); }, BatchSize::SmallInput, ) }); group.bench_function(BenchmarkId::new("new", "SupertypeInput"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); // Prepopulate ingredients. 
let input = SupertypeInput::Input(Input::new( black_box(&db), black_box("hello, world!".to_owned()), )); let interned_input = SupertypeInput::InternedInput(InternedInput::new( black_box(&db), black_box("hello, world!".to_owned()), )); let len = either_length(black_box(&db), black_box(input)); assert_eq!(black_box(len), 13); let len = either_length(black_box(&db), black_box(interned_input)); assert_eq!(black_box(len), 13); db }, |db| { let input = SupertypeInput::Input(Input::new( black_box(db), black_box("hello, world!".to_owned()), )); let interned_input = SupertypeInput::InternedInput(InternedInput::new( black_box(db), black_box("hello, world!".to_owned()), )); let len = either_length(black_box(db), black_box(input)); assert_eq!(black_box(len), 13); let len = either_length(black_box(db), black_box(interned_input)); assert_eq!(black_box(len), 13); }, BatchSize::SmallInput, ) }); group.bench_function(BenchmarkId::new("amortized", "SupertypeInput"), |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::default(); let input = SupertypeInput::Input(Input::new( black_box(&db), black_box("hello, world!".to_owned()), )); let interned_input = SupertypeInput::InternedInput(InternedInput::new( black_box(&db), black_box("hello, world!".to_owned()), )); // we can't pass this along otherwise, and the lifetime is generally informational let interned_input: SupertypeInput<'static> = unsafe { transmute(interned_input) }; let len = either_length(black_box(&db), black_box(input)); assert_eq!(black_box(len), 13); let len = either_length(black_box(&db), black_box(interned_input)); assert_eq!(black_box(len), 13); (db, input, interned_input) }, |&mut (ref db, input, interned_input)| { let len = either_length(black_box(db), black_box(input)); assert_eq!(black_box(len), 13); let len = either_length(black_box(db), black_box(interned_input)); assert_eq!(black_box(len), 13); }, BatchSize::SmallInput, ) }); group.finish(); } criterion_group!(benches, mutating_inputs, inputs); criterion_main!(benches); salsa-0.23.0/benches/dataflow.rs000064400000000000000000000122321046102023000145670ustar 00000000000000//! Benchmark for fixpoint iteration cycle resolution. //! //! This benchmark simulates a (very simplified) version of a real dataflow analysis using fixpoint //! iteration. use std::collections::BTreeSet; use std::iter::IntoIterator; use codspeed_criterion_compat::{criterion_group, criterion_main, BatchSize, Criterion}; use salsa::{CycleRecoveryAction, Database as Db, Setter}; include!("shims/global_alloc_overwrite.rs"); /// A Use of a symbol. #[salsa::input] struct Use { reaching_definitions: Vec<Definition>, } /// A Definition of a symbol, either of the form `base + increment` or `0 + increment`. #[salsa::input] struct Definition { base: Option<Use>, increment: usize, } #[derive(Eq, PartialEq, Clone, Debug, salsa::Update)] enum Type { Bottom, Values(Box<[usize]>), Top, } impl Type { fn join(tys: impl IntoIterator<Item = Type>) -> Type { let mut result = Type::Bottom; for ty in tys.into_iter() { result = match (result, ty) { (result, Type::Bottom) => result, (_, Type::Top) => Type::Top, (Type::Top, _) => Type::Top, (Type::Bottom, ty) => ty, (Type::Values(a_ints), Type::Values(b_ints)) => { let mut set = BTreeSet::new(); set.extend(a_ints); set.extend(b_ints); Type::Values(set.into_iter().collect()) } } } result } } #[salsa::tracked(cycle_fn=use_cycle_recover, cycle_initial=use_cycle_initial)] fn infer_use<'db>(db: &'db dyn Db, u: Use) -> Type { let defs = u.reaching_definitions(db); match defs[..]
{ [] => Type::Bottom, [def] => infer_definition(db, def), _ => Type::join(defs.iter().map(|&def| infer_definition(db, def))), } } #[salsa::tracked(cycle_fn=def_cycle_recover, cycle_initial=def_cycle_initial)] fn infer_definition<'db>(db: &'db dyn Db, def: Definition) -> Type { let increment_ty = Type::Values(Box::from([def.increment(db)])); if let Some(base) = def.base(db) { let base_ty = infer_use(db, base); add(&base_ty, &increment_ty) } else { increment_ty } } fn def_cycle_initial(_db: &dyn Db, _def: Definition) -> Type { Type::Bottom } fn def_cycle_recover( _db: &dyn Db, value: &Type, count: u32, _def: Definition, ) -> CycleRecoveryAction<Type> { cycle_recover(value, count) } fn use_cycle_initial(_db: &dyn Db, _use: Use) -> Type { Type::Bottom } fn use_cycle_recover( _db: &dyn Db, value: &Type, count: u32, _use: Use, ) -> CycleRecoveryAction<Type> { cycle_recover(value, count) } fn cycle_recover(value: &Type, count: u32) -> CycleRecoveryAction<Type> { match value { Type::Bottom => CycleRecoveryAction::Iterate, Type::Values(_) => { if count > 4 { CycleRecoveryAction::Fallback(Type::Top) } else { CycleRecoveryAction::Iterate } } Type::Top => CycleRecoveryAction::Iterate, } } fn add(a: &Type, b: &Type) -> Type { match (a, b) { (Type::Bottom, _) | (_, Type::Bottom) => Type::Bottom, (Type::Top, _) | (_, Type::Top) => Type::Top, (Type::Values(a_ints), Type::Values(b_ints)) => { let mut set = BTreeSet::new(); set.extend( a_ints .into_iter() .flat_map(|a| b_ints.into_iter().map(move |b| a + b)), ); Type::Values(set.into_iter().collect()) } } } fn dataflow(criterion: &mut Criterion) { criterion.bench_function("converge_diverge", |b| { b.iter_batched_ref( || { let mut db = salsa::DatabaseImpl::new(); let defx0 = Definition::new(&db, None, 0); let defy0 = Definition::new(&db, None, 0); let defx1 = Definition::new(&db, None, 0); let defy1 = Definition::new(&db, None, 0); let use_x = Use::new(&db, vec![defx0, defx1]); let use_y = Use::new(&db, vec![defy0, defy1]); defx1.set_base(&mut db).to(Some(use_y)); defy1.set_base(&mut db).to(Some(use_x)); // prewarm cache let _ = infer_use(&db, use_x); let _ = infer_use(&db, use_y); (db, defx1, use_x, use_y) }, |(db, defx1, use_x, use_y)| { // Set the increment on x to 0. defx1.set_increment(db).to(0); // Both symbols converge on 0. assert_eq!(infer_use(db, *use_x), Type::Values(Box::from([0]))); assert_eq!(infer_use(db, *use_y), Type::Values(Box::from([0]))); // Set the increment on x to 1. defx1.set_increment(db).to(1); // Now the loop diverges and we fall back to Top. 
assert_eq!(infer_use(db, *use_x), Type::Top); assert_eq!(infer_use(db, *use_y), Type::Top); }, BatchSize::LargeInput, ); }); } criterion_group!(benches, dataflow); criterion_main!(benches); salsa-0.23.0/benches/incremental.rs000064400000000000000000000033511046102023000152710ustar 00000000000000use std::hint::black_box; use codspeed_criterion_compat::{criterion_group, criterion_main, BatchSize, Criterion}; use salsa::Setter; include!("shims/global_alloc_overwrite.rs"); #[salsa::input] struct Input { field: usize, } #[salsa::tracked] struct Tracked<'db> { number: usize, } #[salsa::tracked(returns(ref))] #[inline(never)] fn index<'db>(db: &'db dyn salsa::Database, input: Input) -> Vec<Tracked<'db>> { (0..input.field(db)).map(|i| Tracked::new(db, i)).collect() } #[salsa::tracked] #[inline(never)] fn root(db: &dyn salsa::Database, input: Input) -> usize { let index = index(db, input); index.len() } fn many_tracked_structs(criterion: &mut Criterion) { criterion.bench_function("many_tracked_structs", |b| { b.iter_batched_ref( || { let db = salsa::DatabaseImpl::new(); let input = Input::new(black_box(&db), black_box(1_000)); let input2 = Input::new(black_box(&db), black_box(1)); // prewarm cache let root1 = root(black_box(&db), black_box(input)); assert_eq!(black_box(root1), 1_000); let root2 = root(black_box(&db), black_box(input2)); assert_eq!(black_box(root2), 1); (db, input, input2) }, |(db, input, input2)| { // Make a change, but fetch the result for the other input input2.set_field(black_box(db)).to(black_box(2)); let result = root(black_box(db), *black_box(input)); assert_eq!(black_box(result), 1_000); }, BatchSize::LargeInput, ); }); } criterion_group!(benches, many_tracked_structs); criterion_main!(benches); salsa-0.23.0/benches/shims/global_alloc_overwrite.rs000064400000000000000000000016251046102023000206350ustar 00000000000000#[cfg(all( not(target_os = "windows"), not(target_os = "openbsd"), any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64" ) ))] #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; // Disable decay after 10s because it can show up as *random* slow allocations // in benchmarks. We don't need purging in benchmarks because it isn't important // to give unallocated pages back to the OS. 
// https://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms #[cfg(all( not(target_os = "windows"), not(target_os = "openbsd"), any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64" ) ))] #[allow(non_upper_case_globals)] #[export_name = "_rjem_malloc_conf"] #[allow(unsafe_code)] pub static _rjem_malloc_conf: &[u8] = b"dirty_decay_ms:-1,muzzy_decay_ms:-1\0"; salsa-0.23.0/book/.gitignore000064400000000000000000000000051046102023000137260ustar 00000000000000book salsa-0.23.0/book/book.toml000064400000000000000000000007161046102023000135760ustar 00000000000000[book] authors = ["Salsa Contributors"] multilingual = false src = "src" title = "Salsa" [build] create-missing = false [preprocess.links] [output.html] additional-css =["mermaid.css"] additional-js =["mermaid.min.js", "mermaid-init.js"] [output.linkcheck] # follow-web-links = true --- this is commented out b/c of false errors traverse-parent-directories = false exclude = ['bilibili\.com'] [preprocessor] [preprocessor.mermaid] command = "mdbook-mermaid" salsa-0.23.0/book/mermaid-init.js000064400000000000000000000000501046102023000146530ustar 00000000000000mermaid.initialize({startOnLoad:true}); salsa-0.23.0/book/netlify.sh000075500000000000000000000015431046102023000137570ustar 00000000000000#!/bin/bash # # Script meant to be run from netlify set -x MDBOOK_VERSION='0.4.12' MDBOOK_LINKCHECK_VERSION='0.7.4' MDBOOK_MERMAID_VERSION='0.8.3' curl -L https://github.com/rust-lang/mdBook/releases/download/v$MDBOOK_VERSION/mdbook-v$MDBOOK_VERSION-x86_64-unknown-linux-gnu.tar.gz | tar xz -C ~/.cargo/bin curl -L https://github.com/badboy/mdbook-mermaid/releases/download/v$MDBOOK_MERMAID_VERSION/mdbook-mermaid-v$MDBOOK_MERMAID_VERSION-x86_64-unknown-linux-gnu.tar.gz | tar xz -C ~/.cargo/bin curl -L https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/download/v$MDBOOK_LINKCHECK_VERSION/mdbook-linkcheck.v$MDBOOK_LINKCHECK_VERSION.x86_64-unknown-linux-gnu.zip -O unzip mdbook-linkcheck.v$MDBOOK_LINKCHECK_VERSION.x86_64-unknown-linux-gnu.zip -d ~/.cargo/bin chmod +x ~/.cargo/bin/mdbook-linkcheck mdbook build mkdir versions mv book/html/* versions salsa-0.23.0/book/src/SUMMARY.md000064400000000000000000000047001046102023000142120ustar 00000000000000# Summary - [About salsa](./about_salsa.md) # How to use Salsa - [Overview](./overview.md) - [Tutorial: calc language](./tutorial.md) - [Basic structure](./tutorial/structure.md) - [Defining the database struct](./tutorial/db.md) - [Defining the IR: the various "salsa structs"](./tutorial/ir.md) - [Defining the parser: memoized functions and inputs](./tutorial/parser.md) - [Defining the parser: reporting errors](./tutorial/accumulators.md) - [Defining the parser: debug impls and testing](./tutorial/debug.md) - [Defining the checker](./tutorial/checker.md) - [Defining the interpreter](./tutorial/interpreter.md) - [Reference](./reference.md) - [Durability](./reference/durability.md) - [Algorithm](./reference/algorithm.md) - [Common patterns](./common_patterns.md) - [On-demand (Lazy) inputs](./common_patterns/on_demand_inputs.md) - [Tuning](./tuning.md) - [Cycle handling](./cycles.md) # How Salsa works internally - [How Salsa works](./how_salsa_works.md) - [Videos](./videos.md) - [Plumbing](./plumbing.md) - [Databases and runtime](./plumbing/database_and_runtime.md) - [The db lifetime on tracked/interned structs](./plumbing/db_lifetime.md) - [Tracked structures](./plumbing/tracked_structs.md) - [Query operations](./plumbing/query_ops.md) - [maybe changed 
after](./plumbing/maybe_changed_after.md) - [Fetch](./plumbing/fetch.md) - [Derived queries flowchart](./plumbing/derived_flowchart.md) - [Cycle handling](./plumbing/cycles.md) - [Terminology](./plumbing/terminology.md) - [Backdate](./plumbing/terminology/backdate.md) - [Changed at](./plumbing/terminology/changed_at.md) - [Dependency](./plumbing/terminology/dependency.md) - [Derived query](./plumbing/terminology/derived_query.md) - [Durability](./plumbing/terminology/durability.md) - [Input query](./plumbing/terminology/input_query.md) - [Ingredient](./plumbing/terminology/ingredient.md) - [LRU](./plumbing/terminology/LRU.md) - [Memo](./plumbing/terminology/memo.md) - [Query](./plumbing/terminology/query.md) - [Query function](./plumbing/terminology/query_function.md) - [Revision](./plumbing/terminology/revision.md) - [Salsa item](./plumbing/terminology/salsa_item.md) - [Salsa struct](./plumbing/terminology/salsa_struct.md) - [Untracked dependency](./plumbing/terminology/untracked.md) - [Verified](./plumbing/terminology/verified.md) # Appendices - [Meta: about the book itself](./meta.md) salsa-0.23.0/book/src/about_salsa.md000064400000000000000000000014611046102023000153530ustar 00000000000000# About salsa Salsa is a Rust framework for writing incremental, on-demand programs -- these are programs that want to adapt to changes in their inputs, continuously producing a new output that is up-to-date. Salsa is based on the incremental recompilation techniques that we built for rustc, and many (but not all) of its users are building compilers or other similar tooling. If you'd like to learn more about Salsa, check out: - The [overview](./overview.md), for a brief summary. - The [tutorial](./tutorial.md), for a detailed look. - You can also watch some of our [videos](./videos.md), though the content there is rather out of date. If you'd like to chat about Salsa, or you think you might like to contribute, please jump on to our Zulip instance at [salsa.zulipchat.com](https://salsa.zulipchat.com/). salsa-0.23.0/book/src/common_patterns/on_demand_inputs.md000064400000000000000000000034231046102023000216140ustar 00000000000000# On-Demand (Lazy) Inputs Salsa inputs work best if you can easily provide all of the inputs upfront. However sometimes the set of inputs is not known beforehand. A typical example is reading files from disk. While it is possible to eagerly scan a particular directory and create an in-memory file tree as salsa input structs, a more straight-forward approach is to read the files lazily. That is, when a query requests the text of a file for the first time: 1. Read the file from disk and cache it. 2. Set up a file-system watcher for this path. 3. Update the cached file when the watcher sends a change notification. This is possible to achieve in salsa, by caching the inputs in your database structs and adding a method to the database trait to retrieve them out of this cache. A complete, runnable file-watching example can be found in [the lazy-input example](https://github.com/salsa-rs/salsa/tree/master/examples/lazy-input). The setup looks roughly like this: ```rust,ignore {{#include ../../../examples/lazy-input/main.rs:db}} ``` - We declare a method on the `Db` trait that gives us a `File` input on-demand (it only requires a `&dyn Db` not a `&mut dyn Db`). - There should only be one input struct per file, so we implement that method using a cache (`DashMap` is like a `RwLock<HashMap>`), as sketched below. 
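A hedged sketch of that setup, simplified from the linked lazy-input example: the file-watcher registration and the example's error type are elided, and names such as `LazyInputDatabase` and the `std::io::Result` signature are illustrative stand-ins rather than the exact code.

```rust,ignore
// Illustrative sketch only -- see examples/lazy-input for the real code.
use dashmap::{mapref::entry::Entry, DashMap};
use std::path::PathBuf;

#[salsa::input]
struct File {
    path: PathBuf,
    #[returns(ref)]
    contents: String,
}

trait Db: salsa::Database {
    /// Returns the `File` input for `path`, creating it on first access.
    fn input(&self, path: PathBuf) -> std::io::Result<File>;
}

#[salsa::db]
#[derive(Default, Clone)]
struct LazyInputDatabase {
    storage: salsa::Storage<Self>,
    files: DashMap<PathBuf, File>, // at most one input struct per on-disk file
}

#[salsa::db]
impl salsa::Database for LazyInputDatabase {}

impl Db for LazyInputDatabase {
    fn input(&self, path: PathBuf) -> std::io::Result<File> {
        Ok(match self.files.entry(path.clone()) {
            // The file was read before: reuse the existing input struct.
            Entry::Occupied(entry) => *entry.get(),
            // First access: read from disk and cache the new input.
            // (The real example also registers a file watcher here.)
            Entry::Vacant(entry) => {
                let contents = std::fs::read_to_string(&path)?;
                *entry.insert(File::new(self, path, contents))
            }
        })
    }
}
```

Note that the cache lives outside salsa's storage, so both looking up an existing `File` and creating a brand-new input work through `&self`, as noted above.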
The driving code that's doing the top-level queries is then in charge of updating the file contents when a file-change notification arrives. It does this by updating the Salsa input in the same way that you would update any other input. Here we implement a simple driving loop that recompiles the code whenever a file changes. You can use the logs to check that only the queries that could have changed are re-evaluated. ```rust,ignore {{#include ../../../examples/lazy-input/main.rs:main}} ``` salsa-0.23.0/book/src/common_patterns.md000064400000000000000000000001041046102023000162610ustar 00000000000000# Common patterns This section documents patterns for using Salsa. salsa-0.23.0/book/src/cycles.md000064400000000000000000000102121046102023000143420ustar 00000000000000# Cycle handling By default, when Salsa detects a cycle in the computation graph, Salsa will panic with a message naming the "cycle head"; this is the query that was called while it was also on the active query stack, creating a cycle. Salsa also supports recovering from query cycles via fixed-point iteration. Fixed-point iteration is only usable if the queries which may be involved in a cycle are monotone and operate on a value domain which is a partial order with fixed height. Effectively, this means that the queries' output must always be "larger" than its input, and there must be some "maximum" or "top" value. This ensures that fixed-point iteration will converge to a value. (A typical case would be queries operating on types, which form a partial order with a "top" type.) In order to support fixed-point iteration for a query, provide the `cycle_fn` and `cycle_initial` arguments to `salsa::tracked`: ```rust #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial_fn)] fn query(db: &dyn salsa::Database) -> u32 { // ... } fn cycle_fn(_db: &dyn salsa::Database, _value: &u32, _count: u32) -> salsa::CycleRecoveryAction<u32> { salsa::CycleRecoveryAction::Iterate } fn initial_fn(_db: &dyn salsa::Database) -> u32 { 0 } ``` If `query` becomes the head of a cycle (that is, `query` is executing and on the active query stack, it calls `query2`, `query2` calls `query3`, and `query3` calls `query` again -- there could be any number of queries involved in the cycle), the `initial_fn` will be called to generate an "initial" value for `query` in the fixed-point computation. (The initial value should usually be the "bottom" value in the partial order.) All queries in the cycle will compute a provisional result based on this initial value for the cycle head. That is, `query3` will compute a provisional result using the initial value for `query`, and `query2` will compute a provisional result using this provisional value for `query3`. When `query2` returns its provisional result back to `query`, `query` will observe that it has received a provisional result from its own cycle, and will call the `cycle_fn` (with the current value and the number of iterations that have occurred so far). The `cycle_fn` can return `salsa::CycleRecoveryAction::Iterate` to indicate that the cycle should iterate again, or `salsa::CycleRecoveryAction::Fallback(value)` to indicate that the cycle should stop iterating and fall back to the value provided. If the `cycle_fn` continues to return `Iterate`, the cycle will iterate until it converges: that is, until two successive iterations produce the same result. 
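For instance, here is a hedged sketch of a two-query cycle in which *both* queries are annotated as potential cycle heads (either could be called first; see the next section). The `Input` struct, the value domain capped at 100, and the iteration limit are illustrative assumptions, not code from this repository:

```rust
#[salsa::input]
struct Input {
    start: u32,
}

const MAX_ITERATIONS: u32 = 10;

// Both queries may become the cycle head depending on which is called
// first, so both carry `cycle_fn`/`cycle_initial` annotations.
#[salsa::tracked(cycle_fn=recover, cycle_initial=initial)]
fn query_a(db: &dyn salsa::Database, input: Input) -> u32 {
    // Monotone: the result never decreases, and 100 acts as "top".
    query_b(db, input).min(100)
}

#[salsa::tracked(cycle_fn=recover, cycle_initial=initial)]
fn query_b(db: &dyn salsa::Database, input: Input) -> u32 {
    (query_a(db, input) + 1).min(100)
}

fn initial(_db: &dyn salsa::Database, _input: Input) -> u32 {
    0 // the "bottom" of the value domain
}

fn recover(
    _db: &dyn salsa::Database,
    _value: &u32,
    count: u32,
    _input: Input,
) -> salsa::CycleRecoveryAction<u32> {
    if count > MAX_ITERATIONS {
        // Give up iterating and force the stable "top" value.
        salsa::CycleRecoveryAction::Fallback(100)
    } else {
        salsa::CycleRecoveryAction::Iterate
    }
}
```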
If the `cycle_fn` returns `Fallback`, the cycle will iterate one last time and verify that the returned value is the same as the fallback value; that is, the fallback value results in a stable converged cycle. If not, Salsa will panic. It is not permitted to use a fallback value that does not converge, because this would leave the cycle in an unpredictable state, depending on the order of query execution. ## All potential cycle heads must set `cycle_fn` and `cycle_initial` Consider a two-query cycle where `query_a` calls `query_b`, and `query_b` calls `query_a`. If `query_a` is called first, then it will become the "cycle head", but if `query_b` is called first, then `query_b` will be the cycle head. In order for a cycle to use fixed-point iteration instead of panicking, the cycle head must set `cycle_fn` and `cycle_initial`. This means that in order to be robust against varying query execution order, both `query_a` and `query_b` must set `cycle_fn` and `cycle_initial`. ## Ensuring convergence Fixed-point iteration is a powerful tool, but is also easy to misuse, potentially resulting in infinite iteration. To avoid this, ensure that all queries participating in fixpoint iteration are deterministic and monotone. ## Calling Salsa queries from within `cycle_fn` or `cycle_initial` It is permitted to call other Salsa queries from within the `cycle_fn` and `cycle_initial` functions. However, if these functions re-enter the same cycle, this can lead to unpredictable results. Take care which queries are called from within cycle-recovery functions, and avoid triggering further cycles. salsa-0.23.0/book/src/derived-query-maybe-changed-after.drawio.svg000064400000000000000000002115351046102023000231140ustar 00000000000000
[Embedded draw.io SVG; only its text labels survive extraction. Deduplicated node labels: "maybe_changed_since(revision)"; "Load memo_map[K]"; "Memo with value exists: last verified in Rv with durability D"; "No previous memo, or memo has no value"; "Shallow verify memo"; "Return changed_at > revision"; "maybe_changed_after_cold()"; "Claim sync_map[K]"; "Claimed"; "Already claimed"; "Block until other thread completes"; "Thread completed normally"; "Cycle detected or thread panicked"; "Push K onto stack"; "Deep verify memo"; "Have old memo?"; "Execute"; "Return true (maybe changed)"; "Panic"; "Release claim. Pop stack frame. (Destructor)"; plus True/False/Yes/No edge labels.]
salsa-0.23.0/book/src/derived-query-read.drawio.svg000064400000000000000000006432021046102023000202440ustar 00000000000000
[Embedded draw.io SVG; only its text labels survive extraction. Deduplicated node labels: "Load memo_map[K]"; "Memo with value exists: last verified in Rv with durability D"; "No previous memo, or memo has no value"; "Rv is current revision?"; "Inputs with durability D changed since Rv?"; "Return value"; "Update verified_at to current revision"; "Claim sync_map[K]"; "Claimed"; "Already claimed"; "Block until other thread completes"; "Thread completed normally"; "Cycle detected or thread panicked"; "Push K onto stack"; "For each input I..."; "Input I maybe changed after Rv?"; "All inputs verified"; "Salsa Event: Will Execute"; "Execute query function"; "Backdate value if equal to old value."; "Store memo with new value."; "Cycle detected"; "Execute cycle recovery"; "Check recovery strategy"; "Fallback"; "Panic"; "Check cycle flag on stack frame"; "Flag is Some"; "Flag is None"; "Unwind with cycle"; "Release claim. Pop stack frame. (Destructor)"; and the function regions "fetch_hot()", "fetch_cold()", "sync_map.claim()", "execute()", "shallow_verify_memo()", "deep_verify_memo()"; plus Yes/No edge labels.]
salsa-0.23.0/book/src/how_salsa_works.md000064400000000000000000000042701046102023000162640ustar 00000000000000# How Salsa works ## Video available To get the most complete introduction to Salsa's inner workings, check out [the "How Salsa Works" video](https://youtu.be/_muY4HjSqVw). If you'd like a deeper dive, [the "Salsa in more depth" video](https://www.youtube.com/watch?v=i_IhACacPRY) digs into the details of the incremental algorithm. > If you're in China, watch videos on ["How Salsa Works"](https://www.bilibili.com/video/BV1Df4y1A7t3/), ["Salsa In More Depth"](https://www.bilibili.com/video/BV1AM4y1G7E4/). ## Key idea The key idea of `salsa` is that you define your program as a set of **queries**. Every query is used like a function `K -> V` that maps from some key of type `K` to a value of type `V`. Queries come in two basic varieties: - **Inputs**: the base inputs to your system. You can change these whenever you like. - **Functions**: pure functions (no side effects) that transform your inputs into other values. The results of queries are memoized to avoid recomputing them a lot. When you make changes to the inputs, we'll figure out (fairly intelligently) when we can re-use these memoized values and when we have to recompute them. ## How to use Salsa in three easy steps Using Salsa is as easy as 1, 2, 3... 1. Define one or more **query groups** that contain the inputs and queries you will need. We'll start with one such group, but later on you can use more than one to break up your system into components (or spread your code across crates). 2. Define the **query functions** where appropriate. 3. Define the **database**, which contains the storage for all the inputs/queries you will be using. The database struct will contain the storage for all of the inputs/queries and may also contain anything else that your code needs (e.g., configuration data). To see an example of this in action, check out [the `hello_world` example][hello_world], which has a number of comments explaining how things work. [hello_world]: https://github.com/salsa-rs/salsa/blob/master/examples/hello_world/main.rs ## Digging into the plumbing Check out the [plumbing](plumbing.md) chapter to see a deeper explanation of the code that Salsa generates and how it connects to the Salsa library. salsa-0.23.0/book/src/meta.md000064400000000000000000000011361046102023000140030ustar 00000000000000# Meta: about the book itself ## Linking policy We try to avoid links that easily become fragile. **Do:** * Link to `docs.rs` types to document the public API, but modify the link to use `latest` as the version. * Link to modules in the source code. * Create ["named anchors"] and embed source code directly. ["named anchors"]: https://rust-lang.github.io/mdBook/format/mdbook.html?highlight=ANCHOR#including-portions-of-a-file **Don't:** * Link to direct lines on github, even within a specific commit, unless you are trying to reference a historical piece of code ("how things were at the time").salsa-0.23.0/book/src/overview.md000064400000000000000000000302641046102023000147270ustar 00000000000000# Salsa overview This page contains a brief overview of the pieces of a Salsa program. For a more detailed look, check out the [tutorial](./tutorial.md), which walks through the creation of an entire project end-to-end. ## Goal of Salsa The goal of Salsa is to support efficient **incremental recomputation**. Salsa is used in rust-analyzer, for example, to help it recompile your program quickly as you type. 
The basic idea of a Salsa program is like this: ```rust let mut input = ...; loop { let output = your_program(&input); modify(&mut input); } ``` You start out with an input that has some value. You invoke your program to get back a result. Some time later, you modify the input and invoke your program again. **Our goal is to make this second call faster by re-using some of the results from the first call.** In reality, of course, you can have many inputs and "your program" may be many different methods and functions defined on those inputs. But this picture still conveys a few important concepts: - Salsa separates out the "incremental computation" (the function `your_program`) from some outer loop that is defining the inputs. - Salsa gives you the tools to define `your_program`. - Salsa assumes that `your_program` is a purely deterministic function of its inputs, or else this whole setup makes no sense. - The mutation of inputs always happens outside of `your_program`, as part of this master loop. ## Database Each time you run your program, Salsa remembers the values of each computation in a **database**. When the inputs change, it consults this database to look for values that can be reused. The database is also used to implement interning (making a canonical version of a value that can be copied around and cheaply compared for equality) and other convenient Salsa features. ## Inputs Every Salsa program begins with an **input**. Inputs are special structs that define the starting point of your program. Everything else in your program is ultimately a deterministic function of these inputs. For example, in a compiler, there might be an input defining the contents of a file on disk: ```rust #[salsa::input] pub struct ProgramFile { pub path: PathBuf, pub contents: String, } ``` You create an input by using the `new` method. Because the values of input fields are stored in the database, you also give an `&`-reference to the database: ```rust let file: ProgramFile = ProgramFile::new( &db, PathBuf::from("some_path.txt"), String::from("fn foo() { }"), ); ``` Mutable access is not needed since creating a new input cannot affect existing tracked data in the database. ### Salsa structs are just integers The `ProgramFile` struct generated by the `salsa::input` macro doesn't actually store any data. It's just a newtyped integer id: ```rust // Generated by the `#[salsa::input]` macro: #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ProgramFile(salsa::Id); ``` This means that, when you have a `ProgramFile`, you can easily copy it around and put it wherever you like. To actually read any of its fields, however, you will need to use the database and a getter method. ### Reading fields and `returns(ref)` You can access the value of an input's fields by using the getter method. As this is only reading the field, it just needs a `&`-reference to the database: ```rust let contents: String = file.contents(&db); ``` Invoking the accessor clones the value from the database. Sometimes this is not what you want, so you can annotate fields with `#[returns(ref)]` to indicate that they should return a reference into the database instead: ```rust #[salsa::input] pub struct ProgramFile { pub path: PathBuf, #[returns(ref)] pub contents: String, } ``` Now `file.contents(&db)` will return an `&String`. You can also use the `data` method to access the entire struct: ```rust file.data(&db) ``` ### Writing input fields Finally, you can also modify the value of an input field by using the setter method. 
Since this is modifying the input, and potentially invalidating data derived from it, the setter takes an `&mut`-reference to the database: ```rust file.set_contents(&mut db).to(String::from("fn foo() { /* add a comment */ }")); ``` Note that the setter method `set_contents` returns a "builder". This gives the ability to set the [durability](./reference/durability.md) and other advanced concepts. ## Tracked functions Once you've defined your inputs, the next thing to define are **tracked functions**: ```rust #[salsa::tracked] fn parse_file(db: &dyn crate::Db, file: ProgramFile) -> Ast { let contents: &str = file.contents(db); ... } ``` When you call a tracked function, Salsa will track which inputs it accesses (in this example, `file.contents(db)`). It will also memoize the return value (the `Ast`, in this case). If you call a tracked function twice, Salsa checks if the inputs have changed; if not, it can return the memoized value. The algorithm Salsa uses to decide when a tracked function needs to be re-executed is called the [red-green algorithm](./reference/algorithm.md), and it's where the name Salsa comes from. Tracked functions have to follow a particular structure: - They must take a `&`-reference to the database as their first argument. - Note that because this is an `&`-reference, it is not possible to modify inputs during a tracked function! - They must take a "Salsa struct" as the second argument -- in our example, this is an input struct, but there are other kinds of Salsa structs we'll describe shortly. - They _can_ take additional arguments, but it's faster and better if they don't. Tracked functions can return any clone-able type. A clone is required since, when the value is cached, the result will be cloned out of the database. Tracked functions can also be annotated with `#[returns(ref)]` if you would prefer to return a reference into the database instead (if `parse_file` were so annotated, then callers would actually get back an `&Ast`, for example). ## Tracked structs **Tracked structs** are intermediate structs created during your computation. Like inputs, their fields are stored inside the database, and the struct itself just wraps an id. Unlike inputs, they can only be created inside a tracked function, and their fields can never change once they are created (until the next revision, at least). Getter methods are provided to read the fields, but there are no setter methods. Example: ```rust #[salsa::tracked] struct Ast<'db> { #[returns(ref)] top_level_items: Vec<Item>, } ``` Just as with an input, new values are created by invoking `Ast::new`. The `new` function on a tracked struct only requires a `&`-reference to the database: ```rust #[salsa::tracked] fn parse_file(db: &dyn crate::Db, file: ProgramFile) -> Ast { let contents: &str = file.contents(db); let parser = Parser::new(contents); let mut top_level_items = vec![]; while let Some(item) = parser.parse_top_level_item() { top_level_items.push(item); } Ast::new(db, top_level_items) // <-- create an Ast! } ``` ### `#[id]` fields When a tracked function is re-executed because its inputs have changed, the tracked structs it creates in the new execution are matched against those from the old execution, and the values of their fields are compared. If the field values have not changed, then other tracked functions that only read those fields will not be re-executed. Normally, tracked structs are matched up by the order in which they are created. 
For example, the first `Ast` that is created by `parse_file` in the old execution will be matched against the first `Ast` created by `parse_file` in the new execution. In our example, `parse_file` only ever creates a single `Ast`, so this works great. Sometimes, however, it doesn't work so well. For example, imagine that we had a tracked struct for items in the file: ```rust #[salsa::tracked] struct Item { name: Word, // we'll define Word in a second! ... } ``` Maybe our parser first creates an `Item` with the name `foo` and then later a second `Item` with the name `bar`. Then the user changes the input to reorder the functions. Although we are still creating the same number of items, we are now creating them in the reverse order, so the naive algorithm will match up the _old_ `foo` struct with the new `bar` struct. This will look to Salsa as though the `foo` function was renamed to `bar` and the `bar` function was renamed to `foo`. We'll still get the right result, but we might do more recomputation than we needed to do if we understood that they were just reordered. To address this, you can tag fields in a tracked struct as `#[id]`. These fields are then used to "match up" struct instances across executions: ```rust #[salsa::tracked] struct Item { #[id] name: Word, // we'll define Word in a second! ... } ``` ### Specify the result of tracked functions for particular structs Sometimes it is useful to define a tracked function but specify its value for some particular struct specially. For example, maybe the default way to compute the representation for a function is to read the AST, but you also have some built-in functions in your language and you want to hard-code their results. This can also be used to simulate a field that is initialized after the tracked struct is created. To support this use case, you can use the `specify` method associated with tracked functions. To enable this method, you need to add the `specify` flag to the function to alert users that its value may sometimes be specified externally. ```rust #[salsa::tracked(specify)] // <-- specify flag required fn representation(db: &dyn crate::Db, item: Item) -> Representation { // read the user's input AST by default let ast = ast(db, item); // ... } fn create_builtin_item(db: &dyn crate::Db) -> Item { let i = Item::new(db, ...); let r = hardcoded_representation(); representation::specify(db, i, r); // <-- use the method! i } ``` Specifying is only possible for tracked functions that take a single tracked struct as an argument (besides the database). ## Interned structs The final kind of Salsa struct is the **interned struct**. Interned structs are useful for quick equality comparison. They are commonly used to represent strings or other primitive values. Most compilers, for example, will define a type to represent a user identifier: ```rust #[salsa::interned] struct Word { #[returns(ref)] pub text: String, } ``` As with input and tracked structs, the `Word` struct itself is just a newtyped integer, and the actual data is stored in the database. You can create a new interned struct using `new`, just like with input and tracked structs: ```rust let w1 = Word::new(db, "foo".to_string()); let w2 = Word::new(db, "bar".to_string()); let w3 = Word::new(db, "foo".to_string()); ``` When you create two interned structs with the same field values, you are guaranteed to get back the same integer id. So here, we know that `assert_eq!(w1, w3)` is true and `assert_ne!(w1, w2)`. 
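Because equality is just integer comparison, interned ids also make cheap set or hash-map keys. As a hedged illustration (this helper is not part of the overview's running example; it relies on the `Copy`/`Eq`/`Hash` derives that Salsa generates for interned structs):

```rust
use std::collections::HashSet;

// Counting distinct identifiers needs no string comparisons: two `Word`s
// are equal exactly when their interned ids are equal.
fn count_distinct_names(names: &[Word]) -> usize {
    names.iter().copied().collect::<HashSet<Word>>().len()
}
```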
You can access the fields of an interned struct using a getter, like `word.text(db)`. These getters respect the `#[returns(ref)]` annotation. Like tracked structs, the fields of interned structs are immutable. ## Accumulators The final Salsa concept is the **accumulator**. Accumulators are a way to report errors or other "side channel" information that is separate from the main return value of your function. To create an accumulator, you declare a type as an _accumulator_: ```rust #[salsa::accumulator] pub struct Diagnostics(String); ``` It must be a newtype of something, like `String`. Now, during a tracked function's execution, you can push those values: ```rust Diagnostics::push(db, "some_string".to_string()) ``` Then later, from outside the execution, you can ask for the set of diagnostics that were accumulated by some particular tracked function. For example, imagine that we have a type-checker and, during type-checking, it reports some diagnostics: ```rust #[salsa::tracked] fn type_check(db: &dyn Db, item: Item) { // ... Diagnostics::push(db, "some error message".to_string()) // ... } ``` we can then later invoke the associated `accumulated` function to get all the `String` values that were pushed: ```rust let v: Vec<String> = type_check::accumulated::<Diagnostics>(db); ``` salsa-0.23.0/book/src/plumbing/cycles.md000064400000000000000000000033261046102023000161570ustar 00000000000000# Cycles ## Cross-thread blocking The interface for blocking across threads now works as follows: * When one thread `T1` wishes to block on a query `Q` being executed by another thread `T2`, it invokes `Runtime::try_block_on`. This will check for cycles. Assuming no cycle is detected, it will block `T1` until `T2` has completed with `Q`. At that point, `T1` reawakens. However, we don't know the result of executing `Q`, so `T1` now has to "retry". Typically, this will result in successfully reading the cached value. * While `T1` is blocking, the runtime moves its query stack (a `Vec<ActiveQuery>`) into the shared dependency graph data structure. When `T1` reawakens, it recovers ownership of its query stack before returning from `try_block_on`. ## Cycle detection When a thread `T1` attempts to execute a query `Q`, it will try to load the value for `Q` from the memoization tables. If it finds an `InProgress` marker, that indicates that `Q` is currently being computed. This indicates a potential cycle. `T1` will then try to block on the query `Q`: * If `Q` is also being computed by `T1`, then there is a cycle. * Otherwise, if `Q` is being computed by some other thread `T2`, we have to check whether `T2` is (transitively) blocked on `T1`. If so, there is a cycle. These two cases are handled internally by the `Runtime::try_block_on` function. Detecting the intra-thread cycle case is easy; to detect cross-thread cycles, the runtime maintains a dependency DAG between threads (identified by `RuntimeId`). Before adding an edge `T1 -> T2` (i.e., `T1` is blocked waiting for `T2`) into the DAG, it checks whether a path exists from `T2` to `T1`. If so, we have a cycle and the edge cannot be added (then the DAG would no longer be acyclic). 
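The following is a hedged sketch of that acyclicity check. All types here -- `DependencyGraph`, the `usize` stand-in for `RuntimeId`, `CycleDetected` -- are illustrative simplifications, not salsa's actual definitions, and the real implementation additionally manages the blocked threads' stacks and wakeups:

```rust,ignore
use std::collections::{HashMap, HashSet};

type RuntimeId = usize; // stand-in for salsa's RuntimeId
struct CycleDetected;

struct DependencyGraph {
    // edges[t] = set of threads that `t` is currently blocked on
    edges: HashMap<RuntimeId, HashSet<RuntimeId>>,
}

impl DependencyGraph {
    /// Depth-first search: can `from` (transitively) reach `to`?
    fn depends_on(&self, from: RuntimeId, to: RuntimeId) -> bool {
        let mut stack = vec![from];
        let mut visited = HashSet::new();
        while let Some(t) = stack.pop() {
            if t == to {
                return true;
            }
            if visited.insert(t) {
                if let Some(successors) = self.edges.get(&t) {
                    stack.extend(successors.iter().copied());
                }
            }
        }
        false
    }

    /// Record that `t1` blocks on `t2`, unless that would close a cycle.
    fn try_add_edge(&mut self, t1: RuntimeId, t2: RuntimeId) -> Result<(), CycleDetected> {
        if self.depends_on(t2, t1) {
            return Err(CycleDetected); // adding t1 -> t2 would create a cycle
        }
        self.edges.entry(t1).or_default().insert(t2);
        Ok(())
    }
}
```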
salsa-0.23.0/book/src/plumbing/database.md000064400000000000000000000061401046102023000164360ustar 00000000000000# Database Continuing our dissection, the other thing which a user must define is a **database**, which looks something like this: ```rust,ignore {{#include ../../../examples/hello_world/main.rs:database}} ``` The `salsa::database` procedural macro takes a list of query group structs (like `HelloWorldStorage`) and generates the following items: * a copy of the database struct it is applied to * a struct `__SalsaDatabaseStorage` that contains all the storage structs for each query group. Note: these are the structs full of hashmaps etc that are generated by the query group procedural macro, not the `HelloWorldStorage` struct itself. * an impl of `HasQueryGroup` for each query group `G` * an impl of `salsa::plumbing::DatabaseStorageTypes` for the database struct * an impl of `salsa::plumbing::DatabaseOps` for the database struct ## Key constraint: we do not know the names of individual queries There is one key constraint in the design here. None of this code knows the names of individual queries. It only knows the name of the query group storage struct. This means that we often delegate things to the group -- e.g., the database key is composed of group keys. This is similar to how none of the code in the query group knows the full set of query groups, and so it must use associated types from the `Database` trait whenever it needs to put something in a "global" context. ## The database storage struct The `__SalsaDatabaseStorage` struct concatenates all of the query group storage structs. In the hello world example, it looks something like: ```rust,ignore struct __SalsaDatabaseStorage { hello_world: <HelloWorldStorage as salsa::plumbing::QueryGroup>::GroupStorage } ``` We also generate a `Default` impl for `__SalsaDatabaseStorage`. It invokes a `new` method on each group storage with the unique index assigned to that group. This invokes the [inherent `new` method generated by the `#[salsa::query_group]` macro][new]. [new]: query_groups.md#group-storage ## The `HasQueryGroup` impl The `HasQueryGroup` trait allows a given query group to access its definition within the greater database. The impl is generated here: ```rust,ignore {{#include ../../../components/salsa-macros/src/database_storage.rs:HasQueryGroup}} ``` The `HasQueryGroup` impl combines with [the blanket impl] from the `#[salsa::query_group]` macro so that the database can implement the query group trait (e.g., the `HelloWorld` trait) but without knowing all the names of the query methods and the like. [the blanket impl]: query_groups.md#impl-of-the-hello-world-trait ## The `DatabaseStorageTypes` impl Then there are a variety of other impls, like this one for `DatabaseStorageTypes`: ```rust,ignore {{#include ../../../components/salsa-macros/src/database_storage.rs:DatabaseStorageTypes}} ``` ## The `DatabaseOps` impl Or this one for `DatabaseOps`, which defines the for-each method to invoke an operation on every kind of query in the database. It ultimately delegates to the `for_each` methods for the groups: ```rust,ignore {{#include ../../../components/salsa-macros/src/database_storage.rs:DatabaseOps}} ``` salsa-0.23.0/book/src/plumbing/database_and_runtime.md000064400000000000000000000075701046102023000210300ustar 00000000000000# Database and runtime A salsa database struct is declared by the user with the `#[salsa::db]` annotation. 
It contains all the data that the program needs to execute: ```rust,ignore #[salsa::db] struct MyDatabase { storage: Storage<Self>, maybe_other_fields: u32, } ``` This data is divided into two categories: - Salsa-governed storage, contained in the `Storage` field. This data is mandatory. - Other fields (like `maybe_other_fields`) defined by the user. This can be anything. This allows you to give access to special resources or whatever. ## Parallel handles When used across parallel threads, the database type defined by the user must support a "snapshot" operation. This snapshot should create a clone of the database that can be used by the parallel threads. The `Storage` operation itself supports `snapshot`. The `snapshot` method returns a `Snapshot` type, which prevents these clones from being accessed via an `&mut` reference. ## The Storage struct The salsa `Storage` struct contains all the data that salsa itself will use and work with. There are three key bits of data: - The `Shared` struct, which contains the data stored across all snapshots, such as synchronization information (a cond var). This is used for cancellation, as described below. - The data in the `Shared` struct is only shared across threads when other threads are active. Some operations, like mutating an input, require an `&mut` handle to the `Shared` struct. This is obtained by using the `Arc::get_mut` methods; obviously this is only possible when all snapshots and threads have ceased executing, since there must be a single handle to the `Arc`. - The `Routes` struct, which contains the information to find any particular ingredient -- this is also shared across all handles. The routes are separated out from the `Shared` struct because they are truly immutable at all times, and we want to be able to hold a handle to them while getting `&mut` access to the `Shared` struct. - The `Runtime` struct, which is specific to a particular database instance. It contains the data for a single active thread, along with some links to shared data of its own. ## Incrementing the revision counter Salsa's general model is that there is a single "master" copy of the database and, potentially, multiple snapshots. The snapshots are not directly owned, they are instead enclosed in a `Snapshot` type that permits only `&`-deref, and so the only database that can be accessed with an `&mut`-ref is the master database. Each of the snapshots, however, only holds another handle on the `Arc` in `Storage` that stores the ingredients. Whenever the user attempts to do an `&mut`-operation, such as modifying an input field, that needs to first cancel any parallel snapshots and wait for those parallel threads to finish. Once the snapshots have completed, we can use `Arc::get_mut` to get an `&mut` reference to the ingredient data. This allows us to get `&mut` access without any unsafe code and guarantees that we have successfully managed to cancel the other worker threads (or gotten ourselves into a deadlock). The key initial point is that it invokes `cancel_other_workers` before proceeding: ```rust {{#include ../../../src/storage.rs:cancel_other_workers}} ``` ## The Salsa runtime The salsa runtime offers helper methods that are accessed by the ingredients. It tracks, for example, the active query stack, and contains methods for adding dependencies between queries (e.g., `report_tracked_read`) or [resolving cycles](./cycles.md). It also tracks the current revision and information about when values with low or high durability last changed. 
Basically, the ingredient structures store the "data at rest" -- like memoized values -- and things that are "per ingredient". The runtime stores the "active, in-progress" data, such as which queries are on the stack, and/or the dependencies accessed by the currently active query. salsa-0.23.0/book/src/plumbing/db_lifetime.md000064400000000000000000000266611046102023000171470ustar 00000000000000# The `'db` lifetime [Tracked](./tracked_structs.md) and interned structs are both declared with a `'db` lifetime. This lifetime is linked to the `db: &DB` reference used to create them. The `'db` lifetime has several implications: * It ensures that the user does not create a new salsa revision while a tracked/interned struct is in active use. Creating a new salsa revision requires modifying an input which requires an `&mut DB` reference, therefore it cannot occur during `'db`. * The struct may not even exist in the new salsa revision so allowing access would be confusing. * It permits the structs to be implemented using a pointer rather than a `salsa::Id`, which in turn means more efficient field access (no read locks required). This section discusses the unsafe code used for pointer-based access along with the reasoning behind it. To be concrete, we'll focus on tracked structs -- interned structs are very similar. ## A note on UB When we say in this page "users cannot do X", we mean without Undefined Behavior (e.g., by transmuting integers around etc). ## Proof obligations Here is a typical sequence of operations for a tracked struct along with the user operations that will require us to prove unsafe assertions: * A tracked function `f` executes in revision R0 and creates a tracked struct with `#[id]` fields `K` for the first time. * `K` will be stored in the interning hashmap and mapped to a fresh identifier `id`. * The identifier `id` will be used as the key in the `StructMap` and point to a freshly created allocation `alloc : Alloc`. * A `ts: TS<'db>` is created from the raw pointer `alloc` and returned to the user. * The value of the field `field` is accessed on the tracked struct instance `ts` by invoking the method `ts.field(db)` * *Unsafe:* This accesses the raw pointer to `alloc`.* A new revision R1 begins. * The tracked function `f` does not re-execute in R1. * The value of the field `field` is accessed on the tracked struct instance `ts` by invoking the method `ts.field(db)` * *Unsafe:* This accesses the raw pointer to `alloc`.* A new revision R2 begins. * The tracked function `f` does reexecute in R2 and it again creates a tracked struct with key `K` and with (Some) distinct field values. * The fields for `ts` are updated. * The value of the field `field` is accessed on the tracked struct instance `ts` by invoking the method `ts.field(db)` * *Unsafe:* This accesses the raw pointer to `alloc`. * A new revision R3 begins. * When `f` executes this time it does NOT create a tracked struct with key `K`. The tracked struct `ts` is placed in the "to be deleted" list. * A new revision R4 begins: * The allocation `alloc` is freed. As noted in the list, the core "unsafe" operation that users can perform is to access the fields of a tracked struct. Tracked structs store a raw pointer to the `alloc`, owned by the ingredient, that contains their field data. 
Accessing the fields of a tracked struct returns a `&`-reference to fields stored in that `alloc`, which means we must ensure Rust's two core constraints are satisfied for the lifetime of that reference: * The allocation `alloc` will not be freed (i.e., not be dropped) * The contents of the fields will not be mutated As the sequence above illustrates, we have to show that those two constraints are true in a variety of circumstances: * newly created tracked structs * tracked structs that were created in prior revisions and re-validated in this revision * tracked structs whose fields were updated in this revision * tracked structs that were *not* created in this revision ## Definitions For every tracked struct `ts` we say that it has a **defining query** `f(..)`. This refers to a particular invocation of the tracked function `f` with a particular set of arguments `..`. This defining query is unique within a revision, meaning that `f` executes at most once with that same set of arguments. We say that a query has *executed in a revision R* if its function body was executed. When this occurs, all tracked structs defined (created) by that query will be recorded along with the query's result. We say that a query has been *validated in a revision R* if the salsa system determined that its inputs did not change and so skipped executing it. This also triggers the tracked structs defined by that query to be considered validated (in particular, we execute a function on them which updates some internal fields, as described below). When we talk about `ts`, we mean ## Theorem: At the start of a new revision, all references to `ts` are within salsa's database After `ts` is deleted, there may be other memoized values still reference `ts`, but they must have a red input query. **Is this true even if there are user bugs like non-deterministic functions?** Argument: yes, because of non-forgery, those memoized values could not be accessed. How did those memoized values obtain the `TS<'db>` value in the first place? It must have come from a function argument (XX: what about thread-local state). Therefore, to access the value, they would have to provide those function arguments again. But how did they get them? Potential holes: * Thread-local APIs that let you thread `'db` values down in an "invisible" way, so that you can return them without them showing up in your arguments -- e.g. a tracked function `() -> S<'db>` that obtains its value from thread-local state. * We might be able to sanity check against this with enough effort by defining some traits that guarantee that every lifetime tagged thing in your result *could have* come from one of your arguments, but I don't think we can prove it altogether. We either have to tell users "don't do that" or we need to have some kind of dynamic check, e.g. with a kind of versioned pointer. Note that it does require unsafe code at present but only because of the limits of our existing APIs. * Alternatively we can do a better job cleaning up deleted stuff. This we could do. * what about weird `Eq` implementations and the like? Do we have to make those unsafe? ## Theorem: To access a tracked struct `ts` in revision R, the defining query `f(..)` must have either *executed* or been *validated* in the revision R. This is the core bit of reasoning underlying most of what follows. The idea is that users cannot "forge" a tracked struct instance `ts`. They must have gotten it through salsa's internal mechanisms. 
This is important because salsa will provide `&`-references to fields within that remain valid during a revision. But at the start of a new revision salsa may opt to modify those fields or even free the allocation. This is safe because users cannot have references to `ts` at the start of a new revision. ### Lemma We will prove it by proceeding through the revisions in the life cycle above (this can be considered a proof by induction). ### Before `ts` is first created in R0 Users must have originally obtained `ts: TS<'db>` by invoking `TS::new(&db, ...)`. This is because creating an instance of `TS` requires providing a `NonNull` pointer to an unsafe function whose contract requires the pointer's validity. **FIXME:** This is not strictly true, I think the constructor is just a private tuple ctor, we should fix that. ### During R0 ### ### Inductive case: Consider some revision R We start by showing some circumstances that cannot occur: * accessing the field of a tracked struct `ts` that was never created * accessing the field of a tracked struct `ts` after it is freed ### Lemma (no forgery): Users cannot forge a tracked struct The first observation is that users cannot "forge" an instance of a tracked struct `ts`. They are required to produce a pointer to an `Alloc`. This implies that every tracked struct `ts` originated in the ingredient. The same is not true for input structs, for example, because they are created from integer identifiers and users could just make those up. ### Lemma (within one rev): Users cannot hold a tracked struct `ts` across revisions The lifetime `'db` of the tracked struct `ts: TS<'db>` is created from a `db: &'db dyn Db` handle. Beginning a new revision requires an `&mut` reference. Therefore so long as users are actively using the value `ts` the database cannot start a new revision. *Check:* What if users had two databases and invoked internal methods? Maybe they could then. We may have to add some assertions. ### Theorem: In order to get a tracked struct `ts` in revision R0, the tracked fn `f` that creates it must either *execute* or *be validated* first The two points above combine to ## Creating new values Each new value is stored in a `salsa::alloc::Alloc` created by `StructMap::insert`. `Alloc` is a variant of the standard Rust `Box` that carries no uniqueness implications. This means that every tracked struct has its own allocation. This allocation is owned by the tracked struct ingredient and thus stays live until the tracked struct ingredient is dropped or until it is removed (see later for safety conditions around removal). ## The user type uses a raw pointer The `#[salsa::tracked]` macro creates a user-exposed struct that looks roughly like this: ```rust // This struct is a wrapper around the actual fields that adds // some revision metadata. You can think of it as a newtype'd // version of the fields of the tracked struct. use salsa::tracked_struct::ValueStruct; struct MyTrackedStruct<'db> { value: *const ValueStruct<..>, phantom: PhantomData<&'db ValueStruct<...>> } ``` Key observations: * The actual pointer to the `ValueStruct` used at runtime is not a Rust reference but a raw pointer. This is needed for stacked borrows. * A `PhantomData` is used to keep the `'db` lifetime alive. The reason we use a raw pointer in the struct is because instances of this struct will outlive the `'db` lifetime. 
Consider this example: ```rust let mut db = MyDatabase::default(); let input = MyInput::new(&db, ...); // Revision 1: let result1 = tracked_fn(&db, input); // Revision 2: input.set_field(&mut db).to(...); let result2 = tracked_fn(&db, input); ``` Tracked structs created by `tracked_fn` during Revision 1 may be reused during Revision 2, but the original `&db` reference used to create them has expired. If we stored a true Rust reference, that would be a violation of the stacked borrows rules. Instead, we store a raw pointer and, whenever users invoke the accessor methods for particular fields, we create a new reference to the contents: ```rust impl<'db> MyTrackedStruct<'db> { fn field(self, db: &'db dyn DB) -> &'db FieldType { ... } } ``` This reference is linked to `db` and remains valid so long as the ## The `'db` lifetime at rest ## Updating tracked struct fields across revisions ### The `XX` ## Safety lemmas These lemmas are used to justify the safety of the system. ### Using `MyTracked<'db>` within some revision R always "happens after' a call to `MyTracked::new` Whenever a tracked struct instance `TS<'db>` is created for the first time in revision R1, the result is a fresh allocation and hence there cannot be any pre-existing aliases of that struct. `TS<'db>` will at that time be stored into the salsa database. In later revisions, we assert that ### `&'db T` references are never stored in the database We maintain the invariant that, in any later revision R2, However in some later revision R2, how ## Ways this could go wrong and how we prevent them ### ### Storing an `&'db T` into a field ### Freeing the memory while a tracked struct remains live ### Aliases of a tracked struct salsa-0.23.0/book/src/plumbing/derived_flowchart.md000064400000000000000000000007651046102023000203740ustar 00000000000000# Derived queries flowchart Derived queries are by far the most complex. This flowchart documents the flow of the [maybe changed after] and [fetch] operations. This flowchart can be edited on [draw.io]: [draw.io]: https://draw.io [fetch]: ./fetch.md [maybe changed after]: ./maybe_changed_after.md
![Flowchart](../derived-query-read.drawio.svg)
salsa-0.23.0/book/src/plumbing/diagram.md000064400000000000000000000065161046102023000163050ustar 00000000000000# Diagram This diagram shows the items that get generated from the Hello World query group and database struct. You can click on each item to be taken to the explanation of its purpose. The diagram is wide so be sure to scroll over! ```mermaid graph LR classDef diagramNode text-align:left; subgraph query group HelloWorldTrait["trait HelloWorld: Database + HasQueryGroup(HelloWorldStorage)"] HelloWorldImpl["impl<DB> HelloWorld for DB
where DB: HasQueryGroup(HelloWorldStorage)"] click HelloWorldImpl "http:query_groups.html#impl-of-the-hello-world-trait" "more info" HelloWorldStorage["struct HelloWorldStorage"] click HelloWorldStorage "http:query_groups.html#the-group-struct-and-querygroup-trait" "more info" QueryGroupImpl["impl QueryGroup for HelloWorldStorage
  type DynDb = dyn HelloWorld
  type Storage = HelloWorldGroupStorage__;"] click QueryGroupImpl "http:query_groups.html#the-group-struct-and-querygroup-trait" "more info" HelloWorldGroupStorage["struct HelloWorldGroupStorage__"] click HelloWorldGroupStorage "http:query_groups.html#group-storage" "more info" subgraph for each query... LengthQuery[struct LengthQuery] LengthQueryImpl["impl Query for LengthQuery
  type Key = ()
  type Value = usize
  type Storage = salsa::DerivedStorage(Self)
  type QueryGroup = HelloWorldStorage"] LengthQueryFunctionImpl["impl QueryFunction for LengthQuery
  fn execute(db: &dyn HelloWorld, key: ()) -> usize"] click LengthQuery "http:query_groups.html#for-each-query-a-query-struct" "more info" click LengthQueryImpl "http:query_groups.html#for-each-query-a-query-struct" "more info" click LengthQueryFunctionImpl "http:query_groups.html#for-each-query-a-query-struct" "more info" end class HelloWorldTrait,HelloWorldImpl,HelloWorldStorage,QueryGroupImpl,HelloWorldGroupStorage diagramNode; class LengthQuery,LengthQueryImpl,LengthQueryFunctionImpl diagramNode; end subgraph database DatabaseStruct["struct Database { .. storage: Storage(Self) .. }"] subgraph for each group... HasQueryGroup["impl plumbing::HasQueryGroup(HelloWorldStorage) for DatabaseStruct"] click HasQueryGroup "http:database.html#the-hasquerygroup-impl" "more info" end DatabaseStorageTypes["impl plumbing::DatabaseStorageTypes for DatabaseStruct
  type DatabaseStorage = __SalsaDatabaseStorage"] click DatabaseStorageTypes "http:database.html#the-databasestoragetypes-impl" "more info" DatabaseStorage["struct __SalsaDatabaseStorage"] click DatabaseStorage "http:database.html#the-database-storage-struct" "more info" DatabaseOps["impl plumbing::DatabaseOps for DatabaseStruct"] click DatabaseOps "http:database.html#the-databaseops-impl" "more info" class DatabaseStruct,DatabaseStorage,DatabaseStorageTypes,DatabaseOps,HasQueryGroup diagramNode; end subgraph salsa crate DerivedStorage["DerivedStorage"] class DerivedStorage diagramNode; end LengthQueryImpl --> DerivedStorage; DatabaseStruct -- "used by" --> HelloWorldImpl HasQueryGroup -- "used by" --> HelloWorldImpl ```salsa-0.23.0/book/src/plumbing/fetch.md000064400000000000000000000052661046102023000157730ustar 00000000000000# Fetch The `fetch` operation computes the value of a query. It prefers to reuse memoized values when it can. ## Input queries Input queries simply load the result from the table. ## Interned queries Interned queries map the input into a hashmap to find an existing integer. If none is present, a new value is created. ## Derived queries The logic for derived queries is more complex. We summarize the high-level ideas here, but you may find the [flowchart](./derived_flowchart.md) useful to dig deeper. The [terminology](./terminology.md) section may also be useful; in some cases, we link to that section on the first usage of a word. * If an existing [memo] is found, then we check if the memo was [verified] in the current [revision]. If so, we can directly return the memoized value. * Otherwise, if the memo contains a memoized value, we must check whether [dependencies] have been modified: * Let R be the revision in which the memo was last verified; we wish to know if any of the dependencies have changed since revision R. * First, we check the [durability]. For each memo, we track the minimum durability of the memo's dependencies. If the memo has durability D, and there have been no changes to an input with durability D since the last time the memo was verified, then we can consider the memo verified without any further work. * If the durability check is not sufficient, then we must check the dependencies individually. For this, we iterate over each dependency D and invoke the [maybe changed after](./maybe_changed_after.md) operation to check whether D has changed since the revision R. * If no dependency was modified: * We can mark the memo as verified and return its memoized value. * Assuming dependencies have been modified or the memo does not contain a memoized value: * Then we execute the user's query function. * Next, we compute the revision in which the memoized value last changed: * *Backdate:* If there was a previous memoized value, and the new value is equal to that old value, then we can *backdate* the memo, which means to use the 'changed at' revision from before. * Thanks to backdating, it is possible for a dependency of the query to have changed in some revision R1 but for the *output* of the query to have changed in some revision R2 where R2 predates R1. * Otherwise, we use the current revision. * Construct a memo for the new value and return it. 
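Putting these steps together, the sketch below models the overall shape of the derived-query fetch logic, including backdating. This is an illustrative model only, not salsa's actual implementation: the `Revision` and `Memo` types and the two stub functions are hypothetical stand-ins, and real memos also track durability and the full dependency set.

```rust
// Illustrative model only -- not salsa's real code.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Revision(u32);

#[derive(Clone, Debug)]
struct Memo {
    value: Option<String>, // memoized value, if it was retained
    verified_at: Revision, // revision in which the memo was last verified
    changed_at: Revision,  // revision in which the value last changed
}

fn fetch(memo: Option<Memo>, current: Revision) -> Memo {
    if let Some(memo) = memo {
        // Verified in the current revision: reuse the value directly.
        if memo.verified_at == current {
            return memo;
        }
        // Durability and per-dependency checks, collapsed into one stub:
        // if nothing changed since `verified_at`, just mark as verified.
        if memo.value.is_some() && !deps_maybe_changed_after(memo.verified_at) {
            return Memo { verified_at: current, ..memo };
        }
        // Otherwise re-execute, *backdating* if the new value is equal
        // to the old one.
        let new_value = execute_query();
        let changed_at = match &memo.value {
            Some(old) if *old == new_value => memo.changed_at, // backdate
            _ => current,
        };
        return Memo {
            value: Some(new_value),
            verified_at: current,
            changed_at,
        };
    }
    // No memo at all: execute from scratch.
    Memo {
        value: Some(execute_query()),
        verified_at: current,
        changed_at: current,
    }
}

// Hypothetical stubs standing in for the real dependency checks and
// for the user's query function.
fn deps_maybe_changed_after(_since: Revision) -> bool {
    false
}
fn execute_query() -> String {
    "value".to_string()
}
```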
[durability]: ./terminology/durability.md
[backdate]: ./terminology/backdate.md
[dependency]: ./terminology/dependency.md
[dependencies]: ./terminology/dependency.md
[memo]: ./terminology/memo.md
[revision]: ./terminology/revision.md
[verified]: ./terminology/verified.md

salsa-0.23.0/book/src/plumbing/generated_code.md000064400000000000000000000015661046102023000176310ustar 00000000000000# Generated code

This page walks through the ["Hello, World!"] example and explains the code that it generates. Please take it with a grain of salt: while we make an effort to keep this documentation up to date, this sort of thing can fall out of date easily. See the page history below for major updates.

["Hello, World!"]: https://github.com/salsa-rs/salsa/blob/master/examples/hello_world/main.rs

If you'd like to see for yourself, you can set the environment variable `SALSA_DUMP` to 1 while the procedural macro runs, and it will dump the full output to stdout. I recommend piping the output through rustfmt.

## Sources

The main parts of the source that we are focused on are as follows.

### Query group

```rust,ignore
{{#include ../../../examples/hello_world/main.rs:trait}}
```

### Database

```rust,ignore
{{#include ../../../examples/hello_world/main.rs:database}}
```

salsa-0.23.0/book/src/plumbing/maybe_changed_after.md000064400000000000000000000051311046102023000206200ustar 00000000000000# Maybe changed after

The `maybe_changed_after` operation computes whether a query's value *may have changed* **after** the given revision. In other words, `Q.maybe_changed_after(R)` is true if the value of the query `Q` may have changed in the revisions `(R+1)..R_now`, where `R_now` is the current revision. Note that it doesn't make sense to ask `maybe_changed_after(R_now)`.

## Input queries

Input queries are set explicitly by the user. `maybe_changed_after` can therefore just check when the value was last set and compare.

## Interned queries

## Derived queries

The logic for derived queries is more complex. We summarize the high-level ideas here, but you may find the [flowchart](./derived_flowchart.md) useful to dig deeper. The [terminology](./terminology.md) section may also be useful; in some cases, we link to that section on the first usage of a word.

* If an existing [memo] is found, then we check if the memo was [verified] in the current [revision]. If so, we can compare its [changed at] revision and return true or false appropriately.
* Otherwise, we must check whether [dependencies] have been modified:
    * Let R be the revision in which the memo was last verified; we wish to know if any of the dependencies have changed since revision R.
    * First, we check the [durability]. For each memo, we track the minimum durability of the memo's dependencies. If the memo has durability D, and there have been no changes to an input with durability D since the last time the memo was verified, then we can consider the memo verified without any further work.
    * If the durability check is not sufficient, then we must check the dependencies individually. For this, we iterate over each dependency D and invoke the [maybe changed after](./maybe_changed_after.md) operation to check whether D has changed since the revision R.
* If no dependency was modified:
    * We can mark the memo as verified and use its [changed at] revision to return true or false.
* Assuming dependencies have been modified:
    * Then we execute the user's query function (same as in [fetch]), which potentially [backdates] the resulting value.
    * Compare the [changed at] revision in the resulting memo and return true or false.

[changed at]: ./terminology/changed_at.md
[durability]: ./terminology/durability.md
[backdate]: ./terminology/backdate.md
[backdates]: ./terminology/backdate.md
[dependency]: ./terminology/dependency.md
[dependencies]: ./terminology/dependency.md
[memo]: ./terminology/memo.md
[revision]: ./terminology/revision.md
[verified]: ./terminology/verified.md
[fetch]: ./fetch.md
[LRU]: ./terminology/LRU.md

salsa-0.23.0/book/src/plumbing/query_groups.md000064400000000000000000000162021046102023000174360ustar 00000000000000# Query groups and query group structs

When you define a query group trait:

```rust,ignore
{{#include ../../../examples/hello_world/main.rs:trait}}
```

the `salsa::query_group` macro generates a number of things, shown in the sample generated code below (details in the sections to come). Note that there are a number of structs and types (e.g., the group descriptor and associated storage struct) that represent things which don't have "public" names. We currently generate mangled names with `__` afterwards, but those names are not meant to be exposed to the user (ideally we'd use hygiene to enforce this).

```rust,ignore
// First, a copy of the trait, though with extra supertraits and
// sometimes with some extra methods (e.g., `set_input_string`)
trait HelloWorld: salsa::Database + salsa::plumbing::HasQueryGroup<HelloWorldStorage> {
    fn input_string(&self, key: ()) -> Arc<String>;
    fn set_input_string(&mut self, key: (), value: Arc<String>);
    fn length(&self, key: ()) -> usize;
}

// Next, the "query group struct", whose name was given by the
// user. This struct implements the `QueryGroup` trait which
// defines a few associated types common to the entire group.
struct HelloWorldStorage { }
impl salsa::plumbing::QueryGroup for HelloWorldStorage {
    type DynDb = dyn HelloWorld;
    type GroupStorage = HelloWorldGroupStorage__;
}

// Next, a blanket impl of the `HelloWorld` trait. This impl
// works for any database `DB` that implements the
// appropriate `HasQueryGroup`.
impl<DB> HelloWorld for DB
where
    DB: salsa::Database,
    DB: salsa::plumbing::HasQueryGroup<HelloWorldStorage>,
{
    ...
}

// Next, for each query, a "query struct" that represents it.
// The query struct has inherent methods like `in_db` and
// implements the `Query` trait, which defines various
// details about the query (e.g., its key, value, etc).
pub struct InputQuery { }
impl InputQuery { /* definition for `in_db`, etc */ }
impl salsa::Query for InputQuery { /* associated types */ }

// Same as above, but for the derived query `length`.
// For derived queries, we also implement `QueryFunction`
// which defines how to execute the query.
pub struct LengthQuery { }
impl salsa::Query for LengthQuery { ... }
impl salsa::QueryFunction for LengthQuery { ... }

// Finally, the group storage, which contains the actual
// hashmaps and other data used to implement the queries.
struct HelloWorldGroupStorage__ { .. }
```

## The group struct and `QueryGroup` trait

The group struct is the only thing we generate whose name is known to the user. For a query group named `Foo`, it is conventionally called `FooStorage`, hence the name `HelloWorldStorage` in our example. Despite the name "Storage", the struct itself has no fields. It exists only to implement the `QueryGroup` trait.
This *trait* has a number of associated types that reference various bits of the query group, including the actual "group storage" struct:

```rust,ignore
struct HelloWorldStorage { }
impl salsa::plumbing::QueryGroup for HelloWorldStorage {
    type DynDb = dyn HelloWorld;
    type GroupStorage = HelloWorldGroupStorage__; // generated struct
}
```

We'll go into detail on these types below and the role they play, but one that we didn't mention yet is `GroupData`. That is a kind of hack used to manage send/sync around slots, and it gets covered in the section on slots.

## Impl of the hello world trait

Ultimately, every salsa query group is going to be implemented by your final database type, which is not currently known to us (it is created by combining multiple salsa query groups). In fact, this salsa query group could be composed into multiple database types. However, we want to generate the impl of the query-group trait here in this crate, because this is the point where the trait definition is visible and known to us (otherwise, we'd have to duplicate the method definitions).

So what we do is that we define a different trait, called `plumbing::HasQueryGroup`, that can be implemented by the database type. `HasQueryGroup` is generic over the query group struct. So then we can provide an impl of `HelloWorld` for any database type `DB` where `DB: HasQueryGroup<HelloWorldStorage>`. This `HasQueryGroup` defines a few methods that, given a `DB`, give access to the data for the query group and a few other things.

Thus we can generate an impl that looks like:

```rust,ignore
impl<DB> HelloWorld for DB
where
    DB: salsa::Database,
    DB: salsa::plumbing::HasQueryGroup<HelloWorldStorage>,
{
    ...

    fn length(&self, key: ()) -> usize {
        <Self as salsa::plumbing::GetQueryTable<HelloWorldLength__>>::get_query_table(self).get(())
    }
}
```

You can see that the various methods just hook into generic functions in the `salsa::plumbing` module. These functions are generic over the query types (`HelloWorldLength__`) that will be described shortly. The details of the "query table" are covered in a future section, but in short this code pulls out the hashmap for storing the `length` results and invokes the generic salsa logic to check for a valid result, etc.

## For each query, a query struct

As we referenced in the previous section, each query in the trait gets a struct that represents it. This struct is named after the query, converted into camel case and with the word `Query` appended. In typical Salsa workflows, these structs are not meant to be named or used, but in some cases it may be required. For the `length` query, e.g., this struct might look something like:

```rust,ignore
struct LengthQuery { }
```

The struct also implements the `plumbing::Query` trait, which defines a bunch of metadata about the query (and repeats, for convenience, some of the data about the group that the query is in):

```rust,ignore
{{#include ../../../components/salsa-macros/src/query_group.rs:Query_impl}}
```

Depending on the kind of query, we may also generate other impls, such as an impl of `salsa::plumbing::QueryFunction`, which defines the methods for executing the body of a query. This impl would then include a call to the user's actual function.

```rust,ignore
{{#include ../../../components/salsa-macros/src/query_group.rs:QueryFunction_impl}}
```

## Group storage

The "group storage" is the actual struct that contains all the hashtables and so forth for each query. The types of these are ultimately defined by the `Storage` associated type for each query type.
The struct is generic over the final database type:

```rust,ignore
struct HelloWorldGroupStorage__<DB> {
    input: <InputQuery as salsa::Query>::Storage,
}
```

We also generate some inherent methods. First, a `new` method that takes the group index as a parameter and passes it along to each of the query storage `new` methods:

```rust,ignore
{{#include ../../../components/salsa-macros/src/query_group.rs:group_storage_new}}
```

And then various methods that will dispatch from a `DatabaseKeyIndex` that corresponds to this query group into the appropriate query within the group. Each has a similar structure of matching on the query index and then delegating to some method defined by the query storage:

```rust,ignore
{{#include ../../../components/salsa-macros/src/query_group.rs:group_storage_methods}}
```

salsa-0.23.0/book/src/plumbing/query_ops.md000064400000000000000000000005511046102023000167200ustar 00000000000000# Query operations

The most important basic operations that all queries support are:

* [maybe changed after](./maybe_changed_after.md): Returns true if the value of the query (for the given key) may have changed since the given revision.
* [Fetch](./fetch.md): Returns the up-to-date value for the given K (or an error in the case of an "unrecovered" cycle).

salsa-0.23.0/book/src/plumbing/salsa_crate.md000064400000000000000000000051151046102023000171540ustar 00000000000000# Runtime

This section documents the contents of the salsa crate. The salsa crate contains code that interacts with the [generated code] to create the complete "salsa experience".

[generated code]: ./generated_code.md

## Major types

The crate has a few major types.

### The [`salsa::Storage`] struct

The [`salsa::Storage`] struct is what users embed into their database. It consists of two main parts:

* The "query store", which is the [generated storage struct](./database.md#the-database-storage-struct).
* The [`salsa::Runtime`].

### The [`salsa::Runtime`] struct

The [`salsa::Runtime`] struct stores the data that is used to track which queries are being executed and to coordinate between them. The `Runtime` is embedded within the [`salsa::Storage`] struct.

**Important**. The `Runtime` does **not** store the actual data from the queries; they live alongside it in the [`salsa::Storage`] struct. This ensures that the type of `Runtime` is not generic, which is needed to ensure dyn safety.

#### Threading

There is one [`salsa::Runtime`] for each active thread, and each of them has a unique [`RuntimeId`]. The `Runtime` state itself is divided into:

* `SharedState`, accessible from all runtimes;
* `LocalState`, accessible only from this runtime.

[`salsa::Runtime`]: https://docs.rs/salsa/latest/salsa/struct.Runtime.html
[`salsa::Storage`]: https://docs.rs/salsa/latest/salsa/struct.Storage.html
[`RuntimeId`]: https://docs.rs/salsa/0.16.1/salsa/struct.RuntimeId.html

### Query storage implementations and support code

For each kind of query (input, derived, interned, etc) there is a corresponding "storage struct" that contains the code to implement it. For example, derived queries are implemented by the `DerivedStorage` struct found in the [`salsa::derived`] module.

[`salsa::derived`]: https://github.com/salsa-rs/salsa/blob/master/src/derived.rs

Storage structs like `DerivedStorage` are generic over a query type `Q`, which corresponds to the [query structs] in the generated code. The query structs implement the `Query` trait which gives basic info such as the key and value type of the query and its ability to recover from cycles.
In some cases, the `Q` type is expected to implement additional traits: derived queries, for example, implement `QueryFunction`, which defines the code that will execute when the query is called.

[query structs]: ./query_groups.md#for-each-query-a-query-struct

The storage structs, in turn, implement key traits from the plumbing module. The most notable is the `QueryStorageOps`, which defines the [basic operations that can be done on a query](./query_ops.md).

salsa-0.23.0/book/src/plumbing/terminology/LRU.md000064400000000000000000000010461046102023000177040ustar 00000000000000# LRU

The [`set_lru_capacity`](https://docs.rs/salsa/0.16.1/salsa/struct.QueryTableMut.html#method.set_lru_capacity) method can be used to fix the maximum capacity for a query at a specific number of values. If more values are added after that point, then salsa will drop the values from older [memos] to conserve memory (we always retain the [dependency] information for those memos, however, so that we can still compute whether values may have changed, even if we don't know what that value is).

[memos]: ./memo.md
[dependency]: ./dependency.md

salsa-0.23.0/book/src/plumbing/terminology/backdate.md000064400000000000000000000006211046102023000207760ustar 00000000000000# Backdate

*Backdating* is when we mark a value that was computed in revision R as having last changed in some earlier revision. This is done when we have an older [memo] M and we can compare the two values to see that, while the [dependencies] to M may have changed, the result of the [query function] did not.

[memo]: ./memo.md
[dependencies]: ./dependency.md
[query function]: ./query_function.md

salsa-0.23.0/book/src/plumbing/terminology/changed_at.md000064400000000000000000000005711046102023000213210ustar 00000000000000# Changed at

The *changed at* revision for a [memo] is the [revision] in which that memo's value last changed. Typically, this is the same as the revision in which the [query function] was last executed, but it may be an earlier revision if the memo was [backdated].

[query function]: ./query_function.md
[backdated]: ./backdate.md
[revision]: ./revision.md
[memo]: ./memo.md

salsa-0.23.0/book/src/plumbing/terminology/dependency.md000064400000000000000000000003421046102023000213560ustar 00000000000000# Dependency

A *dependency* of a [query] Q is some other query Q1 that was invoked as part of computing the value for Q (typically, invoked by Q's [query function]).

[query]: ./query.md
[query function]: ./query_function.md

salsa-0.23.0/book/src/plumbing/terminology/derived_query.md000064400000000000000000000006261046102023000221140ustar 00000000000000# Derived query

A *derived query* is a [query] whose value is defined by the result of a user-provided [query function]. That function is executed to get the result of the query. Unlike [input queries], the result of a derived query can always be recomputed whenever needed simply by re-executing the function.

[query]: ./query.md
[query function]: ./query_function.md
[input queries]: ./input_query.md

salsa-0.23.0/book/src/plumbing/terminology/durability.md000064400000000000000000000002571046102023000214150ustar 00000000000000# Durability

*Durability* is an optimization that we use to avoid checking the [dependencies] of a [query] individually.
[dependencies]: ./dependency.md
[query]: ./query.md

salsa-0.23.0/book/src/plumbing/terminology/ingredient.md000064400000000000000000000001561046102023000213730ustar 00000000000000# Ingredient

An *ingredient* is an individual piece of storage used to create a [salsa item](./salsa_item.md).

salsa-0.23.0/book/src/plumbing/terminology/input_query.md000064400000000000000000000003071046102023000216250ustar 00000000000000# Input query

An *input query* is a [query] whose value is explicitly set by the user. When that value is set, a [durability] can also be provided.

[query]: ./query.md
[durability]: ./durability.md

salsa-0.23.0/book/src/plumbing/terminology/memo.md000064400000000000000000000021741046102023000202020ustar 00000000000000# Memo

A *memo* stores information about the last time that a [query function] for some [query] Q was executed:

* Typically, it contains the value that was returned from that function, so that we don't have to execute it again.
    * However, this is not always true: some queries don't cache their result values, and values can also be dropped as a result of [LRU] collection. In those cases, the memo just stores [dependency] information, which can still be useful to determine if other queries that have Q as a [dependency] may have changed.
* The revision in which the memo was last [verified].
* The [changed at] revision in which the memo's value last changed. (Note that it may be [backdated].)
* The minimum durability of the memo's [dependencies].
* The complete set of [dependencies], if available, or a marker that the memo has an [untracked dependency].

[revision]: ./revision.md
[backdated]: ./backdate.md
[dependencies]: ./dependency.md
[dependency]: ./dependency.md
[untracked dependency]: ./untracked.md
[verified]: ./verified.md
[query]: ./query.md
[query function]: ./query_function.md
[changed at]: ./changed_at.md
[LRU]: ./LRU.md

salsa-0.23.0/book/src/plumbing/terminology/query.md000064400000000000000000000000101046102023000203750ustar 00000000000000# Query

salsa-0.23.0/book/src/plumbing/terminology/query_function.md000064400000000000000000000011351046102023000223130ustar 00000000000000# Query function

The *query function* is the user-provided function that we execute to compute the value of a [derived query]. Salsa assumes that all query functions are a 'pure' function of their [dependencies] unless the user reports an [untracked read]. Salsa always assumes that functions have no important side-effects (i.e., that they don't send messages over the network whose results you wish to observe) and thus that it doesn't have to re-execute functions unless it needs their return value.

[derived query]: ./derived_query.md
[dependencies]: ./dependency.md
[untracked read]: ./untracked.md

salsa-0.23.0/book/src/plumbing/terminology/revision.md000064400000000000000000000003421046102023000210760ustar 00000000000000# Revision

A *revision* is a monotonically increasing integer that we use to track the "version" of the database. Each time the value of an [input query] is modified, we create a new revision.
[input query]: ./input_query.md

salsa-0.23.0/book/src/plumbing/terminology/salsa_item.md000064400000000000000000000001721046102023000213620ustar 00000000000000# Salsa item

A salsa item is something that is decorated with a `#[salsa::foo]` macro, like a tracked function or struct.

salsa-0.23.0/book/src/plumbing/terminology/salsa_struct.md000064400000000000000000000003231046102023000217460ustar 00000000000000# Salsa struct

A salsa struct is a struct decorated with one of the salsa macros:

* `#[salsa::tracked]`
* `#[salsa::input]`
* `#[salsa::interned]`

See the [salsa overview](../../overview.md) for more details.

salsa-0.23.0/book/src/plumbing/terminology/untracked.md000064400000000000000000000013341046102023000212220ustar 00000000000000# Untracked dependency

An *untracked dependency* is an indication that the result of a [derived query] depends on something not visible to the salsa database. Untracked dependencies are created by invoking [`report_untracked_read`](https://docs.rs/salsa/0.16.1/salsa/struct.Runtime.html#method.report_untracked_read) or [`report_synthetic_read`](https://docs.rs/salsa/0.16.1/salsa/struct.Runtime.html#method.report_synthetic_read). When an untracked dependency is present, [derived queries] are always re-executed if the durability check fails (see the description of the [fetch operation] for more details).

[derived query]: ./derived_query.md
[derived queries]: ./derived_query.md
[fetch operation]: ../fetch.md#derived-queries

salsa-0.23.0/book/src/plumbing/terminology/verified.md000064400000000000000000000007731046102023000210430ustar 00000000000000# Verified

A [memo] is *verified* in a revision R if we have checked that its value is still up-to-date (i.e., if we were to reexecute the [query function], we are guaranteed to get the same result). Each memo tracks the revision in which it was last verified to avoid repeatedly checking whether dependencies have changed during the [fetch] and [maybe changed after] operations.

[query function]: ./query_function.md
[fetch]: ../fetch.md
[maybe changed after]: ../maybe_changed_after.md
[memo]: ./memo.md

salsa-0.23.0/book/src/plumbing/terminology.md000064400000000000000000000000161046102023000172360ustar 00000000000000# Terminology

salsa-0.23.0/book/src/plumbing/tracked_structs.md000064400000000000000000000042701046102023000201000ustar 00000000000000# Tracked structs

Tracked structs are stored in a special way to reduce their costs.

Tracked structs are created via a `new` operation.

## The tracked struct and tracked field ingredients

For a single tracked struct we create multiple ingredients. The **tracked struct ingredient** is the ingredient created first. It offers methods to create new instances of the struct and therefore has unique access to the interner and hashtables used to create the struct id. It also shares access to a hashtable that stores the `ValueStruct` that contains the field data.

For each field, we create a **tracked field ingredient** that moderates access to a particular field. All of these ingredients use that same shared hashtable to access the `ValueStruct` instance for a given id. The `ValueStruct` contains both the field values and the revisions when they last changed.

## Each tracked struct has a globally unique id

Creating a tracked struct begins with the creation of a _globally unique, 32-bit id_ for it. It is created by interning a combination of

- the currently executing query;
- a u64 hash of the `#[id]` fields;
- a _disambiguator_ that makes this hash unique within the current query.
i.e., when a query starts executing, it creates an empty map, and the first time a tracked struct with a given hash is created, it gets disambiguator 0. The next one will be given 1, etc.

## Each tracked struct has a `ValueStruct` storing its data

The struct and field ingredients share access to a hashmap that maps each struct id to a value struct:

```rust,ignore
{{#include ../../../src/tracked_struct.rs:ValueStruct}}
```

The value struct stores the values of the fields but also the revisions when each field last changed. Each time the struct is recreated in a new revision, the old and new values for its fields are compared and the 'last changed' revisions are updated.

## The macro generates the tracked struct `Configuration`

The "configuration" for a tracked struct defines not only the types of the fields, but also various important operations such as extracting the hashable id fields and updating the "revisions" to track when a field last changed:

```rust,ignore
{{#include ../../../src/tracked_struct.rs:Configuration}}
```

salsa-0.23.0/book/src/plumbing.md000064400000000000000000000022401046102023000146700ustar 00000000000000# Plumbing

This chapter documents the code that salsa generates and its "inner workings". We refer to this as the "plumbing".

## Overview

The plumbing section is broken up into chapters:

- The [database and runtime](./plumbing/database_and_runtime.md) covers the data structures that are used at runtime to coordinate workers, trigger cancellation, track which functions are active and what dependencies they have accrued, and so forth.
- The [query operations](./plumbing/query_ops.md) chapter describes how the major operations on function ingredients work. This text was written for an older version of salsa but the logic is the same:
    - The [maybe changed after](./plumbing/maybe_changed_after.md) operation determines when a memoized value for a tracked function is out of date.
    - The [fetch](./plumbing/fetch.md) operation computes the most recent value.
    - The [derived queries flowchart](./plumbing/derived_flowchart.md) depicts the logic in flowchart form.
- The [cycle handling](./plumbing/cycles.md) chapter describes what happens when cycles occur.
- The [terminology](./plumbing/terminology.md) section describes various words that appear throughout.

salsa-0.23.0/book/src/reference/algorithm.md000064400000000000000000000073121046102023000170030ustar 00000000000000# The "red-green" algorithm

This page explains the basic Salsa incremental algorithm. The algorithm is called the "red-green" algorithm, which is where the name Salsa comes from.

### Database revisions

The Salsa database always tracks a single **revision**. Each time you set an input, the revision is incremented. So we start in revision `R1`, but when a `set` method is called, we will go to `R2`, then `R3`, and so on. For each input, we also track the revision in which it was last changed.

### Basic rule: when inputs change, re-execute!

When you invoke a tracked function, in addition to storing the value that was returned, we also track what _other_ tracked functions it depends on, and the revisions when their value last changed. When you invoke the function again, if the database is in a new revision, then we check whether any of the inputs to this function have changed in that new revision. If not, we can just return our cached value. But if the inputs _have_ changed (or may have changed), we will re-execute the function to find the most up-to-date answer.
Here is a simple example, where the `parse_module` function invokes the `module_text` function: ```rust #[salsa::tracked] fn parse_module(db: &dyn Db, module: Module) -> Ast { let module_text: &String = module_text(db, module); Ast::parse_text(module_text) } #[salsa::tracked(returns(ref))] fn module_text(db: &dyn Db, module: Module) -> String { panic!("text for module `{module:?}` not set") } ``` If we invoke `parse_module` twice, but change the module text in between, then we will have to re-execute `parse_module`: ```rust module_text::set( db, module, "fn foo() { }".to_string(), ); parse_module(db, module); // executes // ...some time later... module_text::set( db, module, "fn foo() { /* add a comment */ }".to_string(), ); parse_module(db, module); // executes again! ``` ### Backdating: sometimes we can be smarter Often, though, tracked functions don't depend directly on the inputs. Instead, they'll depend on some other tracked function. For example, perhaps we have a `type_check` function that reads the AST: ```rust #[salsa::tracked] fn type_check(db: &dyn Db, module: Module) { let ast = parse_module(db, module); ... } ``` If the module text is changed, we saw that we have to re-execute `parse_module`, but there are many changes to the source text that still produce the same AST. For example, suppose we simply add a comment? In that case, if `type_check` is called again, we will: - First re-execute `parse_module`, since its input changed. - We will then compare the resulting AST. If it's the same as last time, we can _backdate_ the result, meaning that we say that, even though the inputs changed, the output didn't. ## Durability: an optimization As an optimization, Salsa includes the concept of **durability**, which is the notion of how often some piece of tracked data changes. For example, when compiling a Rust program, you might mark the inputs from crates.io as _high durability_ inputs, since they are unlikely to change. The current workspace could be marked as _low durability_, since changes to it are happening all the time. When you set the value of a tracked function, you can also set it with a given _durability_: ```rust module_text::set_with_durability( db, module, "fn foo() { }".to_string(), salsa::Durability::HIGH ); ``` For each durability, we track the revision in which _some input_ with that durability changed. If a tracked function depends (transitively) only on high durability inputs, and you change a low durability input, then we can very easily determine that the tracked function result is still valid, avoiding the need to traverse the input edges one by one. salsa-0.23.0/book/src/reference/durability.md000064400000000000000000000012121046102023000171560ustar 00000000000000# Durability "Durability" is an optimization that can greatly improve the performance of your salsa programs. Durability specifies the probability that an input's value will change. The default is "low durability". But when you set the value of an input, you can manually specify a higher durability, typically `Durability::HIGH`. Salsa tracks when tracked functions only consume values of high durability and, if no high durability input has changed, it can skip traversing their dependencies. 
Typically "high durability" values are things like data read from the standard library or other inputs that aren't actively being edited by the end user.salsa-0.23.0/book/src/reference.md000064400000000000000000000000141046102023000150050ustar 00000000000000# Reference salsa-0.23.0/book/src/tuning.md000064400000000000000000000026731046102023000143700ustar 00000000000000# Tuning Salsa ## LRU Cache You can specify an LRU cache size for any non-input query: ```rs let lru_capacity: usize = 128; base_db::ParseQuery.in_db_mut(self).set_lru_capacity(lru_capacity); ``` The default is `0`, which disables LRU-caching entirely. Note that there is no garbage collection for keys and results of old queries, so LRU caches are currently the only knob available for avoiding unbounded memory usage for long-running apps built on Salsa. ## Intern Queries Intern queries can make key lookup cheaper, save memory, and avoid the need for [`Arc`](https://doc.rust-lang.org/std/sync/struct.Arc.html). Interning is especially useful for queries that involve nested, tree-like data structures. See: - The [`compiler` example](https://github.com/salsa-rs/salsa/blob/master/examples/compiler/main.rs), which uses interning. ## Cancellation Queries that are no longer needed due to concurrent writes or changes in dependencies are cancelled by Salsa. Each access of an intermediate query is a potential cancellation point. Cancellation is implemented via panicking, and Salsa internals are intended to be panic-safe. If you have a query that contains a long loop which does not execute any intermediate queries, salsa won't be able to cancel it automatically. You may wish to check for cancellation yourself by invoking `db.unwind_if_cancelled()`. For more details on cancellation, see the tests for cancellation behavior in the Salsa repo. salsa-0.23.0/book/src/tutorial/accumulators.md000064400000000000000000000032111046102023000174160ustar 00000000000000# Defining the parser: reporting errors The last interesting case in the parser is how to handle a parse error. Because Salsa functions are memoized and may not execute, they should not have side-effects, so we don't just want to call `eprintln!`. If we did so, the error would only be reported the first time the function was called, but not on subsequent calls in the situation where the simply returns its memoized value. Salsa defines a mechanism for managing this called an **accumulator**. In our case, we define an accumulator struct called `Diagnostics` in the `ir` module: ```rust {{#include ../../../examples/calc/ir.rs:diagnostic}} ``` Accumulator structs are always newtype structs with a single field, in this case of type `Diagnostic`. Memoized functions can _push_ `Diagnostic` values onto the accumulator. Later, you can invoke a method to find all the values that were pushed by the memoized functions or any functions that they called (e.g., we could get the set of `Diagnostic` values produced by the `parse_statements` function). The `Parser::report_error` method contains an example of pushing a diagnostic: ```rust {{#include ../../../examples/calc/parser.rs:report_error}} ``` To get the set of diagnostics produced by `parse_errors`, or any other memoized function, we invoke the associated `accumulated` function: ```rust let accumulated: Vec = parse_statements::accumulated::(db); // ----------- // Use turbofish to specify // the diagnostics type. ``` `accumulated` takes the database `db` as argument and returns a `Vec`. 
salsa-0.23.0/book/src/tutorial/checker.md000064400000000000000000000000271046102023000163220ustar 00000000000000# Defining the checker

salsa-0.23.0/book/src/tutorial/db.md000064400000000000000000000020761046102023000153110ustar 00000000000000# Defining the database struct

First, we need to create the **database struct**. Typically it is only used by the "driver" of your application; the one which starts up the program, supplies the inputs, and relays the outputs.

In `calc`, the database struct is in the [`db`] module, and it looks like this:

[`db`]: https://github.com/salsa-rs/salsa/blob/master/examples/calc/db.rs

```rust
{{#include ../../../examples/calc/db.rs:db_struct}}
```

The `#[salsa::db]` attribute marks the struct as a database. It must have a field named `storage` whose type is `salsa::Storage<Self>`, but it can also contain whatever other fields you want.

## Implementing the `salsa::Database` trait

In addition to the struct itself, we must add an impl of `salsa::Database`:

```rust
{{#include ../../../examples/calc/db.rs:db_impl}}
```

## Implementing the `salsa::ParallelDatabase` trait

If you want to permit accessing your database from multiple threads at once, then you also need to implement the `ParallelDatabase` trait:

```rust
{{#include ../../../examples/calc/db.rs:par_db_impl}}
```

salsa-0.23.0/book/src/tutorial/debug.md000064400000000000000000000035161046102023000160120ustar 00000000000000# Defining the parser: debug impls and testing

As the final part of the parser, we need to write some tests. To do so, we will create a database, set the input source text, run the parser, and check the result. Before we can do that, though, we have to address one question: how do we inspect the value of an interned type like `Expression`?

## The `DebugWithDb` trait

Because an interned type like `Expression` just stores an integer, the traditional `Debug` trait is not very useful. To properly print an `Expression`, you need to access the Salsa database to find out what its value is.

To solve this, `salsa` provides a `DebugWithDb` trait that acts like the regular `Debug`, but takes a database as argument. For types that implement this trait, you can invoke the `debug` method. This returns a temporary that implements the ordinary `Debug` trait, allowing you to write something like

```rust
eprintln!("Expression = {:?}", expr.debug(db));
```

and get back the output you expect.

The `DebugWithDb` trait is automatically derived for all `#[input]`, `#[interned]`, and `#[tracked]` structs.

## Forwarding to the ordinary `Debug` trait

For consistency, it is sometimes useful to have a `DebugWithDb` implementation even for types, like `Op`, that are just ordinary enums. You can do that like so:

```rust
{{#include ../../../examples/calc/ir.rs:op_debug_impl}}
```

## Writing the unit test

Now that we have our `DebugWithDb` impls in place, we can write a simple unit test harness.
The `parse_string` function below creates a database, sets the source text, and then invokes the parser:

```rust
{{#include ../../../examples/calc/parser.rs:parse_string}}
```

Combined with the [`expect-test`](https://crates.io/crates/expect-test) crate, we can then write unit tests like this one:

```rust
{{#include ../../../examples/calc/parser.rs:parse_print}}
```

salsa-0.23.0/book/src/tutorial/interpreter.md000064400000000000000000000000331046102023000172600ustar 00000000000000# Defining the interpreter

salsa-0.23.0/book/src/tutorial/ir.md000064400000000000000000000222021046102023000153310ustar 00000000000000# Defining the IR

Before we can define the [parser](./parser.md), we need to define the intermediate representation (IR) that we will use for `calc` programs. In the [basic structure](./structure.md), we defined some "pseudo-Rust" structures like `Statement` and `Expression`; now we are going to define them for real.

## "Salsa structs"

In addition to regular Rust types, we will make use of various **Salsa structs**. A Salsa struct is a struct that has been annotated with one of the Salsa annotations:

- [`#[salsa::input]`](#input-structs), which designates the "base inputs" to your computation;
- [`#[salsa::tracked]`](#tracked-structs), which designate intermediate values created during your computation;
- [`#[salsa::interned]`](#interned-structs), which designate small values that are easy to compare for equality.

All Salsa structs store the actual values of their fields in the Salsa database. This permits us to track when the values of those fields change to figure out what work will need to be re-executed.

When you annotate a struct with one of the above Salsa attributes, Salsa actually generates a bunch of code to link that struct into the database.

## Input structs

The first thing we will define is our **input**. Every Salsa program has some basic inputs that drive the rest of the computation. The rest of the program must be some deterministic function of those base inputs, such that when those inputs change, we can try to efficiently recompute the new result of that function.

Inputs are defined as Rust structs with a `#[salsa::input]` annotation:

```rust
{{#include ../../../examples/calc/ir.rs:input}}
```

In our compiler, we have just one simple input, the `SourceProgram`, which has a `text` field (the string).

### The data lives in the database

Although they are declared like other Rust structs, Salsa structs are implemented quite differently. The values of their fields are stored in the Salsa database and the structs themselves just reference it. This means that the struct instances are `Copy` (no matter what fields they contain). Creating instances of the struct and accessing fields is done by invoking methods like `new` as well as getters and setters.

In the case of `#[salsa::input]`, the struct contains a `salsa::Id`, which is a non-zero integer. Therefore, the generated `SourceProgram` struct looks something like this:

```rust
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SourceProgram(salsa::Id);
```

It will also generate a method `new` that lets you create a `SourceProgram` in the database. For an input, a `&db` reference is required, along with the values for each field:

```rust
let source = SourceProgram::new(&db, "print 11 + 11".to_string());
```

You can read the value of the field with `source.text(&db)`, and you can set the value of the field with `source.set_text(&mut db, "print 11 * 2".to_string())`.
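Putting those pieces together, here is a hedged sketch of the full round trip; it assumes a database type such as the tutorial's `CalcDatabaseImpl`, and the setter signature follows the text above.

```rust
// Hedged sketch: `CalcDatabaseImpl` is the tutorial's database type.
let mut db = CalcDatabaseImpl::default();

// Create the input; the field value lives in the database, and
// `source` itself is just a cheap, copyable handle.
let source = SourceProgram::new(&db, "print 11 + 11".to_string());
assert_eq!(source.text(&db), "print 11 + 11");

// Setters take `&mut db`: changing an input starts a new revision.
source.set_text(&mut db, "print 11 * 2".to_string());
assert_eq!(source.text(&db), "print 11 * 2");
```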
### Database revisions

Whenever a function takes an `&mut` reference to the database, that means that it can only be invoked from outside the incrementalized part of your program, as explained in [the overview](../overview.md#goal-of-salsa).

When you change the value of an input field, that increments a 'revision counter' in the database, indicating that some inputs are different now. When we talk about a "revision" of the database, we are referring to the state of the database in between changes to the input values.

### Representing the parsed program

Next we will define a **tracked struct**. Whereas inputs represent the _start_ of a computation, tracked structs represent intermediate values created during your computation.

In this case, the parser is going to take in the `SourceProgram` struct that we saw and return a `Program` that represents the fully parsed program:

```rust
{{#include ../../../examples/calc/ir.rs:program}}
```

Like with an input, the fields of a tracked struct are also stored in the database. Unlike an input, those fields are immutable (they cannot be "set"), and Salsa compares them across revisions to know when they have changed. In this case, if parsing the input produced the same `Program` result (e.g., because the only change to the input was some trailing whitespace), then subsequent parts of the computation won't need to re-execute. (We'll revisit the role of tracked structs in reuse more in future parts of the IR.)

Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input:

- You can create a new value by using `new`: e.g., `Program::new(&db, some_statements)`
- You use a getter to read the value of a field, just like with an input (e.g., `my_program.statements(db)` to read the `statements` field).
- In this case, the field is tagged as `#[returns(ref)]`, which means that the getter will return a `&Vec<Statement>`, instead of cloning the vector.

### The `'db` lifetime

Unlike inputs, tracked structs carry a `'db` lifetime. This lifetime is tied to the `&db` used to create them and ensures that, so long as you are using the struct, the database remains immutable: in other words, you cannot change the values of a `salsa::input`.

The `'db` lifetime also allows tracked structs to be implemented using a pointer (versus the numeric id found in `salsa::input` structs). This doesn't really affect you as a user except that it allows accessing fields from tracked structs (a very common operation) to be optimized.

## Representing functions

We will also use a tracked struct to represent each function: The `Function` struct is going to be created by the parser to represent each of the functions defined by the user:

```rust
{{#include ../../../examples/calc/ir.rs:functions}}
```

If we had created some `Function` instance `f`, for example, we might find that the `f.body` field changes because the user changed the definition of `f`. This would mean that we have to re-execute those parts of the code that depended on `f.body` (but not those parts of the code that depended on the body of _other_ functions).

Apart from the fields being immutable, the API for working with a tracked struct is quite similar to an input:

- You can create a new value by using `new`: e.g., `Function::new(&db, some_name, some_args, some_body)`
- You use a getter to read the value of a field, just like with an input (e.g., `my_func.args(db)` to read the `args` field).
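For instance, the parser might build a `Function` like this. This is a hedged sketch reusing the tutorial's types; `db`, `name_span`, and `body` are assumed to have been produced earlier while parsing, and the argument order follows the field order shown above.

```rust
// Hedged sketch: `db`, `name_span`, and `body` are assumed to exist.
let name = FunctionId::new(&db, "area_rectangle".to_string());
let args = vec![
    VariableId::new(&db, "w".to_string()),
    VariableId::new(&db, "h".to_string()),
];
let function = Function::new(&db, name, name_span, args, body);

// Reads go through getters, as with inputs:
assert_eq!(function.name(&db), name);
assert_eq!(function.args(&db).len(), 2);
```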
### id fields

To get better reuse across revisions, particularly when things are reordered, you can mark some entity fields with `#[id]`. Normally, you would do this on fields that represent the "name" of an entity. This indicates that, across two revisions R1 and R2, if two functions are created with the same name, they refer to the same entity, so we can compare their other fields for equality to determine what needs to be re-executed. Adding `#[id]` attributes is an optimization and never affects correctness. For more details, see the [algorithm](../reference/algorithm.md) page of the reference.

## Interned structs

The final kind of Salsa struct is the _interned struct_. As with input and tracked structs, the data for an interned struct is stored in the database. Unlike those structs, if you intern the same data twice, you get back the **same integer**.

A classic use of interning is for small strings like function names and variables. It's annoying and inefficient to pass around those names with `String` values which must be cloned; it's also inefficient to have to compare them for equality via string comparison. Therefore, we define two interned structs, `FunctionId` and `VariableId`, each with a single field that stores the string:

```rust
{{#include ../../../examples/calc/ir.rs:interned_ids}}
```

When you invoke e.g. `FunctionId::new(&db, "my_string".to_string())`, you will get back a `FunctionId` that is just a newtype'd integer. But if you invoke the same call to `new` again, you get back the same integer:

```rust
let f1 = FunctionId::new(&db, "my_string".to_string());
let f2 = FunctionId::new(&db, "my_string".to_string());
assert_eq!(f1, f2);
```

### Interned values carry a `'db` lifetime

Like tracked structs, interned values carry a `'db` lifetime that prevents them from being used across salsa revisions. It also permits them to be implemented using a pointer "under the hood", permitting efficient field access.

Interned values are guaranteed to be consistent within a single revision. Across revisions, they may be cleared, reallocated, or reassigned -- but you cannot generally observe this, since the `'db` lifetime prevents you from changing inputs (and hence creating a new revision) while an interned value is in active use.

### Expressions and statements

We won't use any special "Salsa structs" for expressions and statements:

```rust
{{#include ../../../examples/calc/ir.rs:statements_and_expressions}}
```

Since statements and expressions are not tracked, this implies that we are only attempting to get incremental re-use at the granularity of functions -- whenever anything in a function body changes, we consider the entire function body dirty and re-execute anything that depended on it. It usually makes sense to draw some kind of "reasonably coarse" boundary like this.

One downside of the way we have set things up: we inlined the position into each of the structs.

salsa-0.23.0/book/src/tutorial/parser.md000064400000000000000000000101411046102023000162110ustar 00000000000000# Defining the parser: memoized functions and inputs

The next step in the `calc` compiler is to define the parser. The role of the parser will be to take the `SourceProgram` input, read the string from the `text` field, and create the `Statement`, `Function`, and `Expression` structures that [we defined in the `ir` module](./ir.md).

To minimize dependencies, we are going to write a [recursive descent parser][rd].
Another option would be to use a [Rust parsing framework](https://rustrepo.com/catalog/rust-parsing_newest_1). We won't cover the parsing itself in this tutorial -- you can read the code if you want to see how it works. We're going to focus only on the Salsa-related aspects.

[rd]: https://en.wikipedia.org/wiki/Recursive_descent_parser

## The `parse_statements` function

The starting point for the parser is the `parse_statements` function:

```rust
{{#include ../../../examples/calc/parser.rs:parse_statements}}
```

This function is annotated as `#[salsa::tracked]`. That means that, when it is called, Salsa will track what inputs it reads as well as what value it returns. The return value is _memoized_, which means that if you call this function again without changing the inputs, Salsa will just clone the result rather than re-execute it.

### Tracked functions are the unit of reuse

Tracked functions are the core part of how Salsa enables incremental reuse. The goal of the framework is to avoid re-executing tracked functions and instead to clone their result. Salsa uses the [red-green algorithm](../reference/algorithm.md) to decide when to re-execute a function. The short version is that a tracked function is re-executed if either (a) it directly reads an input, and that input has changed, or (b) it directly invokes another tracked function and that function's return value has changed. In the case of `parse_statements`, it directly reads `SourceProgram::text`, so if the text changes, then the parser will re-execute.

By choosing which functions to mark as `#[tracked]`, you control how much reuse you get. In our case, we're opting to mark the outermost parsing function as tracked, but not the inner ones. This means that if the input changes, we will always re-parse the entire input and re-create the resulting statements and so forth. We'll see later that this _doesn't_ mean we will always re-run the type checker and other parts of the compiler.

This trade-off makes sense because (a) parsing is very cheap, so the overhead of tracking and enabling finer-grained reuse doesn't pay off and because (b) since strings are just a big blob-o-bytes without any structure, it's rather hard to identify which parts of the IR need to be reparsed. Some systems do choose to do more granular reparsing, often by doing a "first pass" over the string to give it a bit of structure, e.g. to identify the functions, but deferring the parsing of the body of each function until later. Setting up a scheme like this is relatively easy in Salsa and uses the same principles that we will use later to avoid re-executing the type checker.

### Parameters to a tracked function

The **first** parameter to a tracked function is **always** the database, `db: &dyn crate::Db`. The **second** parameter to a tracked function is **always** some kind of Salsa struct. Tracked functions may take other arguments as well, though our examples here do not. Functions that take additional arguments are less efficient and flexible. It's generally better to structure tracked functions as functions of a single Salsa struct if possible.

### The `returns(ref)` annotation

You may have noticed that `parse_statements` is tagged with `#[salsa::tracked(returns(ref))]`. Ordinarily, when you call a tracked function, the result you get back is cloned out of the database. The `returns(ref)` attribute means that a reference into the database is returned instead. So, when called, `parse_statements` will return an `&Vec<Statement>` rather than cloning the `Vec`.
This is useful as a performance optimization. (You may recall the `returns(ref)` annotation from the [ir](./ir.md) section of the tutorial, where it was placed on struct fields, with roughly the same meaning.)

salsa-0.23.0/book/src/tutorial/structure.md000064400000000000000000000041141046102023000167570ustar 00000000000000# Basic structure

Before we do anything with Salsa, let's talk about the basic structure of the calc compiler. Part of Salsa's design is that you are able to write programs that feel 'pretty close' to what a natural Rust program looks like.

## Example program

This is our example calc program:

```
x = 5
y = 10
z = x + y * 3
print z
```

## Parser

The calc compiler takes as input a program, represented by a string:

```rust
struct SourceProgram {
    text: String,
}
```

The first thing it does is to parse that string into a series of statements that look something like the following pseudo-Rust:[^lexer]

```rust
enum Statement {
    /// Defines `fn <name>(<args>) = <body>`
    Function(Function),

    /// Defines `print <expr>`
    Print(Expression),
}

/// Defines `fn <name>(<args>) = <body>`
struct Function {
    name: FunctionId,
    args: Vec<VariableId>,
    body: Expression,
}
```

where an expression is something like this (pseudo-Rust, because the `Expression` enum is recursive):

```rust
enum Expression {
    Op(Expression, Op, Expression),
    Number(f64),
    Variable(VariableId),
    Call(FunctionId, Vec<Expression>),
}

enum Op {
    Add,
    Subtract,
    Multiply,
    Divide,
}
```

Finally, for function/variable names, the `FunctionId` and `VariableId` types will be interned strings:

```rust
type FunctionId = /* interned string */;
type VariableId = /* interned string */;
```

[^lexer]: Because calc is so simple, we don't have to bother separating out the lexer from the parser.

## Checker

The "checker" has the job of ensuring that the user only references variables that have been defined. We're going to write the checker in a "context-less" style, which is a bit less intuitive but allows for more incremental re-use. The idea is to compute, for a given expression, which variables it references. Then there is a function `check` which ensures that those variables are a subset of those that are already defined.

## Interpreter

The interpreter will execute the program and print the result. We don't bother with much incremental re-use here, though it's certainly possible.

salsa-0.23.0/book/src/tutorial.md000064400000000000000000000015261046102023000147230ustar 00000000000000# Tutorial: calc

This tutorial walks through an end-to-end example of using Salsa. It does not assume you know anything about salsa, but reading the [overview](./overview.md) first is probably a good idea to get familiar with the basic concepts.

Our goal is to define a compiler/interpreter for a simple language called `calc`. The `calc` compiler takes programs like the following and then parses and executes them:

```
fn area_rectangle(w, h) = w * h
fn area_circle(r) = 3.14 * r * r
print area_rectangle(3, 4)
print area_circle(1)
print 11 * 2
```

When executed, this program prints `12`, `3.14`, and `22`.

If the program contains errors (e.g., a reference to an undefined function), it prints those out too. And, of course, it will be reactive, so small changes to the input don't require recompiling (or re-executing, necessarily) the entire thing.
salsa-0.23.0/book/src/videos.md000064400000000000000000000014531046102023000143500ustar 00000000000000# Videos

There is currently one video available on the newest version of Salsa:

- [Salsa Architecture Walkthrough](https://www.youtube.com/watch?v=vrnNvAAoQFk), which covers many aspects of the redesigned architecture.

There are also two videos on the older version of Salsa, but they are rather outdated:

- [How Salsa Works](https://youtu.be/_muY4HjSqVw), which gives a high-level introduction to the key concepts involved and shows how to use Salsa;
- [Salsa In More Depth](https://www.youtube.com/watch?v=i_IhACacPRY), which digs into the incremental algorithm and explains -- at a high level -- how Salsa is implemented.

> If you're in China, you can watch these videos on Bilibili:
> [How Salsa Works](https://www.bilibili.com/video/BV1Df4y1A7t3/),
> [Salsa In More Depth](https://www.bilibili.com/video/BV1AM4y1G7E4/).
salsa-0.23.0/examples/calc/compile.rs000064400000000000000000000004441046102023000155310ustar 00000000000000use crate::ir::SourceProgram;
use crate::parser::parse_statements;
use crate::type_check::type_check_program;

#[salsa::tracked]
pub fn compile(db: &dyn crate::Db, source_program: SourceProgram) {
    let program = parse_statements(db, source_program);
    type_check_program(db, program);
}
salsa-0.23.0/examples/calc/db.rs000064400000000000000000000033221046102023000144640ustar 00000000000000#[cfg(test)]
use std::sync::{Arc, Mutex};

// ANCHOR: db_struct
#[salsa::db]
#[derive(Clone)]
#[cfg_attr(not(test), derive(Default))]
pub struct CalcDatabaseImpl {
    storage: salsa::Storage<Self>,

    // The logs are only used for testing and demonstrating reuse:
    #[cfg(test)]
    logs: Arc<Mutex<Option<Vec<String>>>>,
}

#[cfg(test)]
impl Default for CalcDatabaseImpl {
    fn default() -> Self {
        let logs = <Arc<Mutex<Option<Vec<String>>>>>::default();
        Self {
            storage: salsa::Storage::new(Some(Box::new({
                let logs = logs.clone();
                move |event| {
                    eprintln!("Event: {event:?}");
                    // Log interesting events, if logging is enabled
                    if let Some(logs) = &mut *logs.lock().unwrap() {
                        // only log interesting events
                        if let salsa::EventKind::WillExecute { .. } = event.kind {
                            logs.push(format!("Event: {event:?}"));
                        }
                    }
                }
            }))),
            logs,
        }
    }
}
// ANCHOR_END: db_struct

impl CalcDatabaseImpl {
    /// Enable logging of each salsa event.
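    /// (Test-only: the logging callback installed in `Default::default` records
    /// `WillExecute` events, which the tests use to assert exactly which queries re-ran.)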
    #[cfg(test)]
    pub fn enable_logging(&self) {
        let mut logs = self.logs.lock().unwrap();
        if logs.is_none() {
            *logs = Some(vec![]);
        }
    }

    #[cfg(test)]
    pub fn take_logs(&self) -> Vec<String> {
        let mut logs = self.logs.lock().unwrap();
        if let Some(logs) = &mut *logs {
            std::mem::take(logs)
        } else {
            vec![]
        }
    }
}

// ANCHOR: db_impl
#[salsa::db]
impl salsa::Database for CalcDatabaseImpl {}
// ANCHOR_END: db_impl
salsa-0.23.0/examples/calc/ir.rs000064400000000000000000000064031046102023000145140ustar 00000000000000#![allow(clippy::needless_borrow)]

use ordered_float::OrderedFloat;

// ANCHOR: input
#[salsa::input(debug)]
pub struct SourceProgram {
    #[returns(ref)]
    pub text: String,
}
// ANCHOR_END: input

// ANCHOR: interned_ids
#[salsa::interned(debug)]
pub struct VariableId<'db> {
    #[returns(ref)]
    pub text: String,
}

#[salsa::interned(debug)]
pub struct FunctionId<'db> {
    #[returns(ref)]
    pub text: String,
}
// ANCHOR_END: interned_ids

// ANCHOR: program
#[salsa::tracked(debug)]
pub struct Program<'db> {
    #[tracked]
    #[returns(ref)]
    pub statements: Vec<Statement<'db>>,
}
// ANCHOR_END: program

// ANCHOR: statements_and_expressions
#[derive(Eq, PartialEq, Debug, Hash, salsa::Update)]
pub struct Statement<'db> {
    pub span: Span<'db>,
    pub data: StatementData<'db>,
}

impl<'db> Statement<'db> {
    pub fn new(span: Span<'db>, data: StatementData<'db>) -> Self {
        Statement { span, data }
    }
}

#[derive(Eq, PartialEq, Debug, Hash, salsa::Update)]
pub enum StatementData<'db> {
    /// Defines `fn <name>(<args>) = <body>`
    Function(Function<'db>),
    /// Defines `print <expr>`
    Print(Expression<'db>),
}

#[derive(Eq, PartialEq, Debug, Hash, salsa::Update)]
pub struct Expression<'db> {
    pub span: Span<'db>,
    pub data: ExpressionData<'db>,
}

impl<'db> Expression<'db> {
    pub fn new(span: Span<'db>, data: ExpressionData<'db>) -> Self {
        Expression { span, data }
    }
}

#[derive(Eq, PartialEq, Debug, Hash, salsa::Update)]
pub enum ExpressionData<'db> {
    Op(Box<Expression<'db>>, Op, Box<Expression<'db>>),
    Number(OrderedFloat<f64>),
    Variable(VariableId<'db>),
    Call(FunctionId<'db>, Vec<Expression<'db>>),
}

#[derive(Eq, PartialEq, Copy, Clone, Hash, Debug)]
pub enum Op {
    Add,
    Subtract,
    Multiply,
    Divide,
}
// ANCHOR_END: statements_and_expressions

// ANCHOR: functions
#[salsa::tracked(debug)]
pub struct Function<'db> {
    pub name: FunctionId<'db>,

    name_span: Span<'db>,

    #[tracked]
    #[returns(ref)]
    pub args: Vec<VariableId<'db>>,

    #[tracked]
    #[returns(ref)]
    pub body: Expression<'db>,
}
// ANCHOR_END: functions

#[salsa::tracked(debug)]
pub struct Span<'db> {
    #[tracked]
    pub start: usize,
    #[tracked]
    pub end: usize,
}

// ANCHOR: diagnostic
#[salsa::accumulator]
#[derive(Debug)]
#[allow(dead_code)] // Debug impl uses them
pub struct Diagnostic {
    pub start: usize,
    pub end: usize,
    pub message: String,
}
// ANCHOR_END: diagnostic

impl Diagnostic {
    pub fn new(start: usize, end: usize, message: String) -> Self {
        Diagnostic {
            start,
            end,
            message,
        }
    }

    #[cfg(test)]
    pub fn render(&self, db: &dyn crate::Db, src: SourceProgram) -> String {
        use annotate_snippets::*;
        let line_start = src.text(db)[..self.start].lines().count() + 1;
        Renderer::plain()
            .render(
                Level::Error.title(&self.message).snippet(
                    Snippet::source(src.text(db))
                        .line_start(line_start)
                        .origin("input")
                        .fold(true)
                        .annotation(Level::Error.span(self.start..self.end).label("here")),
                ),
            )
            .to_string()
    }
}
salsa-0.23.0/examples/calc/main.rs000064400000000000000000000007001046102023000150200ustar 00000000000000use db::CalcDatabaseImpl;
use ir::{Diagnostic, SourceProgram};
use salsa::Database as Db;

mod compile;
mod db;
mod ir;
mod parser;
mod type_check;

pub fn main() {
    let db: CalcDatabaseImpl = Default::default();
    let source_program = SourceProgram::new(&db, String::new());
    compile::compile(&db, source_program);
    let diagnostics = compile::compile::accumulated::<Diagnostic>(&db, source_program);
    eprintln!("{diagnostics:?}");
}
salsa-0.23.0/examples/calc/parser.rs000064400000000000000000000757631046102023000154110ustar 00000000000000use ordered_float::OrderedFloat;
use salsa::Accumulator;

use crate::ir::{
    Diagnostic, Expression, ExpressionData, Function, FunctionId, Op, Program, SourceProgram,
    Span, Statement, StatementData, VariableId,
};

// ANCHOR: parse_statements
#[salsa::tracked]
pub fn parse_statements(db: &dyn crate::Db, source: SourceProgram) -> Program<'_> {
    // Get the source text from the database
    let source_text = source.text(db);

    // Create the parser
    let mut parser = Parser {
        db,
        source_text,
        position: 0,
    };

    // Read in statements until we reach the end of the input
    let mut result = vec![];
    loop {
        // Skip over any whitespace
        parser.skip_whitespace();

        // If there are no more tokens, break
        if parser.peek().is_none() {
            break;
        }

        // Otherwise, there is more input, so parse a statement.
        if let Some(statement) = parser.parse_statement() {
            result.push(statement);
        } else {
            // If we failed, report an error at whatever position the parser
            // got stuck. We could recover here by skipping to the end of the line
            // or something like that. But we leave that as an exercise for the reader!
            parser.report_error();
            break;
        }
    }

    Program::new(db, result)
}
// ANCHOR_END: parse_statements

/// The parser tracks the current position in the input.
///
/// There are parsing methods on the parser named `parse_foo`. Each such method tries to parse a
/// `foo` at the current position. Once they've recognized a `foo`, they return `Some(foo)` with
/// the result, and they update the position. If there is a parse error
/// (i.e., they don't recognize a `foo` at the current position), they return `None`,
/// and they leave `position` at roughly the spot where parsing failed. You can use this to
/// report errors and recover.
///
/// There are some simpler methods that read a single token (e.g., [`Parser::ch`]
/// or [`Parser::word`]). These methods guarantee that, when they return `None`, the position
/// is not changed apart from consuming whitespace. This allows them to be used to probe ahead
/// and test the next token.
struct Parser<'source, 'db> {
    db: &'db dyn crate::Db,
    source_text: &'source str,
    position: usize,
}

impl<'db> Parser<'_, 'db> {
    // Invoke `f` and, if it returns `None`, then restore the parsing position.
    fn probe<T>(&mut self, f: impl FnOnce(&mut Self) -> Option<T>) -> Option<T> {
        let p = self.position;
        if let Some(v) = f(self) {
            Some(v)
        } else {
            self.position = p;
            None
        }
    }

    // ANCHOR: report_error
    /// Report an error diagnostic at the current position.
    fn report_error(&self) {
        let next_position = match self.peek() {
            Some(ch) => self.position + ch.len_utf8(),
            None => self.position,
        };
        Diagnostic {
            start: self.position,
            end: next_position,
            message: "unexpected character".to_string(),
        }
        .accumulate(self.db);
    }
    // ANCHOR_END: report_error

    fn peek(&self) -> Option<char> {
        self.source_text[self.position..].chars().next()
    }

    // Returns a span ranging from `start_position` until the current position (exclusive)
    fn span_from(&self, start_position: usize) -> Span<'db> {
        Span::new(self.db, start_position, self.position)
    }

    fn consume(&mut self, ch: char) {
        debug_assert!(self.peek() == Some(ch));
        self.position += ch.len_utf8();
    }

    /// Skips whitespace and returns the new position.
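    /// (If the parser is already at a non-whitespace character, the position is
    /// simply returned unchanged, so this is cheap to call repeatedly.)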
fn skip_whitespace(&mut self) -> usize { while let Some(ch) = self.peek() { if ch.is_whitespace() { self.consume(ch); } else { break; } } self.position } // ANCHOR: parse_statement fn parse_statement(&mut self) -> Option> { let start_position = self.skip_whitespace(); let word = self.word()?; if word == "fn" { let func = self.parse_function()?; Some(Statement::new( self.span_from(start_position), StatementData::Function(func), )) } else if word == "print" { let expr = self.parse_expression()?; Some(Statement::new( self.span_from(start_position), StatementData::Print(expr), )) } else { None } } // ANCHOR_END: parse_statement // ANCHOR: parse_function fn parse_function(&mut self) -> Option> { let start_position = self.skip_whitespace(); let name = self.word()?; let name_span = self.span_from(start_position); let name: FunctionId = FunctionId::new(self.db, name); // ^^^^^^^^^^^^^^^ // Create a new interned struct. self.ch('(')?; let args = self.parameters()?; self.ch(')')?; self.ch('=')?; let body = self.parse_expression()?; Some(Function::new(self.db, name, name_span, args, body)) // ^^^^^^^^^^^^^ // Create a new entity struct. } // ANCHOR_END: parse_function fn parse_expression(&mut self) -> Option> { self.parse_op_expression(Self::parse_expression1, Self::low_op) } fn low_op(&mut self) -> Option { if self.ch('+').is_some() { Some(Op::Add) } else if self.ch('-').is_some() { Some(Op::Subtract) } else { None } } /// Parses a high-precedence expression (times, div). /// /// On failure, skips arbitrary tokens. fn parse_expression1(&mut self) -> Option> { self.parse_op_expression(Self::parse_expression2, Self::high_op) } fn high_op(&mut self) -> Option { if self.ch('*').is_some() { Some(Op::Multiply) } else if self.ch('/').is_some() { Some(Op::Divide) } else { None } } fn parse_op_expression( &mut self, mut parse_expr: impl FnMut(&mut Self) -> Option>, mut op: impl FnMut(&mut Self) -> Option, ) -> Option> { let start_position = self.skip_whitespace(); let mut expr1 = parse_expr(self)?; while let Some(op) = op(self) { let expr2 = parse_expr(self)?; expr1 = Expression::new( self.span_from(start_position), ExpressionData::Op(Box::new(expr1), op, Box::new(expr2)), ); } Some(expr1) } /// Parses a "base expression" (no operators). /// /// On failure, skips arbitrary tokens. fn parse_expression2(&mut self) -> Option> { let start_position = self.skip_whitespace(); if let Some(w) = self.word() { if self.ch('(').is_some() { let f = FunctionId::new(self.db, w); let args = self.parse_expressions()?; self.ch(')')?; return Some(Expression::new( self.span_from(start_position), ExpressionData::Call(f, args), )); } let v = VariableId::new(self.db, w); Some(Expression::new( self.span_from(start_position), ExpressionData::Variable(v), )) } else if let Some(n) = self.number() { Some(Expression::new( self.span_from(start_position), ExpressionData::Number(OrderedFloat::from(n)), )) } else if self.ch('(').is_some() { let expr = self.parse_expression()?; self.ch(')')?; Some(expr) } else { None } } fn parse_expressions(&mut self) -> Option>> { let mut r = vec![]; loop { let expr = self.parse_expression()?; r.push(expr); if self.ch(',').is_none() { return Some(r); } } } /// Parses a list of variable identifiers, like `a, b, c`. /// No trailing commas because I am lazy. /// /// On failure, skips arbitrary tokens. 
fn parameters(&mut self) -> Option>> { let mut r = vec![]; loop { let name = self.word()?; let vid = VariableId::new(self.db, name); r.push(vid); if self.ch(',').is_none() { return Some(r); } } } /// Parses a single character. /// /// Even on failure, only skips whitespace. fn ch(&mut self, c: char) -> Option> { let start_position = self.skip_whitespace(); match self.peek() { Some(p) if c == p => { self.consume(c); Some(self.span_from(start_position)) } _ => None, } } /// Parses an identifier. /// /// Even on failure, only skips whitespace. fn word(&mut self) -> Option { self.skip_whitespace(); // In this loop, if we consume any characters, we always // return `Some`. let mut s = String::new(); let _position = self.position; while let Some(ch) = self.peek() { if ch.is_alphabetic() || ch == '_' || (!s.is_empty() && ch.is_numeric()) { s.push(ch); } else { break; } self.consume(ch); } if s.is_empty() { None } else { Some(s) } } /// Parses a number. /// /// Even on failure, only skips whitespace. fn number(&mut self) -> Option { let _start_position = self.skip_whitespace(); self.probe(|this| { // 👆 We need the call to `probe` here because we could consume // some characters like `3.1.2.3`, invoke `str::parse`, and then // still return `None`. let mut s = String::new(); while let Some(ch) = this.peek() { if ch.is_numeric() || ch == '.' { s.push(ch); } else { break; } this.consume(ch); } if s.is_empty() { None } else { str::parse(&s).ok() } }) } } // ANCHOR: parse_string /// Create a new database with the given source text and parse the result. /// Returns the statements and the diagnostics generated. #[cfg(test)] fn parse_string(source_text: &str) -> String { use salsa::Database; use crate::db::CalcDatabaseImpl; CalcDatabaseImpl::default().attach(|db| { // Create the source program let source_program = SourceProgram::new(db, source_text.to_string()); // Invoke the parser let statements = parse_statements(db, source_program); // Read out any diagnostics let accumulated = parse_statements::accumulated::(db, source_program); // Format the result as a string and return it format!("{:#?}", (statements, accumulated)) }) } // ANCHOR_END: parse_string // ANCHOR: parse_print #[test] fn parse_print() { let actual = parse_string("print 1 + 2"); let expected = expect_test::expect![[r#" ( Program { [salsa id]: Id(800), statements: [ Statement { span: Span { [salsa id]: Id(404), start: 0, end: 11, }, data: Print( Expression { span: Span { [salsa id]: Id(403), start: 6, end: 11, }, data: Op( Expression { span: Span { [salsa id]: Id(400), start: 6, end: 7, }, data: Number( 1.0, ), }, Add, Expression { span: Span { [salsa id]: Id(402), start: 10, end: 11, }, data: Number( 2.0, ), }, ), }, ), }, ], }, [], )"#]]; expected.assert_eq(&actual); } // ANCHOR_END: parse_print #[test] fn parse_example() { let actual = parse_string( " fn area_rectangle(w, h) = w * h fn area_circle(r) = 3.14 * r * r print area_rectangle(3, 4) print area_circle(1) print 11 * 2 ", ); let expected = expect_test::expect![[r#" ( Program { [salsa id]: Id(1400), statements: [ Statement { span: Span { [salsa id]: Id(409), start: 13, end: 57, }, data: Function( Function { [salsa id]: Id(1000), name: FunctionId { text: "area_rectangle", }, name_span: Span { [salsa id]: Id(400), start: 16, end: 30, }, args: [ VariableId { text: "w", }, VariableId { text: "h", }, ], body: Expression { span: Span { [salsa id]: Id(408), start: 39, end: 57, }, data: Op( Expression { span: Span { [salsa id]: Id(405), start: 39, end: 41, }, data: Variable( VariableId { 
text: "w", }, ), }, Multiply, Expression { span: Span { [salsa id]: Id(407), start: 43, end: 57, }, data: Variable( VariableId { text: "h", }, ), }, ), }, }, ), }, Statement { span: Span { [salsa id]: Id(415), start: 57, end: 102, }, data: Function( Function { [salsa id]: Id(1001), name: FunctionId { text: "area_circle", }, name_span: Span { [salsa id]: Id(40a), start: 60, end: 71, }, args: [ VariableId { text: "r", }, ], body: Expression { span: Span { [salsa id]: Id(414), start: 77, end: 102, }, data: Op( Expression { span: Span { [salsa id]: Id(411), start: 77, end: 86, }, data: Op( Expression { span: Span { [salsa id]: Id(40e), start: 77, end: 81, }, data: Number( 3.14, ), }, Multiply, Expression { span: Span { [salsa id]: Id(410), start: 84, end: 86, }, data: Variable( VariableId { text: "r", }, ), }, ), }, Multiply, Expression { span: Span { [salsa id]: Id(413), start: 88, end: 102, }, data: Variable( VariableId { text: "r", }, ), }, ), }, }, ), }, Statement { span: Span { [salsa id]: Id(41c), start: 102, end: 141, }, data: Print( Expression { span: Span { [salsa id]: Id(41b), start: 108, end: 128, }, data: Call( FunctionId { text: "area_rectangle", }, [ Expression { span: Span { [salsa id]: Id(417), start: 123, end: 124, }, data: Number( 3.0, ), }, Expression { span: Span { [salsa id]: Id(419), start: 126, end: 127, }, data: Number( 4.0, ), }, ], ), }, ), }, Statement { span: Span { [salsa id]: Id(421), start: 141, end: 174, }, data: Print( Expression { span: Span { [salsa id]: Id(420), start: 147, end: 161, }, data: Call( FunctionId { text: "area_circle", }, [ Expression { span: Span { [salsa id]: Id(41e), start: 159, end: 160, }, data: Number( 1.0, ), }, ], ), }, ), }, Statement { span: Span { [salsa id]: Id(426), start: 174, end: 195, }, data: Print( Expression { span: Span { [salsa id]: Id(425), start: 180, end: 186, }, data: Op( Expression { span: Span { [salsa id]: Id(422), start: 180, end: 182, }, data: Number( 11.0, ), }, Multiply, Expression { span: Span { [salsa id]: Id(424), start: 185, end: 186, }, data: Number( 2.0, ), }, ), }, ), }, ], }, [], )"#]]; expected.assert_eq(&actual); } #[test] fn parse_error() { let source_text: &str = "print 1 + + 2"; // 0123456789^ <-- this is the position 10, where the error is reported let actual = parse_string(source_text); let expected = expect_test::expect![[r#" ( Program { [salsa id]: Id(800), statements: [], }, [ Diagnostic { start: 10, end: 11, message: "unexpected character", }, ], )"#]]; expected.assert_eq(&actual); } #[test] fn parse_precedence() { // this parses as `(1 + (2 * 3)) + 4` let source_text: &str = "print 1 + 2 * 3 + 4"; let actual = parse_string(source_text); let expected = expect_test::expect![[r#" ( Program { [salsa id]: Id(800), statements: [ Statement { span: Span { [salsa id]: Id(40a), start: 0, end: 19, }, data: Print( Expression { span: Span { [salsa id]: Id(409), start: 6, end: 19, }, data: Op( Expression { span: Span { [salsa id]: Id(406), start: 6, end: 16, }, data: Op( Expression { span: Span { [salsa id]: Id(400), start: 6, end: 7, }, data: Number( 1.0, ), }, Add, Expression { span: Span { [salsa id]: Id(405), start: 10, end: 15, }, data: Op( Expression { span: Span { [salsa id]: Id(402), start: 10, end: 11, }, data: Number( 2.0, ), }, Multiply, Expression { span: Span { [salsa id]: Id(404), start: 14, end: 15, }, data: Number( 3.0, ), }, ), }, ), }, Add, Expression { span: Span { [salsa id]: Id(408), start: 18, end: 19, }, data: Number( 4.0, ), }, ), }, ), }, ], }, [], )"#]]; expected.assert_eq(&actual); 
} salsa-0.23.0/examples/calc/type_check.rs000064400000000000000000000165251046102023000162260ustar 00000000000000#[cfg(test)] use expect_test::expect; use salsa::Accumulator; #[cfg(test)] use test_log::test; use crate::ir::{ Diagnostic, Expression, Function, FunctionId, Program, Span, StatementData, VariableId, }; // ANCHOR: parse_statements #[salsa::tracked] pub fn type_check_program<'db>(db: &'db dyn crate::Db, program: Program<'db>) { for statement in program.statements(db) { match &statement.data { StatementData::Function(f) => type_check_function(db, *f, program), StatementData::Print(e) => CheckExpression::new(db, program, &[]).check(e), } } } #[salsa::tracked] pub fn type_check_function<'db>( db: &'db dyn crate::Db, function: Function<'db>, program: Program<'db>, ) { CheckExpression::new(db, program, function.args(db)).check(function.body(db)) } #[salsa::tracked] pub fn find_function<'db>( db: &'db dyn crate::Db, program: Program<'db>, name: FunctionId<'db>, ) -> Option> { program .statements(db) .iter() .flat_map(|s| match &s.data { StatementData::Function(f) if f.name(db) == name => Some(*f), _ => None, }) .next() } struct CheckExpression<'input, 'db> { db: &'db dyn crate::Db, program: Program<'db>, names_in_scope: &'input [VariableId<'db>], } impl<'input, 'db> CheckExpression<'input, 'db> { pub fn new( db: &'db dyn crate::Db, program: Program<'db>, names_in_scope: &'input [VariableId<'db>], ) -> Self { CheckExpression { db, program, names_in_scope, } } } impl<'db> CheckExpression<'_, 'db> { fn check(&self, expression: &Expression<'db>) { match &expression.data { crate::ir::ExpressionData::Op(left, _, right) => { self.check(left); self.check(right); } crate::ir::ExpressionData::Number(_) => {} crate::ir::ExpressionData::Variable(v) => { if !self.names_in_scope.contains(v) { self.report_error( expression.span, format!("the variable `{}` is not declared", v.text(self.db)), ); } } crate::ir::ExpressionData::Call(f, args) => { if self.find_function(*f).is_none() { self.report_error( expression.span, format!("the function `{}` is not declared", f.text(self.db)), ); } for arg in args { self.check(arg); } } } } fn find_function(&self, f: FunctionId<'db>) -> Option> { find_function(self.db, self.program, f) } fn report_error(&self, span: Span, message: String) { Diagnostic::new(span.start(self.db), span.end(self.db), message).accumulate(self.db); } } /// Create a new database with the given source text and parse the result. /// Returns the statements and the diagnostics generated. 
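/// Each `(new_source_text, expectation)` pair in `edits` is then applied in turn, and the
/// diagnostics produced after each edit are checked against its paired expectation.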
#[cfg(test)] fn check_string( source_text: &str, expected_diagnostics: expect_test::Expect, edits: &[(&str, expect_test::Expect)], ) { use salsa::{Database, Setter}; use crate::db::CalcDatabaseImpl; use crate::ir::SourceProgram; use crate::parser::parse_statements; // Create the database let mut db = CalcDatabaseImpl::default(); db.enable_logging(); // Create the source program let source_program = SourceProgram::new(&db, source_text.to_string()); // Invoke the parser let program = parse_statements(&db, source_program); // Read out any diagnostics db.attach(|db| { let rendered_diagnostics: String = type_check_program::accumulated::(db, program) .into_iter() .map(|d| d.render(db, source_program)) .collect::>() .join("\n"); expected_diagnostics.assert_eq(&rendered_diagnostics); }); // Apply edits and check diagnostics/logs after each one for (new_source_text, expected_diagnostics) in edits { source_program .set_text(&mut db) .to(new_source_text.to_string()); db.attach(|db| { let program = parse_statements(db, source_program); expected_diagnostics .assert_debug_eq(&type_check_program::accumulated::(db, program)); }); } } #[test] fn check_print() { check_string("print 1 + 2", expect![""], &[]); } #[test] fn check_bad_variable_in_program() { check_string( "print a + b", expect![[r#" error: the variable `a` is not declared --> input:2:7 | 2 | print a + b | ^^ here | error: the variable `b` is not declared --> input:2:11 | 2 | print a + b | ^ here |"#]], &[], ); } #[test] fn check_bad_function_in_program() { check_string( "print a(22)", expect![[r#" error: the function `a` is not declared --> input:2:7 | 2 | print a(22) | ^^^^^ here |"#]], &[], ); } #[test] fn check_bad_variable_in_function() { check_string( " fn add_one(a) = a + b print add_one(22) ", expect![[r#" error: the variable `b` is not declared --> input:4:33 | 4 | fn add_one(a) = a + b | _________________________________^ 5 | | print add_one(22) | |____________^ here |"#]], &[], ); } #[test] fn check_bad_function_in_function() { check_string( " fn add_one(a) = add_two(a) + b print add_one(22) ", expect![[r#" error: the function `add_two` is not declared --> input:4:29 | 4 | fn add_one(a) = add_two(a) + b | ^^^^^^^^^^ here | error: the variable `b` is not declared --> input:4:42 | 4 | fn add_one(a) = add_two(a) + b | __________________________________________^ 5 | | print add_one(22) | |____________^ here |"#]], &[], ); } #[test] fn fix_bad_variable_in_function() { check_string( " fn double(a) = a * b fn quadruple(a) = double(double(a)) print quadruple(2) ", expect![[r#" error: the variable `b` is not declared --> input:4:32 | 4 | fn double(a) = a * b | ________________________________^ 5 | | fn quadruple(a) = double(double(a)) | |____________^ here |"#]], &[( " fn double(a) = a * 2 fn quadruple(a) = double(double(a)) print quadruple(2) ", expect![[r#" [] "#]], )], ); } salsa-0.23.0/examples/lazy-input/inputs/a000064400000000000000000000000071046102023000164250ustar 000000000000002 ./aa salsa-0.23.0/examples/lazy-input/inputs/aa000064400000000000000000000000021046102023000165610ustar 000000000000008 salsa-0.23.0/examples/lazy-input/inputs/b000064400000000000000000000000021046102023000164210ustar 000000000000004 salsa-0.23.0/examples/lazy-input/inputs/start000064400000000000000000000000121046102023000173360ustar 000000000000001 ./a ./b salsa-0.23.0/examples/lazy-input/main.rs000064400000000000000000000161521046102023000162420ustar 00000000000000#![allow(unreachable_patterns)] // FIXME(rust-lang/rust#129031): regression in nightly use 
std::path::PathBuf; use std::sync::{Arc, Mutex}; use std::time::Duration; use crossbeam_channel::{unbounded, Sender}; use dashmap::mapref::entry::Entry; use dashmap::DashMap; use eyre::{eyre, Context, Report, Result}; use notify_debouncer_mini::notify::{RecommendedWatcher, RecursiveMode}; use notify_debouncer_mini::{new_debouncer, DebounceEventResult, Debouncer}; use salsa::{Accumulator, Setter, Storage}; // ANCHOR: main fn main() -> Result<()> { // Create the channel to receive file change events. let (tx, rx) = unbounded(); let mut db = LazyInputDatabase::new(tx); let initial_file_path = std::env::args_os() .nth(1) .ok_or_else(|| eyre!("Usage: ./lazy-input "))?; // Create the initial input using the input method so that changes to it // will be watched like the other files. let initial = db.input(initial_file_path.into())?; loop { // Compile the code starting at the provided input, this will read other // needed files using the on-demand mechanism. let sum = compile(&db, initial); let diagnostics = compile::accumulated::(&db, initial); if diagnostics.is_empty() { println!("Sum is: {sum}"); } else { for diagnostic in diagnostics { println!("{}", diagnostic.0); } } for log in db.logs.lock().unwrap().drain(..) { eprintln!("{log}"); } // Wait for file change events, the output can't change unless the // inputs change. for event in rx.recv()?.unwrap() { let path = event.path.canonicalize().wrap_err_with(|| { format!("Failed to canonicalize path {}", event.path.display()) })?; let file = match db.files.get(&path) { Some(file) => *file, None => continue, }; // `path` has changed, so read it and update the contents to match. // This creates a new revision and causes the incremental algorithm // to kick in, just like any other update to a salsa input. let contents = std::fs::read_to_string(path) .wrap_err_with(|| format!("Failed to read file {}", event.path.display()))?; file.set_contents(&mut db).to(contents); } } } // ANCHOR_END: main // ANCHOR: db #[salsa::input] struct File { path: PathBuf, #[returns(ref)] contents: String, } #[salsa::db] trait Db: salsa::Database { fn input(&self, path: PathBuf) -> Result; } #[salsa::db] #[derive(Clone)] struct LazyInputDatabase { storage: Storage, logs: Arc>>, files: DashMap, file_watcher: Arc>>, } impl LazyInputDatabase { fn new(tx: Sender) -> Self { let logs: Arc>> = Default::default(); Self { storage: Storage::new(Some(Box::new({ let logs = logs.clone(); move |event| { // don't log boring events if let salsa::EventKind::WillExecute { .. } = event.kind { logs.lock().unwrap().push(format!("{event:?}")); } } }))), logs, files: DashMap::new(), file_watcher: Arc::new(Mutex::new( new_debouncer(Duration::from_secs(1), tx).unwrap(), )), } } } #[salsa::db] impl salsa::Database for LazyInputDatabase {} #[salsa::db] impl Db for LazyInputDatabase { fn input(&self, path: PathBuf) -> Result { let path = path .canonicalize() .wrap_err_with(|| format!("Failed to read {}", path.display()))?; Ok(match self.files.entry(path.clone()) { // If the file already exists in our cache then just return it. Entry::Occupied(entry) => *entry.get(), // If we haven't read this file yet set up the watch, read the // contents, store it in the cache, and return it. Entry::Vacant(entry) => { // Set up the watch before reading the contents to try to avoid // race conditions. 
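                // (Registering the watch before the read means that a write landing in
                // between still generates a change event; at worst we get one redundant
                // re-read instead of missing an update.)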
let watcher = &mut *self.file_watcher.lock().unwrap(); watcher .watcher() .watch(&path, RecursiveMode::NonRecursive) .unwrap(); let contents = std::fs::read_to_string(&path) .wrap_err_with(|| format!("Failed to read {}", path.display()))?; *entry.insert(File::new(self, path, contents)) } }) } } // ANCHOR_END: db #[salsa::accumulator] struct Diagnostic(String); impl Diagnostic { fn push_error(db: &dyn Db, file: File, error: Report) { Diagnostic(format!( "Error in file {}: {:?}\n", file.path(db) .file_name() .unwrap_or_else(|| "".as_ref()) .to_string_lossy(), error, )) .accumulate(db); } } #[salsa::tracked] struct ParsedFile<'db> { value: u32, #[returns(ref)] links: Vec>, } #[salsa::tracked] fn compile(db: &dyn Db, input: File) -> u32 { let parsed = parse(db, input); sum(db, parsed) } #[salsa::tracked] fn parse(db: &dyn Db, input: File) -> ParsedFile<'_> { let mut lines = input.contents(db).lines(); let value = match lines.next().map(|line| (line.parse::(), line)) { Some((Ok(num), _)) => num, Some((Err(e), line)) => { Diagnostic::push_error( db, input, Report::new(e).wrap_err(format!( "First line ({line}) could not be parsed as an integer" )), ); 0 } None => { Diagnostic::push_error(db, input, eyre!("File must contain an integer")); 0 } }; let links = lines .filter_map(|path| { let relative_path = match path.parse::() { Ok(path) => path, Err(err) => { Diagnostic::push_error( db, input, Report::new(err).wrap_err(format!("Failed to parse path: {path}")), ); return None; } }; let link_path = input.path(db).parent().unwrap().join(relative_path); match db.input(link_path) { Ok(file) => Some(parse(db, file)), Err(err) => { Diagnostic::push_error(db, input, err); None } } }) .collect(); ParsedFile::new(db, value, links) } #[salsa::tracked] fn sum<'db>(db: &'db dyn Db, input: ParsedFile<'db>) -> u32 { input.value(db) + input .links(db) .iter() .map(|&file| sum(db, file)) .sum::() } salsa-0.23.0/justfile000064400000000000000000000003041046102023000125560ustar 00000000000000test: cargo test --workspace --all-targets --no-fail-fast miri: cargo +nightly miri test --no-fail-fast shuttle: cargo nextest run --features shuttle --test parallel all: test miri salsa-0.23.0/release-plz.toml000064400000000000000000000002561046102023000141340ustar 00000000000000[[package]] name = "salsa" version_group = "salsa" [[package]] name = "salsa-macros" version_group = "salsa" [[package]] name = "salsa-macro-rules" version_group = "salsa" salsa-0.23.0/src/accumulator/accumulated.rs000064400000000000000000000021571046102023000167610ustar 00000000000000use std::any::Any; use std::fmt::Debug; use crate::accumulator::Accumulator; #[derive(Clone, Debug)] pub(crate) struct Accumulated { values: Vec, } pub(crate) trait AnyAccumulated: Any + Send + Sync { fn as_dyn_any(&self) -> &dyn Any; fn as_dyn_any_mut(&mut self) -> &mut dyn Any; } impl Accumulated { pub fn push(&mut self, value: A) { self.values.push(value); } pub fn extend_with_accumulated<'slf>(&'slf self, values: &mut Vec<&'slf A>) { values.extend(&self.values); } } impl Default for Accumulated { fn default() -> Self { Self { values: Default::default(), } } } impl AnyAccumulated for Accumulated where A: Accumulator, { fn as_dyn_any(&self) -> &dyn Any { self } fn as_dyn_any_mut(&mut self) -> &mut dyn Any { self } } impl dyn AnyAccumulated { pub fn accumulate(&mut self, value: A) { self.as_dyn_any_mut() .downcast_mut::>() .unwrap() .push(value); } } salsa-0.23.0/src/accumulator/accumulated_map.rs000064400000000000000000000100401046102023000176040ustar 00000000000000use 
std::ops; use rustc_hash::FxBuildHasher; use crate::accumulator::accumulated::Accumulated; use crate::accumulator::{Accumulator, AnyAccumulated}; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::IngredientIndex; #[derive(Default)] pub struct AccumulatedMap { map: hashbrown::HashMap, FxBuildHasher>, } impl std::fmt::Debug for AccumulatedMap { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("AccumulatedMap") .field("map", &self.map.keys()) .finish() } } impl AccumulatedMap { pub fn accumulate(&mut self, index: IngredientIndex, value: A) { self.map .entry(index) .or_insert_with(|| >>::default()) .accumulate(value); } pub fn extend_with_accumulated<'slf, A: Accumulator>( &'slf self, index: IngredientIndex, output: &mut Vec<&'slf A>, ) { let Some(a) = self.map.get(&index) else { return; }; a.as_dyn_any() .downcast_ref::>() .unwrap() .extend_with_accumulated(output); } pub fn is_empty(&self) -> bool { self.map.is_empty() } pub fn clear(&mut self) { self.map.clear() } pub fn allocation_size(&self) -> usize { self.map.allocation_size() } } /// Tracks whether any input read during a query's execution has any accumulated values. /// /// Knowning whether any input has accumulated values makes aggregating the accumulated values /// cheaper because we can skip over entire subtrees. #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] pub enum InputAccumulatedValues { /// The query nor any of its inputs have any accumulated values. #[default] Empty, /// The query or any of its inputs have at least one accumulated value. Any, } impl InputAccumulatedValues { pub const fn is_any(self) -> bool { matches!(self, Self::Any) } pub const fn is_empty(self) -> bool { matches!(self, Self::Empty) } pub fn or_else(self, other: impl FnOnce() -> Self) -> Self { if self.is_any() { Self::Any } else { other() } } } impl ops::BitOr for InputAccumulatedValues { type Output = Self; fn bitor(self, rhs: Self) -> Self::Output { match (self, rhs) { (Self::Any, _) | (_, Self::Any) => Self::Any, (Self::Empty, Self::Empty) => Self::Empty, } } } impl ops::BitOrAssign for InputAccumulatedValues { fn bitor_assign(&mut self, rhs: Self) { *self = *self | rhs; } } #[derive(Debug, Default)] pub struct AtomicInputAccumulatedValues(AtomicBool); impl Clone for AtomicInputAccumulatedValues { fn clone(&self) -> Self { Self(AtomicBool::new(self.0.load(Ordering::Relaxed))) } } impl AtomicInputAccumulatedValues { pub(crate) fn new(accumulated_inputs: InputAccumulatedValues) -> Self { Self(AtomicBool::new(accumulated_inputs.is_any())) } pub(crate) fn store(&self, accumulated: InputAccumulatedValues) { self.0.store(accumulated.is_any(), Ordering::Release); } pub(crate) fn load(&self) -> InputAccumulatedValues { if self.0.load(Ordering::Acquire) { InputAccumulatedValues::Any } else { InputAccumulatedValues::Empty } } } #[cfg(test)] mod tests { use super::*; #[test] fn atomic_input_accumulated_values() { let val = AtomicInputAccumulatedValues::new(InputAccumulatedValues::Empty); assert_eq!(val.load(), InputAccumulatedValues::Empty); val.store(InputAccumulatedValues::Any); assert_eq!(val.load(), InputAccumulatedValues::Any); let val = AtomicInputAccumulatedValues::new(InputAccumulatedValues::Any); assert_eq!(val.load(), InputAccumulatedValues::Any); val.store(InputAccumulatedValues::Empty); assert_eq!(val.load(), InputAccumulatedValues::Empty); } } salsa-0.23.0/src/accumulator.rs000064400000000000000000000067021046102023000144720ustar 00000000000000//! Basic test of accumulator functionality. 
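//!
//! A minimal usage sketch (hypothetical names; the `#[salsa::accumulator]` attribute and
//! the generated `accumulated` helper appear in the `calc` and `lazy-input` examples):
//!
//! ```ignore
//! use salsa::Accumulator;
//!
//! #[salsa::accumulator]
//! struct Diagnostic(String);
//!
//! #[salsa::tracked]
//! fn check(db: &dyn salsa::Database, input: MyInput) {
//!     Diagnostic("something went wrong".to_string()).accumulate(db);
//! }
//!
//! // Later: collect everything accumulated by `check` and the queries it called.
//! // let diagnostics = check::accumulated::<Diagnostic>(&db, input);
//! ```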
use std::any::{Any, TypeId}; use std::fmt; use std::marker::PhantomData; use std::panic::UnwindSafe; use accumulated::{Accumulated, AnyAccumulated}; use crate::cycle::CycleHeads; use crate::function::VerifyResult; use crate::ingredient::{Ingredient, Jar}; use crate::plumbing::{IngredientIndices, ZalsaLocal}; use crate::sync::Arc; use crate::table::memo::MemoTableTypes; use crate::zalsa::{IngredientIndex, Zalsa}; use crate::{Database, Id, Revision}; mod accumulated; pub(crate) mod accumulated_map; /// Trait implemented on the struct that user annotated with `#[salsa::accumulator]`. /// The `Self` type is therefore the types to be accumulated. pub trait Accumulator: Send + Sync + Any + Sized + UnwindSafe { const DEBUG_NAME: &'static str; /// Accumulate an instance of this in the database for later retrieval. fn accumulate(self, db: &Db) where Db: ?Sized + Database; } pub struct JarImpl { phantom: PhantomData, } impl Default for JarImpl { fn default() -> Self { Self { phantom: Default::default(), } } } impl Jar for JarImpl { fn create_ingredients( _zalsa: &Zalsa, first_index: IngredientIndex, _dependencies: IngredientIndices, ) -> Vec> { vec![Box::new(>::new(first_index))] } fn id_struct_type_id() -> TypeId { TypeId::of::() } } pub struct IngredientImpl { index: IngredientIndex, phantom: PhantomData>, } impl IngredientImpl { /// Find the accumulator ingredient for `A` in the database, if any. pub fn from_zalsa(zalsa: &Zalsa) -> Option<&Self> { let index = zalsa.lookup_jar_by_type::>().get_or_create(); let ingredient = zalsa.lookup_ingredient(index).assert_type::(); Some(ingredient) } pub fn new(index: IngredientIndex) -> Self { Self { index, phantom: PhantomData, } } pub fn push(&self, zalsa_local: &ZalsaLocal, value: A) { if let Err(()) = zalsa_local.accumulate(self.index, value) { panic!("cannot accumulate values outside of an active tracked function"); } } pub fn index(&self) -> IngredientIndex { self.index } } impl Ingredient for IngredientImpl { fn location(&self) -> &'static crate::ingredient::Location { &const { crate::ingredient::Location { file: file!(), line: line!(), } } } fn ingredient_index(&self) -> IngredientIndex { self.index } unsafe fn maybe_changed_after( &self, _db: &dyn Database, _input: Id, _revision: Revision, _cycle_heads: &mut CycleHeads, ) -> VerifyResult { panic!("nothing should ever depend on an accumulator directly") } fn debug_name(&self) -> &'static str { A::DEBUG_NAME } fn memo_table_types(&self) -> Arc { unreachable!("accumulator does not allocate pages") } } impl std::fmt::Debug for IngredientImpl where A: Accumulator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct(std::any::type_name::()) .field("index", &self.index) .finish() } } salsa-0.23.0/src/active_query.rs000064400000000000000000000370251046102023000146550ustar 00000000000000use std::{fmt, mem, ops}; use crate::accumulator::accumulated_map::{ AccumulatedMap, AtomicInputAccumulatedValues, InputAccumulatedValues, }; use crate::cycle::{CycleHeads, IterationCount}; use crate::durability::Durability; use crate::hash::FxIndexSet; use crate::key::DatabaseKeyIndex; use crate::runtime::Stamp; use crate::sync::atomic::AtomicBool; use crate::tracked_struct::{Disambiguator, DisambiguatorMap, IdentityHash, IdentityMap}; use crate::zalsa_local::{QueryEdge, QueryOrigin, QueryRevisions, QueryRevisionsExtra}; use crate::{Accumulator, IngredientIndex, Revision}; #[derive(Debug)] pub(crate) struct ActiveQuery { /// What query is executing pub(crate) database_key_index: DatabaseKeyIndex, /// 
Minimum durability of inputs observed so far. durability: Durability, /// Maximum revision of all inputs observed. If we observe an /// untracked read, this will be set to the most recent revision. changed_at: Revision, /// Inputs: Set of subqueries that were accessed thus far. /// Outputs: Tracks values written by this query. Could be... /// /// * tracked structs created /// * invocations of `specify` /// * accumulators pushed to input_outputs: FxIndexSet, /// True if there was an untracked read. untracked_read: bool, /// When new tracked structs are created, their data is hashed, and the resulting /// hash is added to this map. If it is not present, then the disambiguator is 0. /// Otherwise it is 1 more than the current value (which is incremented). /// /// This table starts empty as the query begins and is gradually populated. /// Note that if a query executes in 2 different revisions but creates the same /// set of tracked structs, they will get the same disambiguator values. disambiguator_map: DisambiguatorMap, /// Map from tracked struct keys (which include the hash + disambiguator) to their /// final id. tracked_struct_ids: IdentityMap, /// Stores the values accumulated to the given ingredient. /// The type of accumulated value is erased but known to the ingredient. accumulated: AccumulatedMap, /// [`InputAccumulatedValues::Empty`] if any input read during the query's execution /// has any accumulated values. accumulated_inputs: InputAccumulatedValues, /// Provisional cycle results that this query depends on. cycle_heads: CycleHeads, /// If this query is a cycle head, iteration count of that cycle. iteration_count: IterationCount, } impl ActiveQuery { pub(super) fn seed_iteration( &mut self, durability: Durability, changed_at: Revision, edges: &[QueryEdge], untracked_read: bool, ) { assert!(self.input_outputs.is_empty()); self.input_outputs = edges.iter().cloned().collect(); self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(changed_at); self.untracked_read |= untracked_read; } pub(super) fn add_read( &mut self, input: DatabaseKeyIndex, durability: Durability, changed_at: Revision, has_accumulated: bool, accumulated_inputs: &AtomicInputAccumulatedValues, cycle_heads: &CycleHeads, ) { self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(changed_at); self.input_outputs.insert(QueryEdge::input(input)); self.accumulated_inputs = self.accumulated_inputs.or_else(|| match has_accumulated { true => InputAccumulatedValues::Any, false => accumulated_inputs.load(), }); self.cycle_heads.extend(cycle_heads); } pub(super) fn add_read_simple( &mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision, ) { self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); self.input_outputs.insert(QueryEdge::input(input)); } pub(super) fn add_untracked_read(&mut self, changed_at: Revision) { self.untracked_read = true; self.durability = Durability::MIN; self.changed_at = changed_at; } pub(super) fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { self.untracked_read = true; self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } pub(super) fn accumulate(&mut self, index: IngredientIndex, value: impl Accumulator) { self.accumulated.accumulate(index, value); } /// Adds a key to our list of outputs. 
pub(super) fn add_output(&mut self, key: DatabaseKeyIndex) { self.input_outputs.insert(QueryEdge::output(key)); } /// True if the given key was output by this query. pub(super) fn is_output(&self, key: DatabaseKeyIndex) -> bool { self.input_outputs.contains(&QueryEdge::output(key)) } pub(super) fn disambiguate(&mut self, key: IdentityHash) -> Disambiguator { self.disambiguator_map.disambiguate(key) } pub(super) fn stamp(&self) -> Stamp { Stamp { durability: self.durability, changed_at: self.changed_at, } } pub(super) fn iteration_count(&self) -> IterationCount { self.iteration_count } pub(crate) fn tracked_struct_ids(&self) -> &IdentityMap { &self.tracked_struct_ids } pub(crate) fn tracked_struct_ids_mut(&mut self) -> &mut IdentityMap { &mut self.tracked_struct_ids } } impl ActiveQuery { fn new(database_key_index: DatabaseKeyIndex, iteration_count: IterationCount) -> Self { ActiveQuery { database_key_index, durability: Durability::MAX, changed_at: Revision::start(), input_outputs: FxIndexSet::default(), untracked_read: false, disambiguator_map: Default::default(), tracked_struct_ids: Default::default(), accumulated: Default::default(), accumulated_inputs: Default::default(), cycle_heads: Default::default(), iteration_count, } } fn top_into_revisions(&mut self) -> QueryRevisions { let &mut Self { database_key_index: _, durability, changed_at, ref mut input_outputs, untracked_read, ref mut disambiguator_map, ref mut tracked_struct_ids, ref mut accumulated, accumulated_inputs, ref mut cycle_heads, iteration_count, } = self; let origin = if untracked_read { QueryOrigin::derived_untracked(input_outputs.drain(..)) } else { QueryOrigin::derived(input_outputs.drain(..)) }; disambiguator_map.clear(); let verified_final = cycle_heads.is_empty(); let extra = QueryRevisionsExtra::new( mem::take(accumulated), mem::take(tracked_struct_ids), mem::take(cycle_heads), iteration_count, ); let accumulated_inputs = AtomicInputAccumulatedValues::new(accumulated_inputs); QueryRevisions { changed_at, durability, origin, accumulated_inputs, verified_final: AtomicBool::new(verified_final), extra, } } fn clear(&mut self) { let Self { database_key_index: _, durability: _, changed_at: _, input_outputs, untracked_read: _, disambiguator_map, tracked_struct_ids, accumulated, accumulated_inputs: _, cycle_heads, iteration_count, } = self; input_outputs.clear(); disambiguator_map.clear(); tracked_struct_ids.clear(); accumulated.clear(); *cycle_heads = Default::default(); *iteration_count = IterationCount::initial(); } fn reset_for( &mut self, new_database_key_index: DatabaseKeyIndex, new_iteration_count: IterationCount, ) { let Self { database_key_index, durability, changed_at, input_outputs, untracked_read, disambiguator_map, tracked_struct_ids, accumulated, accumulated_inputs, cycle_heads, iteration_count, } = self; *database_key_index = new_database_key_index; *durability = Durability::MAX; *changed_at = Revision::start(); *untracked_read = false; *accumulated_inputs = Default::default(); *iteration_count = new_iteration_count; debug_assert!( input_outputs.is_empty(), "`ActiveQuery::clear` or `ActiveQuery::into_revisions` should've been called" ); debug_assert!( disambiguator_map.is_empty(), "`ActiveQuery::clear` or `ActiveQuery::into_revisions` should've been called" ); debug_assert!( tracked_struct_ids.is_empty(), "`ActiveQuery::clear` or `ActiveQuery::into_revisions` should've been called" ); debug_assert!( cycle_heads.is_empty(), "`ActiveQuery::clear` or `ActiveQuery::into_revisions` should've been called" ); 
debug_assert!( accumulated.is_empty(), "`ActiveQuery::clear` or `ActiveQuery::into_revisions` should've been called" ); } } #[derive(Default)] pub(crate) struct QueryStack { stack: Vec, len: usize, } impl std::fmt::Debug for QueryStack { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if f.alternate() { f.debug_list() .entries(self.stack.iter().map(|q| q.database_key_index)) .finish() } else { f.debug_struct("QueryStack") .field("stack", &self.stack) .field("len", &self.len) .finish() } } } impl ops::Deref for QueryStack { type Target = [ActiveQuery]; #[inline(always)] fn deref(&self) -> &Self::Target { &self.stack[..self.len] } } impl ops::DerefMut for QueryStack { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.stack[..self.len] } } impl QueryStack { pub(crate) fn push_new_query( &mut self, database_key_index: DatabaseKeyIndex, iteration_count: IterationCount, ) { if self.len < self.stack.len() { self.stack[self.len].reset_for(database_key_index, iteration_count); } else { self.stack .push(ActiveQuery::new(database_key_index, iteration_count)); } self.len += 1; } #[cfg(debug_assertions)] pub(crate) fn len(&self) -> usize { self.len } pub(crate) fn pop_into_revisions( &mut self, key: DatabaseKeyIndex, #[cfg(debug_assertions)] push_len: usize, ) -> QueryRevisions { #[cfg(debug_assertions)] assert_eq!(push_len, self.len(), "unbalanced push/pop"); debug_assert_ne!(self.len, 0, "too many pops"); self.len -= 1; debug_assert_eq!( self.stack[self.len].database_key_index, key, "unbalanced push/pop" ); self.stack[self.len].top_into_revisions() } pub(crate) fn pop(&mut self, key: DatabaseKeyIndex, #[cfg(debug_assertions)] push_len: usize) { #[cfg(debug_assertions)] assert_eq!(push_len, self.len(), "unbalanced push/pop"); debug_assert_ne!(self.len, 0, "too many pops"); self.len -= 1; debug_assert_eq!( self.stack[self.len].database_key_index, key, "unbalanced push/pop" ); self.stack[self.len].clear() } } struct CapturedQuery { database_key_index: DatabaseKeyIndex, durability: Durability, changed_at: Revision, cycle_heads: CycleHeads, iteration_count: IterationCount, } impl fmt::Debug for CapturedQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut debug_struct = f.debug_struct("CapturedQuery"); debug_struct .field("database_key_index", &self.database_key_index) .field("durability", &self.durability) .field("changed_at", &self.changed_at); if !self.cycle_heads.is_empty() { debug_struct .field("cycle_heads", &self.cycle_heads) .field("iteration_count", &self.iteration_count); } debug_struct.finish() } } pub struct Backtrace(Box<[CapturedQuery]>); impl Backtrace { pub fn capture() -> Option { crate::with_attached_database(|db| { db.zalsa_local().try_with_query_stack(|stack| { Backtrace( stack .iter() .rev() .map(|query| CapturedQuery { database_key_index: query.database_key_index, durability: query.durability, changed_at: query.changed_at, cycle_heads: query.cycle_heads.clone(), iteration_count: query.iteration_count, }) .collect(), ) }) })? 
} } impl fmt::Debug for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "Backtrace ")?; let mut dbg = fmt.debug_list(); for frame in &self.0 { dbg.entry(&frame); } dbg.finish() } } impl fmt::Display for Backtrace { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(fmt, "query stacktrace:")?; let full = fmt.alternate(); let indent = " "; for ( idx, &CapturedQuery { database_key_index, durability, changed_at, ref cycle_heads, iteration_count, }, ) in self.0.iter().enumerate() { write!(fmt, "{idx:>4}: {database_key_index:?}")?; if full { write!(fmt, " -> ({changed_at:?}, {durability:#?}")?; if !cycle_heads.is_empty() || !iteration_count.is_initial() { write!(fmt, ", iteration = {iteration_count:?}")?; } write!(fmt, ")")?; } writeln!(fmt)?; crate::attach::with_attached_database(|db| { let ingredient = db .zalsa() .lookup_ingredient(database_key_index.ingredient_index()); let loc = ingredient.location(); writeln!(fmt, "{indent}at {}:{}", loc.file, loc.line)?; if !cycle_heads.is_empty() { write!(fmt, "{indent}cycle heads: ")?; for (idx, head) in cycle_heads.iter().enumerate() { if idx != 0 { write!(fmt, ", ")?; } write!( fmt, "{:?} -> {:?}", head.database_key_index, head.iteration_count )?; } writeln!(fmt)?; } Ok(()) }) .transpose()?; } Ok(()) } } salsa-0.23.0/src/attach.rs000064400000000000000000000067521046102023000134240ustar 00000000000000use std::cell::Cell; use std::ptr::NonNull; use crate::Database; #[cfg(feature = "shuttle")] crate::sync::thread_local! { /// The thread-local state salsa requires for a given thread static ATTACHED: Attached = Attached::new(); } // shuttle's `thread_local` macro does not support const-initialization. #[cfg(not(feature = "shuttle"))] crate::sync::thread_local! { /// The thread-local state salsa requires for a given thread static ATTACHED: Attached = const { Attached::new() } } /// State that is specific to a single execution thread. /// /// Internally, this type uses ref-cells. /// /// **Note also that all mutations to the database handle (and hence /// to the local-state) must be undone during unwinding.** struct Attached { /// Pointer to the currently attached database. database: Cell>>, } impl Attached { const fn new() -> Self { Self { database: Cell::new(None), } } #[inline] fn attach(&self, db: &Db, op: impl FnOnce() -> R) -> R where Db: ?Sized + Database, { struct DbGuard<'s> { state: Option<&'s Attached>, } impl<'s> DbGuard<'s> { #[inline] fn new(attached: &'s Attached, db: &dyn Database) -> Self { match attached.database.get() { Some(current_db) => { let new_db = NonNull::from(db); if !std::ptr::addr_eq(current_db.as_ptr(), new_db.as_ptr()) { panic!("Cannot change database mid-query. current: {current_db:?}, new: {new_db:?}"); } Self { state: None } } None => { // Otherwise, set the database. attached.database.set(Some(NonNull::from(db))); Self { state: Some(attached), } } } } } impl Drop for DbGuard<'_> { #[inline] fn drop(&mut self) { // Reset database to null if we did anything in `DbGuard::new`. if let Some(attached) = self.state { attached.database.set(None); } } } let _guard = DbGuard::new(self, db.as_dyn_database()); op() } /// Access the "attached" database. Returns `None` if no database is attached. /// Databases are attached with `attach_database`. 
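    /// (The stored raw pointer is only dereferenced while the `attach` frame that
    /// installed it is still on the stack; see the `SAFETY` comment in the body.)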
#[inline] fn with(&self, op: impl FnOnce(&dyn Database) -> R) -> Option { let db = self.database.get()?; // SAFETY: We always attach the database in for the entire duration of a function, // so it cannot become "unattached" while this function is running. Some(op(unsafe { db.as_ref() })) } } /// Attach the database to the current thread and execute `op`. /// Panics if a different database has already been attached. #[inline] pub fn attach(db: &Db, op: impl FnOnce() -> R) -> R where Db: ?Sized + Database, { ATTACHED.with( #[inline] |a| a.attach(db, op), ) } /// Access the "attached" database. Returns `None` if no database is attached. /// Databases are attached with `attach_database`. #[inline] pub fn with_attached_database(op: impl FnOnce(&dyn Database) -> R) -> Option { ATTACHED.with( #[inline] |a| a.with(op), ) } salsa-0.23.0/src/cancelled.rs000064400000000000000000000030431046102023000140600ustar 00000000000000use std::fmt; use std::panic::{self, UnwindSafe}; /// A panic payload indicating that execution of a salsa query was cancelled. /// /// This can occur for a few reasons: /// * /// * /// * #[derive(Debug)] #[non_exhaustive] pub enum Cancelled { /// The query was operating on revision R, but there is a pending write to move to revision R+1. #[non_exhaustive] PendingWrite, /// The query was blocked on another thread, and that thread panicked. #[non_exhaustive] PropagatedPanic, } impl Cancelled { pub(crate) fn throw(self) -> ! { // We use resume and not panic here to avoid running the panic // hook (that is, to avoid collecting and printing backtrace). panic::resume_unwind(Box::new(self)); } /// Runs `f`, and catches any salsa cancellation. pub fn catch(f: F) -> Result where F: FnOnce() -> T + UnwindSafe, { match panic::catch_unwind(f) { Ok(t) => Ok(t), Err(payload) => match payload.downcast() { Ok(cancelled) => Err(*cancelled), Err(payload) => panic::resume_unwind(payload), }, } } } impl std::fmt::Display for Cancelled { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let why = match self { Cancelled::PendingWrite => "pending write", Cancelled::PropagatedPanic => "propagated panic", }; f.write_str("cancelled because of ")?; f.write_str(why) } } impl std::error::Error for Cancelled {} salsa-0.23.0/src/cycle.rs000064400000000000000000000234711046102023000132540ustar 00000000000000//! Cycle handling //! //! Salsa's default cycle handling is quite simple: if we encounter a cycle (that is, if we attempt //! to execute a query that is already on the active query stack), we panic. //! //! By setting `cycle_fn` and `cycle_initial` arguments to `salsa::tracked`, queries can opt-in to //! fixed-point iteration instead. //! //! We call the query which triggers the cycle (that is, the query that is already on the stack //! when it is called again) the "cycle head". The cycle head is responsible for managing iteration //! of the cycle. When a cycle is encountered, if the cycle head has `cycle_fn` and `cycle_initial` //! set, it will call the `cycle_initial` function to generate an "empty" or "initial" value for //! fixed-point iteration, which will be returned to its caller. Then each query in the cycle will //! compute a value normally, but every computed value will track the head(s) of the cycles it is //! part of. Every query's "cycle heads" are the union of all the cycle heads of all the queries it //! depends on. A memoized query result with cycle heads is called a "provisional value". //! //! 
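//! A sketch of how a query opts into fixpoint iteration (names and exact signatures
//! here are illustrative, not prescriptive; the crate's cycle tests show canonical usage):
//!
//! ```ignore
//! #[salsa::tracked(cycle_fn = recover, cycle_initial = initial)]
//! fn query(db: &dyn Db, input: MyInput) -> u8 { /* may re-enter `query` */ }
//!
//! // Produces the "empty" value returned when the cycle is first encountered.
//! fn initial(_db: &dyn Db, _input: MyInput) -> u8 { 0 }
//!
//! // Runs on the cycle head after an iteration, deciding whether to keep going.
//! fn recover(_db: &dyn Db, _value: &u8, _count: u32, _input: MyInput)
//!     -> salsa::CycleRecoveryAction<u8> {
//!     salsa::CycleRecoveryAction::Iterate
//! }
//! ```
//!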
For example, if `qa` calls `qb`, and `qb` calls `qc`, and `qc` calls `qa`, then `qa` will call //! its `cycle_initial` function to get an initial value, and return that as its result to `qc`, //! marked with `qa` as cycle head. `qc` will compute its own provisional result based on that, and //! return to `qb` a result also marked with `qa` as cycle head. `qb` will similarly compute and //! return a provisional value back to `qa`. //! //! When a query observes that it has just computed a result which contains itself as a cycle head, //! it recognizes that it is responsible for resolving this cycle and calls its `cycle_fn` to //! decide how to do so. The `cycle_fn` function is passed the provisional value just computed for //! that query and the count of iterations so far, and must return either //! `CycleRecoveryAction::Iterate` (which signals that the cycle head should re-iterate the cycle), //! or `CycleRecoveryAction::Fallback` (which signals that the cycle head should replace its //! computed value with the given fallback value). //! //! If the cycle head ever observes that the provisional value it just recomputed is the same as //! the provisional value from the previous iteration, the cycle has converged. The cycle head will //! mark that value as final (by removing itself as cycle head) and return it. //! //! Other queries in the cycle will still have provisional values recorded, but those values should //! now also be considered final! We don't eagerly walk the entire cycle to mark them final. //! Instead, we wait until the next time that provisional value is read, and then we check if all //! of its cycle heads have a final result, in which case it, too, can be marked final. (This is //! implemented in `shallow_verify_memo` and `validate_provisional`.) //! //! If the `cycle_fn` returns a fallback value, the cycle head will replace its provisional value //! with that fallback, and then iterate the cycle one more time. A fallback value is expected to //! result in a stable, converged cycle. If it does not (that is, if the result of another //! iteration of the cycle is not the same as the fallback value), we'll panic. //! //! In nested cycle cases, the inner cycle head will iterate until its own cycle is resolved, but //! the "final" value it then returns will still be provisional on the outer cycle head. The outer //! cycle head may then iterate, which may result in a new set of iterations on the inner cycle, //! for each iteration of the outer cycle. use thin_vec::{thin_vec, ThinVec}; use crate::key::DatabaseKeyIndex; use crate::sync::OnceLock; /// The maximum number of times we'll fixpoint-iterate before panicking. /// /// Should only be relevant in case of a badly configured cycle recovery. pub const MAX_ITERATIONS: IterationCount = IterationCount(200); /// Return value from a cycle recovery function. #[derive(Debug)] pub enum CycleRecoveryAction { /// Iterate the cycle again to look for a fixpoint. Iterate, /// Cut off iteration and use the given result value for this query. Fallback(T), } /// Cycle recovery strategy: Is this query capable of recovering from /// a cycle that results from executing the function? If so, how? #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum CycleRecoveryStrategy { /// Cannot recover from cycles: panic. /// /// This is the default. Panic, /// Recovers from cycles by fixpoint iterating and/or falling /// back to a sentinel value. /// /// This choice is computed by the query's `cycle_recovery` /// function and initial value. 
Fixpoint, /// Recovers from cycles by inserting a fallback value for all /// queries that have a fallback, and ignoring any other query /// in the cycle (as if they were not computed). FallbackImmediate, } /// A "cycle head" is the query at which we encounter a cycle; that is, if A -> B -> C -> A, then A /// would be the cycle head. It returns an "initial value" when the cycle is encountered (if /// fixpoint iteration is enabled for that query), and then is responsible for re-iterating the /// cycle until it converges. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct CycleHead { pub(crate) database_key_index: DatabaseKeyIndex, pub(crate) iteration_count: IterationCount, } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct IterationCount(u8); impl IterationCount { pub(crate) const fn initial() -> Self { Self(0) } pub(crate) const fn is_initial(self) -> bool { self.0 == 0 } pub(crate) const fn increment(self) -> Option { let next = Self(self.0 + 1); if next.0 <= MAX_ITERATIONS.0 { Some(next) } else { None } } pub(crate) const fn as_u32(self) -> u32 { self.0 as u32 } } /// Any provisional value generated by any query in a cycle will track the cycle head(s) (can be /// plural in case of nested cycles) representing the cycles it is part of, and the current /// iteration count for each cycle head. This struct tracks these cycle heads. #[derive(Clone, Debug, Default)] pub struct CycleHeads(ThinVec); impl CycleHeads { pub(crate) fn is_empty(&self) -> bool { self.0.is_empty() } pub(crate) fn initial(database_key_index: DatabaseKeyIndex) -> Self { Self(thin_vec![CycleHead { database_key_index, iteration_count: IterationCount::initial(), }]) } pub(crate) fn iter(&self) -> std::slice::Iter<'_, CycleHead> { self.0.iter() } pub(crate) fn contains(&self, value: &DatabaseKeyIndex) -> bool { self.into_iter() .any(|head| head.database_key_index == *value) } pub(crate) fn remove(&mut self, value: &DatabaseKeyIndex) -> bool { let found = self .0 .iter() .position(|&head| head.database_key_index == *value); let Some(found) = found else { return false }; self.0.swap_remove(found); true } pub(crate) fn update_iteration_count( &mut self, cycle_head_index: DatabaseKeyIndex, new_iteration_count: IterationCount, ) { if let Some(cycle_head) = self .0 .iter_mut() .find(|cycle_head| cycle_head.database_key_index == cycle_head_index) { cycle_head.iteration_count = new_iteration_count; } } #[inline] pub(crate) fn push_initial(&mut self, database_key_index: DatabaseKeyIndex) { if let Some(existing) = self .0 .iter() .find(|candidate| candidate.database_key_index == database_key_index) { assert_eq!(existing.iteration_count, IterationCount::initial()); } else { self.0.push(CycleHead { database_key_index, iteration_count: IterationCount::initial(), }); } } #[inline] pub(crate) fn extend(&mut self, other: &Self) { self.0.reserve(other.0.len()); for head in other { if let Some(existing) = self .0 .iter() .find(|candidate| candidate.database_key_index == head.database_key_index) { assert_eq!(existing.iteration_count, head.iteration_count); } else { self.0.push(*head); } } } #[cfg(feature = "salsa_unstable")] pub(crate) fn allocation_size(&self) -> usize { std::mem::size_of_val(self.0.as_slice()) } } impl IntoIterator for CycleHeads { type Item = CycleHead; type IntoIter = as IntoIterator>::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl<'a> std::iter::IntoIterator for &'a CycleHeads { type Item = &'a CycleHead; type IntoIter = std::slice::Iter<'a, CycleHead>; fn 
into_iter(self) -> Self::IntoIter { self.iter() } } impl From for CycleHeads { fn from(value: CycleHead) -> Self { Self(thin_vec![value]) } } #[inline] pub(crate) fn empty_cycle_heads() -> &'static CycleHeads { static EMPTY_CYCLE_HEADS: OnceLock = OnceLock::new(); EMPTY_CYCLE_HEADS.get_or_init(|| CycleHeads(ThinVec::new())) } #[derive(Debug, PartialEq, Eq)] pub enum ProvisionalStatus { Provisional { iteration: IterationCount }, Final { iteration: IterationCount }, FallbackImmediate, } impl ProvisionalStatus { pub(crate) const fn iteration(&self) -> Option { match self { ProvisionalStatus::Provisional { iteration } => Some(*iteration), ProvisionalStatus::Final { iteration } => Some(*iteration), ProvisionalStatus::FallbackImmediate => None, } } } salsa-0.23.0/src/database.rs000064400000000000000000000205321046102023000137140ustar 00000000000000use std::any::Any; use std::borrow::Cow; use crate::zalsa::{IngredientIndex, ZalsaDatabase}; use crate::{Durability, Revision}; /// The trait implemented by all Salsa databases. /// You can create your own subtraits of this trait using the `#[salsa::db]`(`crate::db`) procedural macro. pub trait Database: Send + AsDynDatabase + Any + ZalsaDatabase { /// Enforces current LRU limits, evicting entries if necessary. /// /// **WARNING:** Just like an ordinary write, this method triggers /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. fn trigger_lru_eviction(&mut self) { let zalsa_mut = self.zalsa_mut(); zalsa_mut.evict_lru(); } /// A "synthetic write" causes the system to act *as though* some /// input of durability `durability` has changed, triggering a new revision. /// This is mostly useful for profiling scenarios. /// /// **WARNING:** Just like an ordinary write, this method triggers /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. fn synthetic_write(&mut self, durability: Durability) { let zalsa_mut = self.zalsa_mut(); zalsa_mut.new_revision(); zalsa_mut.runtime_mut().report_tracked_write(durability); } /// Reports that the query depends on some state unknown to salsa. /// /// Queries which report untracked reads will be re-executed in the next /// revision. fn report_untracked_read(&self) { let (zalsa, zalsa_local) = self.zalsas(); zalsa_local.report_untracked_read(zalsa.current_revision()) } /// Return the "debug name" (i.e., the struct name, etc) for an "ingredient", /// which are the fine-grained components we use to track data. This is intended /// for debugging and the contents of the returned string are not semver-guaranteed. /// /// Ingredient indices can be extracted from [`DatabaseKeyIndex`](`crate::DatabaseKeyIndex`) values. fn ingredient_debug_name(&self, ingredient_index: IngredientIndex) -> Cow<'_, str> { Cow::Borrowed( self.zalsa() .lookup_ingredient(ingredient_index) .debug_name(), ) } /// Starts unwinding the stack if the current revision is cancelled. /// /// This method can be called by query implementations that perform /// potentially expensive computations, in order to speed up propagation of /// cancellation. /// /// Cancellation will automatically be triggered by salsa on any query /// invocation. /// /// This method should not be overridden by `Database` implementors. 
A /// `salsa_event` is emitted when this method is called, so that should be /// used instead. fn unwind_if_revision_cancelled(&self) { let (zalsa, zalsa_local) = self.zalsas(); zalsa.unwind_if_revision_cancelled(zalsa_local); } /// Execute `op` with the database in thread-local storage for debug print-outs. #[inline(always)] fn attach(&self, op: impl FnOnce(&Self) -> R) -> R where Self: Sized, { crate::attach::attach(self, || op(self)) } #[doc(hidden)] #[inline(always)] fn zalsa_register_downcaster(&self) { // The no-op downcaster is special cased in view caster construction. } #[doc(hidden)] #[inline(always)] unsafe fn downcast(db: &dyn Database) -> &dyn Database where Self: Sized, { // No-op db } } /// Upcast to a `dyn Database`. /// /// Only required because upcasts not yet stabilized (*grr*). pub trait AsDynDatabase { fn as_dyn_database(&self) -> &dyn Database; fn as_dyn_database_mut(&mut self) -> &mut dyn Database; } impl AsDynDatabase for T { #[inline(always)] fn as_dyn_database(&self) -> &dyn Database { self } #[inline(always)] fn as_dyn_database_mut(&mut self) -> &mut dyn Database { self } } pub fn current_revision(db: &Db) -> Revision { db.zalsa().current_revision() } impl dyn Database { /// Upcasts `self` to the given view. /// /// # Panics /// /// If the view has not been added to the database (see [`crate::views::Views`]). #[track_caller] pub fn as_view(&self) -> &DbView { let views = self.zalsa().views(); views.downcaster_for().downcast(self) } } #[cfg(feature = "salsa_unstable")] pub use memory_usage::IngredientInfo; #[cfg(feature = "salsa_unstable")] pub(crate) use memory_usage::{MemoInfo, SlotInfo}; #[cfg(feature = "salsa_unstable")] mod memory_usage { use crate::Database; use hashbrown::HashMap; impl dyn Database { /// Returns information about any Salsa structs. pub fn structs_info(&self) -> Vec { self.zalsa() .ingredients() .filter_map(|ingredient| { let mut size_of_fields = 0; let mut size_of_metadata = 0; let mut instances = 0; for slot in ingredient.memory_usage(self)? { instances += 1; size_of_fields += slot.size_of_fields; size_of_metadata += slot.size_of_metadata; } Some(IngredientInfo { count: instances, size_of_fields, size_of_metadata, debug_name: ingredient.debug_name(), }) }) .collect() } /// Returns information about any memoized Salsa queries. /// /// The returned map holds memory usage information for memoized values of a given query, keyed /// by the query function name. pub fn queries_info(&self) -> HashMap<&'static str, IngredientInfo> { let mut queries = HashMap::new(); for input_ingredient in self.zalsa().ingredients() { let Some(input_info) = input_ingredient.memory_usage(self) else { continue; }; for input in input_info { for memo in input.memos { let info = queries.entry(memo.debug_name).or_insert(IngredientInfo { debug_name: memo.output.debug_name, ..Default::default() }); info.count += 1; info.size_of_fields += memo.output.size_of_fields; info.size_of_metadata += memo.output.size_of_metadata; } } } queries } } /// Information about instances of a particular Salsa ingredient. #[derive(Default, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct IngredientInfo { debug_name: &'static str, count: usize, size_of_metadata: usize, size_of_fields: usize, } impl IngredientInfo { /// Returns the debug name of the ingredient. pub fn debug_name(&self) -> &'static str { self.debug_name } /// Returns the total size of the fields of any instances of this ingredient, in bytes. 
pub fn size_of_fields(&self) -> usize {
            self.size_of_fields
        }

        /// Returns the total size of Salsa metadata of any instances of this ingredient, in bytes.
        pub fn size_of_metadata(&self) -> usize {
            self.size_of_metadata
        }

        /// Returns the number of instances of this ingredient.
        pub fn count(&self) -> usize {
            self.count
        }
    }

    /// Memory usage information about a particular instance of struct, input or output.
    pub struct SlotInfo {
        pub(crate) debug_name: &'static str,
        pub(crate) size_of_metadata: usize,
        pub(crate) size_of_fields: usize,
        pub(crate) memos: Vec<MemoInfo>,
    }

    /// Memory usage information about a particular memo.
    pub struct MemoInfo {
        pub(crate) debug_name: &'static str,
        pub(crate) output: SlotInfo,
    }
}
salsa-0.23.0/src/database_impl.rs000064400000023751046102023000147420ustar 00000000000000use tracing::Level;

use crate::storage::HasStorage;
use crate::{Database, Storage};

/// Default database implementation that you can use if you don't
/// require any custom user data.
#[derive(Clone)]
pub struct DatabaseImpl {
    storage: Storage<Self>,
}

impl Default for DatabaseImpl {
    fn default() -> Self {
        Self {
            // Default behavior: tracing debug log the event.
            storage: Storage::new(if tracing::enabled!(Level::DEBUG) {
                Some(Box::new(|event| {
                    tracing::debug!("salsa_event({:?})", event)
                }))
            } else {
                None
            }),
        }
    }
}

impl DatabaseImpl {
    /// Create a new database; equivalent to `Self::default`.
    pub fn new() -> Self {
        Self::default()
    }

    pub fn storage(&self) -> &Storage<Self> {
        &self.storage
    }
}

impl Database for DatabaseImpl {}

// SAFETY: The `storage` and `storage_mut` methods return a reference to the same storage field owned by `self`.
unsafe impl HasStorage for DatabaseImpl {
    #[inline(always)]
    fn storage(&self) -> &Storage<Self> {
        &self.storage
    }

    #[inline(always)]
    fn storage_mut(&mut self) -> &mut Storage<Self> {
        &mut self.storage
    }
}
salsa-0.23.0/src/durability.rs000064400000064011046102023000143170ustar 00000000000000/// Describes how likely a value is to change—how "durable" it is.
///
/// By default, inputs have `Durability::LOW` and interned values have
/// `Durability::HIGH`. But inputs can be explicitly set with other
/// durabilities.
///
/// We use durabilities to optimize the work of "revalidating" a query
/// after some input has changed. Ordinarily, in a new revision,
/// queries have to trace all their inputs back to the base inputs to
/// determine if any of those inputs have changed. But if we know that
/// the only changes were to inputs of low durability (the common
/// case), and we know that the query only used inputs of medium
/// durability or higher, then we can skip that enumeration.
///
/// Typically, one assigns low durabilities to inputs that the user is
/// frequently editing. Medium or high durabilities are used for
/// configuration, the source from library crates, or other things
/// that are unlikely to be edited.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Durability(DurabilityVal);

impl std::fmt::Debug for Durability {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if f.alternate() {
            match self.0 {
                DurabilityVal::Low => f.write_str("Durability::LOW"),
                DurabilityVal::Medium => f.write_str("Durability::MEDIUM"),
                DurabilityVal::High => f.write_str("Durability::HIGH"),
            }
        } else {
            f.debug_tuple("Durability")
                .field(&(self.0 as usize))
                .finish()
        }
    }
}

// We use an enum here instead of a u8 for niches.
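// As a hedged usage sketch (the `Config` input and its `threads` field are
// hypothetical; the setter-builder calls follow salsa's `Setter` API and are
// illustrative rather than normative):
//
//     #[salsa::input]
//     struct Config {
//         threads: usize,
//     }
//
//     let config = Config::new(&db, 8);
//     // Configuration rarely changes, so record the write with high
//     // durability; revisions that only touch low-durability inputs can
//     // then skip re-enumerating the dependencies of queries that read
//     // only `config`.
//     config.set_threads(&mut db).with_durability(Durability::HIGH).to(16);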
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] enum DurabilityVal { Low = 0, Medium = 1, High = 2, } impl From for DurabilityVal { fn from(value: u8) -> Self { match value { 0 => DurabilityVal::Low, 1 => DurabilityVal::Medium, 2 => DurabilityVal::High, _ => panic!("invalid durability"), } } } impl Durability { /// Low durability: things that change frequently. /// /// Example: part of the crate being edited pub const LOW: Durability = Durability(DurabilityVal::Low); /// Medium durability: things that change sometimes, but rarely. /// /// Example: a Cargo.toml file pub const MEDIUM: Durability = Durability(DurabilityVal::Medium); /// High durability: things that are not expected to change under /// common usage. /// /// Example: the standard library or something from crates.io pub const HIGH: Durability = Durability(DurabilityVal::High); /// The minimum possible durability; equivalent to LOW but /// "conceptually" distinct (i.e., if we add more durability /// levels, this could change). pub(crate) const MIN: Durability = Self::LOW; /// The maximum possible durability; equivalent to HIGH but /// "conceptually" distinct (i.e., if we add more durability /// levels, this could change). pub(crate) const MAX: Durability = Self::HIGH; /// Number of durability levels. pub(crate) const LEN: usize = Self::HIGH.0 as usize + 1; pub(crate) fn index(self) -> usize { self.0 as usize } } impl Default for Durability { fn default() -> Self { Durability::LOW } } salsa-0.23.0/src/event.rs000064400000000000000000000100331046102023000132640ustar 00000000000000use crate::cycle::IterationCount; use crate::key::DatabaseKeyIndex; use crate::sync::thread::{self, ThreadId}; use crate::Revision; /// The `Event` struct identifies various notable things that can /// occur during salsa execution. Instances of this struct are given /// to `salsa_event`. #[derive(Debug)] pub struct Event { /// The id of the thread that triggered the event. pub thread_id: ThreadId, /// What sort of event was it. pub kind: EventKind, } impl Event { pub fn new(kind: EventKind) -> Self { Self { thread_id: thread::current().id(), kind, } } } /// An enum identifying the various kinds of events that can occur. #[derive(Debug)] pub enum EventKind { /// Occurs when we found that all inputs to a memoized value are /// up-to-date and hence the value can be re-used without /// executing the closure. /// /// Executes before the "re-used" value is returned. DidValidateMemoizedValue { /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, /// Indicates that another thread (with id `other_thread_id`) is processing the /// given query (`database_key`), so we will block until they /// finish. /// /// Executes after we have registered with the other thread but /// before they have answered us. WillBlockOn { /// The id of the thread we will block on. other_thread_id: ThreadId, /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, /// Indicates that the function for this query will be executed. /// This is either because it has never executed before or because /// its inputs may be out of date. WillExecute { /// The database-key for the affected value. Implements `Debug`. database_key: DatabaseKeyIndex, }, WillIterateCycle { /// The database-key for the cycle head. Implements `Debug`. 
database_key: DatabaseKeyIndex, iteration_count: IterationCount, fell_back: bool, }, /// Indicates that `unwind_if_cancelled` was called and salsa will check if /// the current revision has been cancelled. WillCheckCancellation, /// Indicates that one [`Handle`](`crate::Handle`) has set the cancellation flag. /// When other active handles execute salsa methods, they will observe this flag /// and panic with a sentinel value of type [`Cancelled`](`crate::Cancelled`). DidSetCancellationFlag, /// Discovered that a query used to output a given output but no longer does. WillDiscardStaleOutput { /// Key for the query that is executing and which no longer outputs the given value. execute_key: DatabaseKeyIndex, /// Key for the query that is no longer output output_key: DatabaseKeyIndex, }, /// Tracked structs or memoized data were discarded (freed). DidDiscard { /// Value being discarded. key: DatabaseKeyIndex, }, /// Discarded accumulated data from a given fn DidDiscardAccumulated { /// The key of the fn that accumulated results executor_key: DatabaseKeyIndex, /// Accumulator that was accumulated into accumulator: DatabaseKeyIndex, }, /// Indicates that a value was newly interned. DidInternValue { // The key of the interned value. key: DatabaseKeyIndex, // The revision the value was interned in. revision: Revision, }, /// Indicates that a value was interned by reusing an existing slot. DidReuseInternedValue { // The key of the interned value. key: DatabaseKeyIndex, // The revision the value was interned in. revision: Revision, }, /// Indicates that a previously interned value was read in a new revision. DidValidateInternedValue { // The key of the interned value. key: DatabaseKeyIndex, // The revision the value was interned in. revision: Revision, }, } salsa-0.23.0/src/function/accumulated.rs000064400000000000000000000104051046102023000162620ustar 00000000000000use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues}; use crate::accumulator::{self}; use crate::function::{Configuration, IngredientImpl}; use crate::hash::FxHashSet; use crate::zalsa::ZalsaDatabase; use crate::zalsa_local::QueryOriginRef; use crate::{AsDynDatabase, DatabaseKeyIndex, Id}; impl IngredientImpl where C: Configuration, { /// Helper used by `accumulate` functions. Computes the results accumulated by `database_key_index` /// and its inputs. pub fn accumulated_by<'db, A>(&self, db: &'db C::DbView, key: Id) -> Vec<&'db A> where A: accumulator::Accumulator, { let (zalsa, zalsa_local) = db.zalsas(); // NOTE: We don't have a precise way to track accumulated values at present, // so we report any read of them as an untracked read. // // Like tracked struct fields, accumulated values are essentially a "side channel output" // from a tracked function, hence we can't report this as a read of the tracked function(s) // whose accumulated values we are probing, since the accumulated values may have changed // even when the main return value of the function has not changed. // // Unlike tracked struct fields, we don't have a distinct id or ingredient to represent // "the values of type A accumulated by tracked function X". Typically accumulated values // are read from outside of salsa anyway so this is not a big deal. 
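        // As a hedged sketch of the user-facing flow this method backs (the
        // `Diagnostic` accumulator and `check` query are hypothetical):
        //
        //     #[salsa::accumulator]
        //     struct Diagnostic(String);
        //
        //     #[salsa::tracked]
        //     fn check(db: &dyn Db, file: File) {
        //         Diagnostic(format!("problem in {file:?}")).accumulate(db);
        //     }
        //
        //     // From outside salsa: collect all `Diagnostic`s produced by
        //     // `check(file)` and everything it (transitively) called.
        //     let diags: Vec<&Diagnostic> = check::accumulated::<Diagnostic>(db, file);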
        zalsa_local.report_untracked_read(zalsa.current_revision());

        let Some(accumulator) = <accumulator::IngredientImpl<A>>::from_zalsa(zalsa) else {
            return vec![];
        };
        let mut output = vec![];

        // First ensure the result is up to date
        self.fetch(db, key);

        let db = db.as_dyn_database();
        let db_key = self.database_key_index(key);
        let mut visited: FxHashSet<DatabaseKeyIndex> = FxHashSet::default();
        let mut stack: Vec<DatabaseKeyIndex> = vec![db_key];

        // Do a depth-first search across the dependencies of `key`, reading the values accumulated by
        // each dependency.
        while let Some(k) = stack.pop() {
            // Already visited `k`?
            if !visited.insert(k) {
                continue;
            }

            let ingredient = zalsa.lookup_ingredient(k.ingredient_index());
            // Extend `output` with any values accumulated by `k`.
            let (accumulated_map, input) = ingredient.accumulated(db, k.key_index());
            if let Some(accumulated_map) = accumulated_map {
                accumulated_map.extend_with_accumulated(accumulator.index(), &mut output);
            }
            // Skip over the inputs because we know that the entire sub-graph has no accumulated values
            if input.is_empty() {
                continue;
            }

            // Find the inputs of `k` and push them onto the stack.
            //
            // Careful: to ensure the user gets a consistent ordering in their
            // output vector, we want to push in execution order, so reverse order to
            // ensure the first child that was executed will be the first child popped
            // from the stack.
            let Some(origin) = ingredient.origin(zalsa, k.key_index()) else {
                continue;
            };

            if let QueryOriginRef::Derived(edges) | QueryOriginRef::DerivedUntracked(edges) =
                origin
            {
                stack.reserve(edges.len());
            }

            stack.extend(
                origin
                    .inputs()
                    .filter_map(|input| TryInto::<DatabaseKeyIndex>::try_into(input).ok())
                    .rev(),
            );
            visited.reserve(stack.len());
        }

        output
    }

    pub(super) fn accumulated_map<'db>(
        &'db self,
        db: &'db C::DbView,
        key: Id,
    ) -> (Option<&'db AccumulatedMap>, InputAccumulatedValues) {
        let (zalsa, zalsa_local) = db.zalsas();
        // NEXT STEP: stash and refactor `fetch` to return an `&Memo` so we can make this work
        let memo = self.refresh_memo(db, zalsa, zalsa_local, key);
        (
            memo.revisions.accumulated(),
            memo.revisions.accumulated_inputs.load(),
        )
    }
}
salsa-0.23.0/src/function/backdate.rs000064400000041151046102023000155320ustar 00000000000000use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::zalsa_local::QueryRevisions;
use crate::DatabaseKeyIndex;

impl<C> IngredientImpl<C>
where
    C: Configuration,
{
    /// If the value/durability of this memo is equal to what is found in `revisions`/`value`,
    /// then update `revisions.changed_at` to match `self.revisions.changed_at`. This is invoked
    /// on an old memo when a new memo has been produced, to check whether anything has changed.
    pub(super) fn backdate_if_appropriate<'db>(
        &self,
        old_memo: &Memo<'db, C>,
        index: DatabaseKeyIndex,
        revisions: &mut QueryRevisions,
        value: &C::Output<'db>,
    ) {
        // We've seen issues where queries weren't re-validated when backdating provisional values
        // in ty. This is more of a bandaid because we're close to a release and don't have the time to prove
        // right now whether backdating could be made safe for queries participating in cycles.
        // TODO: Write a test that demonstrates that backdating queries participating in a cycle isn't safe
        // OR write many tests showing that it is (and fix the case where it didn't correctly account for that today).
        if !revisions.cycle_heads().is_empty() {
            return;
        }

        if let Some(old_value) = &old_memo.value {
            // Careful: if the value became less durable than it
            // used to be, that is a "breaking change" that our
            // consumers must be aware of. Becoming *more* durable
            // is not. See the test `durable_to_less_durable`.
            if revisions.durability >= old_memo.revisions.durability
                && C::values_equal(old_value, value)
            {
                tracing::debug!(
                    "{index:?} value is equal, back-dating to {:?}",
                    old_memo.revisions.changed_at,
                );

                assert!(old_memo.revisions.changed_at <= revisions.changed_at);
                revisions.changed_at = old_memo.revisions.changed_at;
            }
        }
    }
}
salsa-0.23.0/src/function/delete.rs000064400000033561046102023000152440ustar 00000000000000use std::ptr::NonNull;

use crate::function::memo::Memo;
use crate::function::Configuration;

/// Stores the list of memos that have been deleted so they can be freed
/// once the next revision starts. See the comment on the field
/// `deleted_entries` of [`FunctionIngredient`][] for more details.
pub(super) struct DeletedEntries<C: Configuration> {
    memos: boxcar::Vec<SharedBox<Memo<'static, C>>>,
}

#[allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety
unsafe impl<T> Send for SharedBox<T> {}
#[allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety
unsafe impl<T> Sync for SharedBox<T> {}

impl<C: Configuration> Default for DeletedEntries<C> {
    fn default() -> Self {
        Self {
            memos: Default::default(),
        }
    }
}

impl<C: Configuration> DeletedEntries<C> {
    /// # Safety
    ///
    /// The memo must be valid and safe to free when the `DeletedEntries` list is cleared or dropped.
    pub(super) unsafe fn push(&self, memo: NonNull<Memo<'_, C>>) {
        // Safety: The memo must be valid and safe to free when the `DeletedEntries` list is cleared or dropped.
        let memo = unsafe {
            std::mem::transmute::<NonNull<Memo<'_, C>>, NonNull<Memo<'static, C>>>(memo)
        };
        self.memos.push(SharedBox(memo));
    }

    /// Free all deleted memos, keeping the list available for reuse.
    pub(super) fn clear(&mut self) {
        self.memos.clear();
    }
}

/// A wrapper around `NonNull` that frees the allocation when it is dropped.
struct SharedBox<T>(NonNull<T>);

impl<T> Drop for SharedBox<T> {
    fn drop(&mut self) {
        // SAFETY: Guaranteed by the caller of `DeletedEntries::push`.
        unsafe { drop(Box::from_raw(self.0.as_ptr())) };
    }
}
salsa-0.23.0/src/function/diff_outputs.rs000064400000054761046102023000165100ustar 00000000000000use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::hash::FxIndexSet;
use crate::zalsa::Zalsa;
use crate::zalsa_local::{output_edges, QueryOriginRef, QueryRevisions};
use crate::{DatabaseKeyIndex, Event, EventKind, Id};

impl<C> IngredientImpl<C>
where
    C: Configuration,
{
    /// Compute the old and new outputs and invoke `remove_stale_output`
    /// for each output that was generated before but is not generated now.
    ///
    /// This function takes a `&mut` reference to `revisions` to remove outputs
    /// that no longer exist in this revision from [`QueryRevisions::tracked_struct_ids`].
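    ///
    /// As a hedged sketch of the situation this handles (the `parse` query and
    /// its types are hypothetical):
    ///
    /// ```ignore
    /// #[salsa::tracked]
    /// fn parse(db: &dyn Db, file: File) -> Option<Ast<'_>> {
    ///     if file.text(db).is_empty() {
    ///         None
    ///     } else {
    ///         // `Ast` is a tracked struct, i.e. an *output* of `parse`. If a
    ///         // later revision empties the file, this output is no longer
    ///         // created; `diff_outputs` then reports it as stale and drops it
    ///         // from the tracked-struct ids seeded into the next revision.
    ///         Some(Ast::new(db, file))
    ///     }
    /// }
    /// ```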
pub(super) fn diff_outputs( &self, zalsa: &Zalsa, key: DatabaseKeyIndex, old_memo: &Memo<'_, C>, revisions: &mut QueryRevisions, ) { let (QueryOriginRef::Derived(edges) | QueryOriginRef::DerivedUntracked(edges)) = old_memo.revisions.origin.as_ref() else { return; }; // Iterate over the outputs of the `old_memo` and put them into a hashset // // Ignore key_generation here, because we use the same tracked struct allocation for // all generations with the same key_index and can't report it as stale let mut old_outputs: FxIndexSet<_> = output_edges(edges) .map(|a| (a.ingredient_index(), a.key_index().index())) .collect(); if old_outputs.is_empty() { return; } // Iterate over the outputs of the current query // and remove elements from `old_outputs` when we find them for new_output in revisions.origin.as_ref().outputs() { old_outputs.swap_remove(&( new_output.ingredient_index(), new_output.key_index().index(), )); } // Remove the outputs that are no longer present in the current revision // to prevent that the next revision is seeded with an id mapping that no longer exists. if let Some(tracked_struct_ids) = revisions.tracked_struct_ids_mut() { tracked_struct_ids .retain(|(k, value)| !old_outputs.contains(&(k.ingredient_index(), value.index()))); }; for (ingredient_index, key_index) in old_outputs { // SAFETY: key_index acquired from valid output let id = unsafe { Id::from_index(key_index) }; Self::report_stale_output(zalsa, key, DatabaseKeyIndex::new(ingredient_index, id)); } } fn report_stale_output(zalsa: &Zalsa, key: DatabaseKeyIndex, output: DatabaseKeyIndex) { zalsa.event(&|| { Event::new(EventKind::WillDiscardStaleOutput { execute_key: key, output_key: output, }) }); output.remove_stale_output(zalsa, key); } } salsa-0.23.0/src/function/execute.rs000064400000000000000000000330521046102023000154400ustar 00000000000000use crate::cycle::{CycleRecoveryStrategy, IterationCount}; use crate::function::memo::Memo; use crate::function::{Configuration, IngredientImpl}; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase}; use crate::zalsa_local::{ActiveQueryGuard, QueryRevisions}; use crate::{Event, EventKind, Id, Revision}; impl IngredientImpl where C: Configuration, { /// Executes the query function for the given `active_query`. Creates and stores /// a new memo with the result, backdated if possible. Once this completes, /// the query will have been popped off the active query stack. /// /// # Parameters /// /// * `db`, the database. /// * `active_query`, the active stack frame for the query to execute. /// * `opt_old_memo`, the older memo, if any existed. Used for backdating. 
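    ///
    /// A hedged sketch of the backdating this enables (the `line_count` query
    /// is hypothetical):
    ///
    /// ```ignore
    /// #[salsa::tracked]
    /// fn line_count(db: &dyn Db, file: File) -> usize {
    ///     file.text(db).lines().count()
    /// }
    /// // If `file.text` changes from "a\nb" to "c\nd", `line_count` re-executes
    /// // and produces an equal value (2), so the new memo keeps the old memo's
    /// // `changed_at` and queries that depend only on `line_count` stay valid.
    /// ```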
#[inline(never)] pub(super) fn execute<'db>( &'db self, db: &'db C::DbView, active_query: ActiveQueryGuard<'db>, opt_old_memo: Option<&Memo<'db, C>>, ) -> &'db Memo<'db, C> { let database_key_index = active_query.database_key_index; let id = database_key_index.key_index(); tracing::info!("{:?}: executing query", database_key_index); let zalsa = db.zalsa(); zalsa.event(&|| { Event::new(EventKind::WillExecute { database_key: database_key_index, }) }); let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); let (new_value, mut revisions) = match C::CYCLE_STRATEGY { CycleRecoveryStrategy::Panic => { Self::execute_query(db, active_query, opt_old_memo, zalsa.current_revision(), id) } CycleRecoveryStrategy::FallbackImmediate => { let (mut new_value, mut revisions) = Self::execute_query( db, active_query, opt_old_memo, zalsa.current_revision(), id, ); if let Some(cycle_heads) = revisions.cycle_heads_mut() { // Did the new result we got depend on our own provisional value, in a cycle? if cycle_heads.contains(&database_key_index) { // Ignore the computed value, leave the fallback value there. let memo = self .get_memo_from_table_for(zalsa, id, memo_ingredient_index) .unwrap_or_else(|| { unreachable!( "{database_key_index:#?} is a `FallbackImmediate` cycle head, \ but no memo found" ) }); // We need to mark the memo as finalized so other cycle participants that have fallbacks // will be verified (participants that don't have fallbacks will not be verified). memo.revisions.verified_final.store(true, Ordering::Release); return memo; } // If we're in the middle of a cycle and we have a fallback, use it instead. // Cycle participants that don't have a fallback will be discarded in // `validate_provisional()`. let cycle_heads = std::mem::take(cycle_heads); let active_query = db .zalsa_local() .push_query(database_key_index, IterationCount::initial()); new_value = C::cycle_initial(db, C::id_to_input(db, id)); revisions = active_query.pop(); // We need to set `cycle_heads` and `verified_final` because it needs to propagate to the callers. // When verifying this, we will see we have fallback and mark ourselves verified. revisions.set_cycle_heads(cycle_heads); revisions.verified_final = AtomicBool::new(false); } (new_value, revisions) } CycleRecoveryStrategy::Fixpoint => self.execute_maybe_iterate( db, active_query, opt_old_memo, zalsa, id, memo_ingredient_index, ), }; if let Some(old_memo) = opt_old_memo { // If the new value is equal to the old one, then it didn't // really change, even if some of its inputs have. So we can // "backdate" its `changed_at` revision to be the same as the // old value. self.backdate_if_appropriate(old_memo, database_key_index, &mut revisions, &new_value); // Diff the new outputs with the old, to discard any no-longer-emitted // outputs and update the tracked struct IDs for seeding the next revision. self.diff_outputs(zalsa, database_key_index, old_memo, &mut revisions); } self.insert_memo( zalsa, id, Memo::new(Some(new_value), zalsa.current_revision(), revisions), memo_ingredient_index, ) } #[inline] fn execute_maybe_iterate<'db>( &'db self, db: &'db C::DbView, mut active_query: ActiveQueryGuard<'db>, opt_old_memo: Option<&Memo<'db, C>>, zalsa: &'db Zalsa, id: Id, memo_ingredient_index: MemoIngredientIndex, ) -> (C::Output<'db>, QueryRevisions) { let database_key_index = active_query.database_key_index; let mut iteration_count = IterationCount::initial(); let mut fell_back = false; // Our provisional value from the previous iteration, when doing fixpoint iteration. 
// Initially it's set to None, because the initial provisional value is created lazily, // only when a cycle is actually encountered. let mut opt_last_provisional: Option<&Memo<'db, C>> = None; loop { let previous_memo = opt_last_provisional.or(opt_old_memo); let (mut new_value, mut revisions) = Self::execute_query( db, active_query, previous_memo, zalsa.current_revision(), id, ); // Did the new result we got depend on our own provisional value, in a cycle? if let Some(cycle_heads) = revisions .cycle_heads_mut() .filter(|cycle_heads| cycle_heads.contains(&database_key_index)) { let last_provisional_value = if let Some(last_provisional) = opt_last_provisional { // We have a last provisional value from our previous time around the loop. last_provisional.value.as_ref() } else { // This is our first time around the loop; a provisional value must have been // inserted into the memo table when the cycle was hit, so let's pull our // initial provisional value from there. let memo = self .get_memo_from_table_for(zalsa, id, memo_ingredient_index) .unwrap_or_else(|| { unreachable!( "{database_key_index:#?} is a cycle head, \ but no provisional memo found" ) }); debug_assert!(memo.may_be_provisional()); memo.value.as_ref() }; // SAFETY: The `LRU` does not run mid-execution, so the value remains filled let last_provisional_value = unsafe { last_provisional_value.unwrap_unchecked() }; tracing::debug!( "{database_key_index:?}: execute: \ I am a cycle head, comparing last provisional value with new value" ); // If the new result is equal to the last provisional result, the cycle has // converged and we are done. if !C::values_equal(&new_value, last_provisional_value) { if fell_back { // We fell back to a value last iteration, but the fallback didn't result // in convergence. We only have bad options here: continue iterating // (ignoring the request to fall back), or forcibly use the fallback and // leave the cycle in an inconsistent state (we'll be using a value for // this query that it doesn't evaluate to, given its inputs). Maybe we'll // have to go with the latter, but for now let's panic and see if real use // cases need non-converging fallbacks. panic!("{database_key_index:?}: execute: fallback did not converge"); } // We are in a cycle that hasn't converged; ask the user's // cycle-recovery function what to do: match C::recover_from_cycle( db, &new_value, iteration_count.as_u32(), C::id_to_input(db, id), ) { crate::CycleRecoveryAction::Iterate => {} crate::CycleRecoveryAction::Fallback(fallback_value) => { tracing::debug!( "{database_key_index:?}: execute: user cycle_fn says to fall back" ); new_value = fallback_value; // We have to insert the fallback value for this query and then iterate // one more time to fill in correct values for everything else in the // cycle based on it; then we'll re-insert it as final value. fell_back = true; } } // `iteration_count` can't overflow as we check it against `MAX_ITERATIONS` // which is less than `u32::MAX`. 
iteration_count = iteration_count.increment().unwrap_or_else(|| { panic!("{database_key_index:?}: execute: too many cycle iterations") }); zalsa.event(&|| { Event::new(EventKind::WillIterateCycle { database_key: database_key_index, iteration_count, fell_back, }) }); cycle_heads.update_iteration_count(database_key_index, iteration_count); revisions.update_iteration_count(iteration_count); tracing::debug!( "{database_key_index:?}: execute: iterate again, revisions: {revisions:#?}" ); opt_last_provisional = Some(self.insert_memo( zalsa, id, Memo::new(Some(new_value), zalsa.current_revision(), revisions), memo_ingredient_index, )); active_query = db .zalsa_local() .push_query(database_key_index, iteration_count); continue; } tracing::debug!( "{database_key_index:?}: execute: fixpoint iteration has a final value" ); cycle_heads.remove(&database_key_index); if cycle_heads.is_empty() { // If there are no more cycle heads, we can mark this as verified. revisions.verified_final.store(true, Ordering::Relaxed); } } tracing::debug!("{database_key_index:?}: execute: result.revisions = {revisions:#?}"); break (new_value, revisions); } } #[inline] fn execute_query<'db>( db: &'db C::DbView, active_query: ActiveQueryGuard<'db>, opt_old_memo: Option<&Memo<'db, C>>, current_revision: Revision, id: Id, ) -> (C::Output<'db>, QueryRevisions) { if let Some(old_memo) = opt_old_memo { // If we already executed this query once, then use the tracked-struct ids from the // previous execution as the starting point for the new one. if let Some(tracked_struct_ids) = old_memo.revisions.tracked_struct_ids() { active_query.seed_tracked_struct_ids(tracked_struct_ids); } // Copy over all inputs and outputs from a previous iteration. // This is necessary to: // * ensure that tracked struct created during the previous iteration // (and are owned by the query) are alive even if the query in this iteration no longer creates them. // * ensure the final returned memo depends on all inputs from all iterations. if old_memo.may_be_provisional() && old_memo.verified_at.load() == current_revision { active_query.seed_iteration(&old_memo.revisions); } } // Query was not previously executed, or value is potentially // stale, or value is absent. Let's execute! let new_value = C::execute(db, C::id_to_input(db, id)); (new_value, active_query.pop()) } } salsa-0.23.0/src/function/fetch.rs000064400000000000000000000272401046102023000150710ustar 00000000000000use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount}; use crate::function::memo::Memo; use crate::function::sync::ClaimResult; use crate::function::{Configuration, IngredientImpl, VerifyResult}; use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase}; use crate::zalsa_local::{QueryRevisions, ZalsaLocal}; use crate::Id; impl IngredientImpl where C: Configuration, { pub fn fetch<'db>(&'db self, db: &'db C::DbView, id: Id) -> &'db C::Output<'db> { let (zalsa, zalsa_local) = db.zalsas(); zalsa.unwind_if_revision_cancelled(zalsa_local); let database_key_index = self.database_key_index(id); #[cfg(debug_assertions)] let _span = tracing::debug_span!("fetch", query = ?database_key_index).entered(); let memo = self.refresh_memo(db, zalsa, zalsa_local, id); // SAFETY: We just refreshed the memo so it is guaranteed to contain a value now. 
let memo_value = unsafe { memo.value.as_ref().unwrap_unchecked() }; self.lru.record_use(id); zalsa_local.report_tracked_read( database_key_index, memo.revisions.durability, memo.revisions.changed_at, memo.revisions.accumulated().is_some(), &memo.revisions.accumulated_inputs, memo.cycle_heads(), ); memo_value } #[inline(always)] pub(super) fn refresh_memo<'db>( &'db self, db: &'db C::DbView, zalsa: &'db Zalsa, zalsa_local: &'db ZalsaLocal, id: Id, ) -> &'db Memo<'db, C> { let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); loop { if let Some(memo) = self .fetch_hot(zalsa, id, memo_ingredient_index) .or_else(|| { self.fetch_cold_with_retry(zalsa, zalsa_local, db, id, memo_ingredient_index) }) { return memo; } } } #[inline(always)] fn fetch_hot<'db>( &'db self, zalsa: &'db Zalsa, id: Id, memo_ingredient_index: MemoIngredientIndex, ) -> Option<&'db Memo<'db, C>> { let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index)?; memo.value.as_ref()?; let database_key_index = self.database_key_index(id); let can_shallow_update = self.shallow_verify_memo(zalsa, database_key_index, memo); if can_shallow_update.yes() && !memo.may_be_provisional() { self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); // SAFETY: memo is present in memo_map and we have verified that it is // still valid for the current revision. unsafe { Some(self.extend_memo_lifetime(memo)) } } else { None } } #[inline(never)] fn fetch_cold_with_retry<'db>( &'db self, zalsa: &'db Zalsa, zalsa_local: &'db ZalsaLocal, db: &'db C::DbView, id: Id, memo_ingredient_index: MemoIngredientIndex, ) -> Option<&'db Memo<'db, C>> { let memo = self.fetch_cold(zalsa, zalsa_local, db, id, memo_ingredient_index)?; // If we get back a provisional cycle memo, and it's provisional on any cycle heads // that are claimed by a different thread, we can't propagate the provisional memo // any further (it could escape outside the cycle); we need to block on the other // thread completing fixpoint iteration of the cycle, and then we can re-query for // our no-longer-provisional memo. // That is only correct for fixpoint cycles, though: `FallbackImmediate` cycles // never have provisional entries. if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate || !memo.provisional_retry(zalsa, zalsa_local, self.database_key_index(id)) { Some(memo) } else { None } } fn fetch_cold<'db>( &'db self, zalsa: &'db Zalsa, zalsa_local: &'db ZalsaLocal, db: &'db C::DbView, id: Id, memo_ingredient_index: MemoIngredientIndex, ) -> Option<&'db Memo<'db, C>> { let database_key_index = self.database_key_index(id); // Try to claim this query: if someone else has claimed it already, go back and start again. let claim_guard = match self.sync_table.try_claim(zalsa, id) { ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); let memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(memo) = memo { // This isn't strictly necessary, but if this is a provisional memo for an inner cycle, // await all outer cycle heads to give the thread driving it a chance to complete // (we don't want multiple threads competing for the queries participating in the same cycle). if memo.value.is_some() && memo.may_be_provisional() { memo.block_on_heads(zalsa, zalsa_local); } } return None; } ClaimResult::Cycle { .. 
} => { // check if there's a provisional value for this query // Note we don't `validate_may_be_provisional` the memo here as we want to reuse an // existing provisional memo if it exists let memo_guard = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(memo) = memo_guard { if memo.value.is_some() && memo.revisions.cycle_heads().contains(&database_key_index) { let can_shallow_update = self.shallow_verify_memo(zalsa, database_key_index, memo); if can_shallow_update.yes() { self.update_shallow( zalsa, database_key_index, memo, can_shallow_update, ); // SAFETY: memo is present in memo_map. return unsafe { Some(self.extend_memo_lifetime(memo)) }; } } } // no provisional value; create/insert/return initial provisional value return match C::CYCLE_STRATEGY { CycleRecoveryStrategy::Panic => zalsa_local.with_query_stack(|stack| { panic!( "dependency graph cycle when querying {database_key_index:#?}, \ set cycle_fn/cycle_initial to fixpoint iterate.\n\ Query stack:\n{stack:#?}", ); }), CycleRecoveryStrategy::Fixpoint => { tracing::debug!( "hit cycle at {database_key_index:#?}, \ inserting and returning fixpoint initial value" ); let revisions = QueryRevisions::fixpoint_initial(database_key_index); let initial_value = C::cycle_initial(db, C::id_to_input(db, id)); Some(self.insert_memo( zalsa, id, Memo::new(Some(initial_value), zalsa.current_revision(), revisions), memo_ingredient_index, )) } CycleRecoveryStrategy::FallbackImmediate => { tracing::debug!( "hit a `FallbackImmediate` cycle at {database_key_index:#?}" ); let active_query = zalsa_local.push_query(database_key_index, IterationCount::initial()); let fallback_value = C::cycle_initial(db, C::id_to_input(db, id)); let mut revisions = active_query.pop(); revisions.set_cycle_heads(CycleHeads::initial(database_key_index)); // We need this for `cycle_heads()` to work. We will unset this in the outer `execute()`. *revisions.verified_final.get_mut() = false; Some(self.insert_memo( zalsa, id, Memo::new(Some(fallback_value), zalsa.current_revision(), revisions), memo_ingredient_index, )) } }; } ClaimResult::Claimed(guard) => guard, }; // Now that we've claimed the item, check again to see if there's a "hot" value. let opt_old_memo = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); if let Some(old_memo) = opt_old_memo { if old_memo.value.is_some() { let mut cycle_heads = CycleHeads::default(); if let VerifyResult::Unchanged(_) = self.deep_verify_memo(db, zalsa, old_memo, database_key_index, &mut cycle_heads) { if cycle_heads.is_empty() { // SAFETY: memo is present in memo_map and we have verified that it is // still valid for the current revision. return unsafe { Some(self.extend_memo_lifetime(old_memo)) }; } } // If this is a provisional memo from the same revision, await all its cycle heads because // we need to ensure that only one thread is iterating on a cycle at a given time. // For example, if we have a nested cycle like so: // ``` // a -> b -> c -> b // -> a // // d -> b // ``` // thread 1 calls `a` and `a` completes the inner cycle `b -> c` but hasn't finished the outer cycle `a` yet. // thread 2 now calls `b`. We don't want that thread 2 iterates `b` while thread 1 is iterating `a` at the same time // because it can result in thread b overriding provisional memos that thread a has accessed already and still relies upon. // // By waiting, we ensure that thread 1 completes a (based on a provisional value for `b`) and `b` // becomes the new outer cycle, which thread 2 drives to completion. 
if old_memo.may_be_provisional() && old_memo.verified_at.load() == zalsa.current_revision() { // Try to claim all cycle heads of the provisional memo. If we can't because // some head is running on another thread, drop our claim guard to give that thread // a chance to take ownership of this query and complete it as part of its fixpoint iteration. // We will then block on the cycle head and retry once all cycle heads completed. if !old_memo.try_claim_heads(zalsa, zalsa_local) { drop(claim_guard); old_memo.block_on_heads(zalsa, zalsa_local); return None; } } } } let memo = self.execute( db, zalsa_local.push_query(database_key_index, IterationCount::initial()), opt_old_memo, ); Some(memo) } } salsa-0.23.0/src/function/inputs.rs000064400000000000000000000007571046102023000153260ustar 00000000000000use crate::function::{Configuration, IngredientImpl}; use crate::zalsa::Zalsa; use crate::zalsa_local::QueryOriginRef; use crate::Id; impl IngredientImpl where C: Configuration, { pub(super) fn origin<'db>(&self, zalsa: &'db Zalsa, key: Id) -> Option> { let memo_ingredient_index = self.memo_ingredient_index(zalsa, key); self.get_memo_from_table_for(zalsa, key, memo_ingredient_index) .map(|m| m.revisions.origin.as_ref()) } } salsa-0.23.0/src/function/lru.rs000064400000000000000000000022411046102023000145740ustar 00000000000000use std::num::NonZeroUsize; use crate::hash::FxLinkedHashSet; use crate::sync::Mutex; use crate::Id; pub(super) struct Lru { capacity: Option, set: Mutex>, } impl Lru { pub fn new(cap: usize) -> Self { Self { capacity: NonZeroUsize::new(cap), set: Mutex::default(), } } #[inline(always)] pub(super) fn record_use(&self, index: Id) { if self.capacity.is_some() { self.insert(index); } } #[inline(never)] fn insert(&self, index: Id) { let mut set = self.set.lock(); set.insert(index); } pub(super) fn set_capacity(&mut self, capacity: usize) { self.capacity = NonZeroUsize::new(capacity); if self.capacity.is_none() { self.set.get_mut().clear(); } } pub(super) fn for_each_evicted(&mut self, mut cb: impl FnMut(Id)) { let Some(cap) = self.capacity else { return; }; let set = self.set.get_mut(); while set.len() > cap.get() { if let Some(id) = set.pop_front() { cb(id); } } } } salsa-0.23.0/src/function/maybe_changed_after.rs000064400000000000000000000573541046102023000177400ustar 00000000000000use crate::accumulator::accumulated_map::InputAccumulatedValues; use crate::cycle::{CycleHeads, CycleRecoveryStrategy, IterationCount, ProvisionalStatus}; use crate::function::memo::Memo; use crate::function::sync::ClaimResult; use crate::function::{Configuration, IngredientImpl}; use crate::key::DatabaseKeyIndex; use crate::sync::atomic::Ordering; use crate::zalsa::{MemoIngredientIndex, Zalsa, ZalsaDatabase}; use crate::zalsa_local::{QueryEdgeKind, QueryOriginRef, ZalsaLocal}; use crate::{AsDynDatabase as _, Id, Revision}; /// Result of memo validation. pub enum VerifyResult { /// Memo has changed and needs to be recomputed. Changed, /// Memo remains valid. /// /// The inner value tracks whether the memo or any of its dependencies have an /// accumulated value. 
Unchanged(InputAccumulatedValues), } impl VerifyResult { pub(crate) fn changed_if(changed: bool) -> Self { if changed { Self::Changed } else { Self::unchanged() } } pub(crate) fn unchanged() -> Self { Self::Unchanged(InputAccumulatedValues::Empty) } } impl IngredientImpl where C: Configuration, { pub(super) fn maybe_changed_after<'db>( &'db self, db: &'db C::DbView, id: Id, revision: Revision, cycle_heads: &mut CycleHeads, ) -> VerifyResult { let (zalsa, zalsa_local) = db.zalsas(); let memo_ingredient_index = self.memo_ingredient_index(zalsa, id); zalsa.unwind_if_revision_cancelled(zalsa_local); loop { let database_key_index = self.database_key_index(id); tracing::debug!("{database_key_index:?}: maybe_changed_after(revision = {revision:?})"); // Check if we have a verified version: this is the hot path. let memo_guard = self.get_memo_from_table_for(zalsa, id, memo_ingredient_index); let Some(memo) = memo_guard else { // No memo? Assume has changed. return VerifyResult::Changed; }; let can_shallow_update = self.shallow_verify_memo(zalsa, database_key_index, memo); if can_shallow_update.yes() && !memo.may_be_provisional() { self.update_shallow(zalsa, database_key_index, memo, can_shallow_update); return if memo.revisions.changed_at > revision { VerifyResult::Changed } else { VerifyResult::Unchanged(memo.revisions.accumulated_inputs.load()) }; } if let Some(mcs) = self.maybe_changed_after_cold( zalsa, db, id, revision, memo_ingredient_index, cycle_heads, ) { return mcs; } else { // We failed to claim, have to retry. } } } #[inline(never)] fn maybe_changed_after_cold<'db>( &'db self, zalsa: &Zalsa, db: &'db C::DbView, key_index: Id, revision: Revision, memo_ingredient_index: MemoIngredientIndex, cycle_heads: &mut CycleHeads, ) -> Option { let database_key_index = self.database_key_index(key_index); let _claim_guard = match self.sync_table.try_claim(zalsa, key_index) { ClaimResult::Running(blocked_on) => { blocked_on.block_on(zalsa); return None; } ClaimResult::Cycle { .. } => match C::CYCLE_STRATEGY { CycleRecoveryStrategy::Panic => db.zalsa_local().with_query_stack(|stack| { panic!( "dependency graph cycle when validating {database_key_index:#?}, \ set cycle_fn/cycle_initial to fixpoint iterate.\n\ Query stack:\n{stack:#?}", ); }), CycleRecoveryStrategy::FallbackImmediate => { return Some(VerifyResult::unchanged()); } CycleRecoveryStrategy::Fixpoint => { tracing::debug!( "hit cycle at {database_key_index:?} in `maybe_changed_after`, returning fixpoint initial value", ); cycle_heads.push_initial(database_key_index); return Some(VerifyResult::unchanged()); } }, ClaimResult::Claimed(guard) => guard, }; // Load the current memo, if any. let Some(old_memo) = self.get_memo_from_table_for(zalsa, key_index, memo_ingredient_index) else { return Some(VerifyResult::Changed); }; tracing::debug!( "{database_key_index:?}: maybe_changed_after_cold, successful claim, \ revision = {revision:?}, old_memo = {old_memo:#?}", old_memo = old_memo.tracing_debug() ); // Check if the inputs are still valid. We can just compare `changed_at`. let deep_verify = self.deep_verify_memo(db, zalsa, old_memo, database_key_index, cycle_heads); if let VerifyResult::Unchanged(accumulated_inputs) = deep_verify { return Some(if old_memo.revisions.changed_at > revision { VerifyResult::Changed } else { VerifyResult::Unchanged(accumulated_inputs) }); } // If inputs have changed, but we have an old value, we can re-execute. // It is possible the result will be equal to the old value and hence // backdated. 
In that case, although we will have computed a new memo, // the value has not logically changed. // However, executing the query here is only safe if we are not in a cycle. // In a cycle, it's important that the cycle head gets executed or we // risk that some dependencies of this query haven't been verified yet because // the cycle head returned *fixpoint initial* without validating its dependencies. // `in_cycle` tracks if the enclosing query is in a cycle. `deep_verify.cycle_heads` tracks // if **this query** encountered a cycle (which means there's some provisional value somewhere floating around). if old_memo.value.is_some() && cycle_heads.is_empty() { let active_query = db .zalsa_local() .push_query(database_key_index, IterationCount::initial()); let memo = self.execute(db, active_query, Some(old_memo)); let changed_at = memo.revisions.changed_at; return Some(if changed_at > revision { VerifyResult::Changed } else { VerifyResult::Unchanged(match memo.revisions.accumulated() { Some(_) => InputAccumulatedValues::Any, None => memo.revisions.accumulated_inputs.load(), }) }); } // Otherwise, nothing for it: have to consider the value to have changed. Some(VerifyResult::Changed) } /// `Some` if the memo's value and `changed_at` time is still valid in this revision. /// Does only a shallow O(1) check, doesn't walk the dependencies. /// /// In general, a provisional memo (from cycle iteration) does not verify. Since we don't /// eagerly finalize all provisional memos in cycle iteration, we have to lazily check here /// (via `validate_provisional`) whether a may-be-provisional memo should actually be verified /// final, because its cycle heads are all now final. #[inline] pub(super) fn shallow_verify_memo( &self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> ShallowUpdate { tracing::debug!( "{database_key_index:?}: shallow_verify_memo(memo = {memo:#?})", memo = memo.tracing_debug() ); let verified_at = memo.verified_at.load(); let revision_now = zalsa.current_revision(); if verified_at == revision_now { // Already verified. return ShallowUpdate::Verified; } let last_changed = zalsa.last_changed_revision(memo.revisions.durability); tracing::debug!( "{database_key_index:?}: check_durability(memo = {memo:#?}, last_changed={:?} <= verified_at={:?}) = {:?}", last_changed, verified_at, last_changed <= verified_at, memo = memo.tracing_debug() ); if last_changed <= verified_at { // No input of the suitable durability has changed since last verified. ShallowUpdate::HigherDurability } else { ShallowUpdate::No } } #[inline] pub(super) fn update_shallow( &self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, update: ShallowUpdate, ) { if let ShallowUpdate::HigherDurability = update { memo.mark_as_verified(zalsa, database_key_index); memo.mark_outputs_as_verified(zalsa, database_key_index); } } /// Validates this memo if it is a provisional memo. Returns true for: /// * non provisional memos /// * provisional memos that have been successfully marked as verified final, that is, its /// cycle heads have all been finalized. /// * provisional memos that have been created in the same revision and iteration and are part of the same cycle. 
#[inline] pub(super) fn validate_may_be_provisional( &self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> bool { !memo.may_be_provisional() || self.validate_provisional(zalsa, database_key_index, memo) || self.validate_same_iteration(zalsa, zalsa_local, database_key_index, memo) } /// Check if this memo's cycle heads have all been finalized. If so, mark it verified final and /// return true, if not return false. #[inline] fn validate_provisional( &self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> bool { tracing::trace!( "{database_key_index:?}: validate_provisional(memo = {memo:#?})", memo = memo.tracing_debug() ); for cycle_head in memo.revisions.cycle_heads() { // Test if our cycle heads (with the same revision) are now finalized. let Some(kind) = zalsa .lookup_ingredient(cycle_head.database_key_index.ingredient_index()) .provisional_status(zalsa, cycle_head.database_key_index.key_index()) else { return false; }; match kind { ProvisionalStatus::Provisional { .. } => return false, ProvisionalStatus::Final { iteration } => { // It's important to also account for the revision for the case where: // thread 1: `b` -> `a` (but only in the first iteration) // -> `c` -> `b` // thread 2: `a` -> `b` // // If we don't account for the revision, then `a` (from iteration 0) will be finalized // because its cycle head `b` is now finalized, but `b` never pulled `a` in the last iteration. if iteration != cycle_head.iteration_count { return false; } // FIXME: We can ignore this, I just don't have a use-case for this. if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate { panic!("cannot mix `cycle_fn` and `cycle_result` in cycles") } } ProvisionalStatus::FallbackImmediate => match C::CYCLE_STRATEGY { CycleRecoveryStrategy::Panic => { // Queries without fallback are not considered when inside a cycle. return false; } // FIXME: We can do the same as with `CycleRecoveryStrategy::Panic` here, I just don't have // a use-case for this. CycleRecoveryStrategy::Fixpoint => { panic!("cannot mix `cycle_fn` and `cycle_result` in cycles") } CycleRecoveryStrategy::FallbackImmediate => {} }, } } // Relaxed is sufficient here because there are no other writes we need to ensure have // happened before marking this memo as verified-final. memo.revisions.verified_final.store(true, Ordering::Relaxed); true } /// If this is a provisional memo, validate that it was cached in the same iteration of the /// same cycle(s) that we are still executing. If so, it is valid for reuse. This avoids /// runaway re-execution of the same queries within a fixpoint iteration. pub(super) fn validate_same_iteration( &self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal, database_key_index: DatabaseKeyIndex, memo: &Memo<'_, C>, ) -> bool { tracing::trace!( "{database_key_index:?}: validate_same_iteration(memo = {memo:#?})", memo = memo.tracing_debug() ); let cycle_heads = memo.revisions.cycle_heads(); if cycle_heads.is_empty() { return true; } zalsa_local.with_query_stack(|stack| { cycle_heads.iter().all(|cycle_head| { stack .iter() .rev() .find(|query| query.database_key_index == cycle_head.database_key_index) .map(|query| query.iteration_count()) .or_else(|| { // If this is a cycle head is owned by another thread that is blocked by this ingredient, // check if it has the same iteration count. 
                        let ingredient = zalsa
                            .lookup_ingredient(cycle_head.database_key_index.ingredient_index());
                        let wait_result =
                            ingredient.wait_for(zalsa, cycle_head.database_key_index.key_index());

                        if !wait_result.is_cycle_with_other_thread() {
                            return None;
                        }

                        let provisional_status = ingredient
                            .provisional_status(zalsa, cycle_head.database_key_index.key_index())?;
                        provisional_status.iteration()
                    })
                    == Some(cycle_head.iteration_count)
            })
        })
    }

    /// VerifyResult::Unchanged if the memo's value and `changed_at` time are up-to-date in the
    /// current revision. When this returns Unchanged with no cycle heads, it also updates the
    /// memo's `verified_at` field if needed to make future calls cheaper.
    ///
    /// This function recursively walks the dependencies of `old_memo`
    /// and may even execute them to see if their outputs have changed.
    pub(super) fn deep_verify_memo(
        &self,
        db: &C::DbView,
        zalsa: &Zalsa,
        old_memo: &Memo<'_, C>,
        database_key_index: DatabaseKeyIndex,
        cycle_heads: &mut CycleHeads,
    ) -> VerifyResult {
        tracing::debug!(
            "{database_key_index:?}: deep_verify_memo(old_memo = {old_memo:#?})",
            old_memo = old_memo.tracing_debug()
        );

        let can_shallow_update = self.shallow_verify_memo(zalsa, database_key_index, old_memo);
        if can_shallow_update.yes()
            && self.validate_may_be_provisional(
                zalsa,
                db.zalsa_local(),
                database_key_index,
                old_memo,
            )
        {
            self.update_shallow(zalsa, database_key_index, old_memo, can_shallow_update);
            return VerifyResult::unchanged();
        }

        match old_memo.revisions.origin.as_ref() {
            QueryOriginRef::Assigned(_) => {
                // If the value was assigned by another query,
                // and that query had been up-to-date,
                // then we would have updated the `verified_at` field already.
                // So the fact that we are here means that it was not specified
                // during this revision or is otherwise stale.
                //
                // Example of how this can happen:
                //
                // Conditionally specified queries
                // where the value is specified
                // in rev 1 but not in rev 2.
                VerifyResult::Changed
            }
            // Return `Unchanged` similar to the initial value that we insert
            // when we hit the cycle. Any dependencies accessed when creating the fixpoint initial
            // are tracked by the outer query. Nothing should have changed assuming that the
            // fixpoint initial function is deterministic.
            QueryOriginRef::FixpointInitial => {
                cycle_heads.push_initial(database_key_index);
                VerifyResult::unchanged()
            }
            QueryOriginRef::DerivedUntracked(_) => {
                // Untracked inputs? Have to assume that it changed.
                VerifyResult::Changed
            }
            QueryOriginRef::Derived(edges) => {
                let is_provisional = old_memo.may_be_provisional();

                // If the value is from the same revision but is still provisional, consider it changed
                // because we're now in a new iteration.
                if can_shallow_update == ShallowUpdate::Verified && is_provisional {
                    return VerifyResult::Changed;
                }

                let dyn_db = db.as_dyn_database();

                let mut inputs = InputAccumulatedValues::Empty;
                // Fully tracked inputs? Iterate over the inputs and check them, one by one.
                //
                // NB: It's important here that we are iterating the inputs in the order that
                // they executed. It's possible that if the value of some input I0 is no longer
                // valid, then some later input I1 might never have executed at all, so verifying
                // it is still up to date is meaningless.
                for &edge in edges {
                    match edge.kind() {
                        QueryEdgeKind::Input(dependency_index) => {
                            match dependency_index.maybe_changed_after(
                                dyn_db,
                                zalsa,
                                old_memo.verified_at.load(),
                                cycle_heads,
                            ) {
                                VerifyResult::Changed => return VerifyResult::Changed,
                                VerifyResult::Unchanged(input_accumulated) => {
                                    inputs |= input_accumulated;
                                }
                            }
                        }
                        QueryEdgeKind::Output(dependency_index) => {
                            // Subtle: Mark outputs as validated now, even though we may
                            // later find an input that requires us to re-execute the function.
                            // Even if it re-executes, the function will wind up writing the same value,
                            // since all prior inputs were green. It's important to do this during
                            // this loop, because it's possible that one of our input queries will
                            // re-execute and may read one of our earlier outputs
                            // (e.g., in a scenario where we do something like
                            // `e = Entity::new(..); query(e);` and `query` reads a field of `e`).
                            //
                            // NB. Accumulators are also outputs, but the above logic doesn't
                            // quite apply to them. Since multiple values are pushed, the first value
                            // may be unchanged, but later values could be different.
                            // In that case, however, the data accumulated
                            // by this function cannot be read until this function is marked green,
                            // so even if we mark them as valid here, the function will re-execute
                            // and overwrite the contents.
                            dependency_index.mark_validated_output(zalsa, database_key_index);
                        }
                    }
                }

                // Possible scenarios here:
                //
                // 1. Cycle heads is empty. We traversed our full dependency graph and neither hit any
                //    cycles, nor found any changed dependencies. We can mark our memo verified and
                //    return Unchanged with empty cycle heads.
                //
                // 2. Cycle heads is non-empty, and does not contain our own key index. We are part of
                //    a cycle, and since we don't know if some other cycle participant that hasn't been
                //    traversed yet (that is, some other dependency of the cycle head, which is only a
                //    dependency of ours via the cycle) might still have changed, we can't yet mark our
                //    memo verified. We can return a provisional Unchanged, with cycle heads.
                //
                // 3. Cycle heads is non-empty, and contains only our own key index. We are the head of
                //    a cycle, and we've now traversed the entire cycle and found no changes, but no
                //    other cycle participants were verified (they would have all hit case 2 above).
                //    Similar to `execute`, return unchanged and lazily verify the other cycle participants
                //    when they're used next.
                //
                // 4. Cycle heads is non-empty, and contains our own key index as well as other key
                //    indices. We are the head of a cycle nested within another cycle. We can't mark
                //    our own memo verified (for the same reason as in case 2: the full outer cycle
                //    hasn't been validated unchanged yet). We return Unchanged, with ourself removed
                //    from cycle heads. We will handle our own memo (and the rest of our cycle) on a
                //    future iteration; first the outer cycle head needs to verify itself.

                cycle_heads.remove(&database_key_index);

                // Cases 1 and 3:
                if cycle_heads.is_empty() {
                    old_memo.mark_as_verified(zalsa, database_key_index);
                    old_memo.revisions.accumulated_inputs.store(inputs);

                    if is_provisional {
                        old_memo
                            .revisions
                            .verified_final
                            .store(true, Ordering::Relaxed);
                    }
                }

                VerifyResult::Unchanged(inputs)
            }
        }
    }
}

#[derive(Copy, Clone, Eq, PartialEq)]
pub(super) enum ShallowUpdate {
    /// The memo is from this revision and has already been verified.
    Verified,

    /// The revision for the memo's durability hasn't changed. It can be marked as verified
    /// in this revision.
    HigherDurability,

    /// The memo requires a deep verification.
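    /// (Its dependencies must be walked; see `deep_verify_memo`.)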
    No,
}

impl ShallowUpdate {
    pub(super) fn yes(&self) -> bool {
        matches!(
            self,
            ShallowUpdate::Verified | ShallowUpdate::HigherDurability
        )
    }
}
salsa-0.23.0/src/function/memo.rs000064400000000000000000000457251046102023000147410ustar 00000000000000use std::any::Any;
use std::fmt::{Debug, Formatter};
use std::mem::transmute;
use std::ptr::NonNull;

use crate::cycle::{empty_cycle_heads, CycleHead, CycleHeads, IterationCount, ProvisionalStatus};
use crate::function::{Configuration, IngredientImpl};
use crate::hash::FxHashSet;
use crate::ingredient::{Ingredient, WaitForResult};
use crate::key::DatabaseKeyIndex;
use crate::revision::AtomicRevision;
use crate::runtime::Running;
use crate::sync::atomic::Ordering;
use crate::table::memo::MemoTableWithTypesMut;
use crate::zalsa::{MemoIngredientIndex, Zalsa};
use crate::zalsa_local::{QueryOriginRef, QueryRevisions, ZalsaLocal};
use crate::{Event, EventKind, Id, Revision};

impl<C: Configuration> IngredientImpl<C> {
    /// Inserts the memo for the given key; (atomically) overwrites and returns any previously existing memo
    pub(super) fn insert_memo_into_table_for<'db>(
        &self,
        zalsa: &'db Zalsa,
        id: Id,
        memo: NonNull<Memo<'db, C>>,
        memo_ingredient_index: MemoIngredientIndex,
    ) -> Option<NonNull<Memo<'db, C>>> {
        // SAFETY: The table stores 'static memos (to support `Any`), the memos are in fact valid
        // for `'db` though as we delay their dropping to the end of a revision.
        let static_memo =
            unsafe { transmute::<NonNull<Memo<'db, C>>, NonNull<Memo<'static, C>>>(memo) };
        let old_static_memo = zalsa
            .memo_table_for(id)
            .insert(memo_ingredient_index, static_memo)?;
        // SAFETY: The table stores 'static memos (to support `Any`), the memos are in fact valid
        // for `'db` though as we delay their dropping to the end of a revision.
        Some(unsafe {
            transmute::<NonNull<Memo<'static, C>>, NonNull<Memo<'db, C>>>(old_static_memo)
        })
    }

    /// Loads the current memo for `key_index`. This does not hold any sort of
    /// lock on the `memo_map` once it returns, so this memo could immediately
    /// become outdated if other threads store into the `memo_map`.
    pub(super) fn get_memo_from_table_for<'db>(
        &self,
        zalsa: &'db Zalsa,
        id: Id,
        memo_ingredient_index: MemoIngredientIndex,
    ) -> Option<&'db Memo<'db, C>> {
        let static_memo = zalsa.memo_table_for(id).get(memo_ingredient_index)?;
        // SAFETY: The table stores 'static memos (to support `Any`), the memos are in fact valid
        // for `'db` though as we delay their dropping to the end of a revision.
        Some(unsafe { transmute::<&Memo<'static, C>, &'db Memo<'db, C>>(static_memo.as_ref()) })
    }

    /// Evicts the existing memo for the given key, replacing it
    /// with an equivalent memo that has no value. If the memo is untracked, FixpointInitial,
    /// or has values assigned as output of another query, this has no effect.
    pub(super) fn evict_value_from_memo_for(
        table: MemoTableWithTypesMut<'_>,
        memo_ingredient_index: MemoIngredientIndex,
    ) {
        let map = |memo: &mut Memo<'static, C>| {
            match memo.revisions.origin.as_ref() {
                QueryOriginRef::Assigned(_)
                | QueryOriginRef::DerivedUntracked(_)
                | QueryOriginRef::FixpointInitial => {
                    // Careful: Cannot evict memos whose values were
                    // assigned as output of another query
                    // or those with untracked inputs
                    // as their values cannot be reconstructed.
                }
                QueryOriginRef::Derived(_) => {
                    // Set the memo value to `None`.
                    memo.value = None;
                }
            }
        };
        table.map_memo(memo_ingredient_index, map)
    }
}

#[derive(Debug)]
pub struct Memo<'db, C: Configuration> {
    /// The result of the query, if we decide to memoize it.
    pub(super) value: Option<C::Output<'db>>,

    /// Last revision when this memo was verified; this begins
    /// as the current revision.
    pub(super) verified_at: AtomicRevision,

    /// Revision information
    pub(super) revisions: QueryRevisions,
}

impl<'db, C: Configuration> Memo<'db, C> {
    pub(super) fn new(
        value: Option<C::Output<'db>>,
        revision_now: Revision,
        revisions: QueryRevisions,
    ) -> Self {
        debug_assert!(
            !revisions.verified_final.load(Ordering::Relaxed)
                || revisions.cycle_heads().is_empty(),
            "A finalized memo must not have cycle heads"
        );
        Memo {
            value,
            verified_at: AtomicRevision::from(revision_now),
            revisions,
        }
    }

    /// True if this may be a provisional cycle-iteration result.
    #[inline]
    pub(super) fn may_be_provisional(&self) -> bool {
        // Relaxed is OK here, because `verified_final` is only ever mutated in one direction (from
        // `false` to `true`), and changing it to `true` on memos with cycle heads where it was
        // ever `false` is purely an optimization; if we read an out-of-date `false`, it just means
        // we might go validate it again unnecessarily.
        !self.revisions.verified_final.load(Ordering::Relaxed)
    }

    /// Invoked when `refresh_memo` is about to return a memo to the caller; if that memo is
    /// provisional, and its cycle head is claimed by another thread, we need to wait for that
    /// other thread to complete the fixpoint iteration, and then retry fetching our own memo.
    ///
    /// Return `true` if the caller should retry, `false` if the caller should go ahead and
    /// return this memo.
    #[inline(always)]
    pub(super) fn provisional_retry(
        &self,
        zalsa: &Zalsa,
        zalsa_local: &ZalsaLocal,
        database_key_index: DatabaseKeyIndex,
    ) -> bool {
        if self.revisions.cycle_heads().is_empty() {
            return false;
        }

        if !self.may_be_provisional() {
            return false;
        };

        if self.block_on_heads(zalsa, zalsa_local) {
            // If we get here, we are a provisional value of
            // the cycle head (either initial value, or from a later iteration) and should be
            // returned to the caller to allow fixpoint iteration to proceed.
            false
        } else {
            // all our cycle heads are complete; re-fetch
            // and we should get a non-provisional memo.
            tracing::debug!(
                "Retrying provisional memo {database_key_index:?} after awaiting cycle heads."
            );
            true
        }
    }

    /// Blocks on all cycle heads (recursively) that this memo depends on.
    ///
    /// Returns `true` if awaiting all cycle heads results in a cycle. This means they're all waiting
    /// for us to make progress.
    #[inline(always)]
    pub(super) fn block_on_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool {
        // IMPORTANT: If you make changes to this function, make sure to run `cycle_nested_deep` with
        // shuttle with at least 10k iterations.

        // The most common case is that the entire cycle is running in the same thread.
        // If that's the case, short circuit and return `true` immediately.
        if self.all_cycles_on_stack(zalsa_local) {
            return true;
        }

        // Otherwise, await all cycle heads, recursively.
        return block_on_heads_cold(zalsa, self.cycle_heads());

        #[inline(never)]
        fn block_on_heads_cold(zalsa: &Zalsa, heads: &CycleHeads) -> bool {
            let _entered = tracing::debug_span!("block_on_heads").entered();
            let mut cycle_heads = TryClaimCycleHeadsIter::new(zalsa, heads);
            let mut all_cycles = true;

            while let Some(claim_result) = cycle_heads.next() {
                match claim_result {
                    TryClaimHeadsResult::Cycle => {}
                    TryClaimHeadsResult::Finalized => {
                        all_cycles = false;
                    }
                    TryClaimHeadsResult::Available => {
                        all_cycles = false;
                    }
                    TryClaimHeadsResult::Running(running) => {
                        all_cycles = false;
                        running.block_on(&mut cycle_heads);
                    }
                }
            }

            all_cycles
        }
    }

    /// Tries to claim all cycle heads to see if they're finalized or available.
    ///
    /// Unlike `block_on_heads`, this code does not block on any cycle head. Instead, it returns `false` if
    /// claiming all cycle heads fails because one of them is running on another thread.
    pub(super) fn try_claim_heads(&self, zalsa: &Zalsa, zalsa_local: &ZalsaLocal) -> bool {
        let _entered = tracing::debug_span!("try_claim_heads").entered();
        if self.all_cycles_on_stack(zalsa_local) {
            return true;
        }

        let cycle_heads = TryClaimCycleHeadsIter::new(zalsa, self.revisions.cycle_heads());
        for claim_result in cycle_heads {
            match claim_result {
                TryClaimHeadsResult::Cycle
                | TryClaimHeadsResult::Finalized
                | TryClaimHeadsResult::Available => {}
                TryClaimHeadsResult::Running(_) => {
                    return false;
                }
            }
        }

        true
    }

    fn all_cycles_on_stack(&self, zalsa_local: &ZalsaLocal) -> bool {
        let cycle_heads = self.revisions.cycle_heads();
        if cycle_heads.is_empty() {
            return true;
        }

        zalsa_local.with_query_stack(|stack| {
            cycle_heads.iter().all(|cycle_head| {
                stack
                    .iter()
                    .rev()
                    .any(|query| query.database_key_index == cycle_head.database_key_index)
            })
        })
    }

    /// Cycle heads that should be propagated to dependent queries.
    #[inline(always)]
    pub(super) fn cycle_heads(&self) -> &CycleHeads {
        if self.may_be_provisional() {
            self.revisions.cycle_heads()
        } else {
            empty_cycle_heads()
        }
    }

    /// Mark the memo as having been verified in the current revision.
    /// The caller is responsible for updating the memo's `accumulated` state if its accumulated
    /// values have changed since.
    #[inline]
    pub(super) fn mark_as_verified(&self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex) {
        zalsa.event(&|| {
            Event::new(EventKind::DidValidateMemoizedValue {
                database_key: database_key_index,
            })
        });

        self.verified_at.store(zalsa.current_revision());
    }

    pub(super) fn mark_outputs_as_verified(
        &self,
        zalsa: &Zalsa,
        database_key_index: DatabaseKeyIndex,
    ) {
        for output in self.revisions.origin.as_ref().outputs() {
            output.mark_validated_output(zalsa, database_key_index);
        }
    }

    pub(super) fn tracing_debug(&self) -> impl std::fmt::Debug + use<'_, 'db, C> {
        struct TracingDebug<'memo, 'db, C: Configuration> {
            memo: &'memo Memo<'db, C>,
        }

        impl<C: Configuration> std::fmt::Debug for TracingDebug<'_, '_, C> {
            fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
                f.debug_struct("Memo")
                    .field(
                        "value",
                        if self.memo.value.is_some() {
                            &"Some(<value>)"
                        } else {
                            &"None"
                        },
                    )
                    .field("verified_at", &self.memo.verified_at)
                    .field("revisions", &self.memo.revisions)
                    .finish()
            }
        }

        TracingDebug { memo: self }
    }
}

impl<C: Configuration> crate::table::memo::Memo for Memo<'static, C>
where
    C::Output<'static>: Send + Sync + Any,
{
    fn origin(&self) -> QueryOriginRef<'_> {
        self.revisions.origin.as_ref()
    }

    #[cfg(feature = "salsa_unstable")]
    fn memory_usage(&self) -> crate::database::MemoInfo {
        let size_of =
            std::mem::size_of::<Memo<'static, C>>() + self.revisions.allocation_size();
        let heap_size = self.value.as_ref().map(C::heap_size).unwrap_or(0);

        crate::database::MemoInfo {
            debug_name: C::DEBUG_NAME,
            output: crate::database::SlotInfo {
                size_of_metadata: size_of - std::mem::size_of::<C::Output<'static>>(),
                debug_name: std::any::type_name::<C::Output<'static>>(),
                size_of_fields: std::mem::size_of::<C::Output<'static>>() + heap_size,
                memos: Vec::new(),
            },
        }
    }
}

pub(super) enum TryClaimHeadsResult<'me> {
    /// Claiming every cycle head results in a cycle.
    Cycle,

    /// The cycle head has been finalized.
    Finalized,

    /// The cycle head is not finalized, but it can be claimed.
    Available,

    /// The cycle head is currently being executed on another thread.
    Running(RunningCycleHead<'me>),
}

pub(super) struct RunningCycleHead<'me> {
    inner: Running<'me>,
    ingredient: &'me dyn Ingredient,
}

impl<'a> RunningCycleHead<'a> {
    fn block_on(self, cycle_heads: &mut TryClaimCycleHeadsIter<'a>) {
        let key_index = self.inner.database_key().key_index();
        self.inner.block_on(cycle_heads.zalsa);

        cycle_heads.queue_ingredient_heads(self.ingredient, key_index);
    }
}

/// Iterator to try claiming the transitive cycle heads of a memo.
struct TryClaimCycleHeadsIter<'a> {
    zalsa: &'a Zalsa,
    queue: Vec<CycleHead>,
    queued: FxHashSet<CycleHead>,
}

impl<'a> TryClaimCycleHeadsIter<'a> {
    fn new(zalsa: &'a Zalsa, heads: &CycleHeads) -> Self {
        let queue: Vec<_> = heads.iter().copied().collect();
        let queued: FxHashSet<_> = queue.iter().copied().collect();

        Self {
            zalsa,
            queue,
            queued,
        }
    }

    fn queue_ingredient_heads(&mut self, ingredient: &dyn Ingredient, key: Id) {
        // Recursively wait for all cycle heads that this head depends on. It's important
        // that we fetch those from the updated memo because the cycle heads can change
        // between iterations and new cycle heads can be added if a query depends on
        // some cycle heads only when a specific condition is met
        // (`a` calls `b` and `c` in iteration 0 but `c` and `d` in iteration 1 or later).
        // IMPORTANT: It's critical that we get the cycle head from the latest memo
        // here, in case the memo has become part of another cycle (we need to block on that too!).
        self.queue.extend(
            ingredient
                .cycle_heads(self.zalsa, key)
                .iter()
                .copied()
                .filter(|head| self.queued.insert(*head)),
        )
    }
}

impl<'me> Iterator for TryClaimCycleHeadsIter<'me> {
    type Item = TryClaimHeadsResult<'me>;

    fn next(&mut self) -> Option<Self::Item> {
        let head = self.queue.pop()?;

        let head_database_key = head.database_key_index;
        let head_key_index = head_database_key.key_index();
        let ingredient = self
            .zalsa
            .lookup_ingredient(head_database_key.ingredient_index());

        let cycle_head_kind = ingredient
            .provisional_status(self.zalsa, head_key_index)
            .unwrap_or(ProvisionalStatus::Provisional {
                iteration: IterationCount::initial(),
            });

        match cycle_head_kind {
            ProvisionalStatus::Final { .. } | ProvisionalStatus::FallbackImmediate => {
                // This cycle is already finalized, so we don't need to wait on it;
                // keep looping through cycle heads.
                tracing::trace!("Dependent cycle head {head:?} has been finalized.");
                Some(TryClaimHeadsResult::Finalized)
            }
            ProvisionalStatus::Provisional { .. } => {
                match ingredient.wait_for(self.zalsa, head_key_index) {
                    WaitForResult::Cycle { .. } => {
                        // We hit a cycle blocking on the cycle head; this means this query actively
                        // participates in the cycle and some other query is blocked on this thread.
                        tracing::debug!("Waiting for {head:?} results in a cycle");
                        Some(TryClaimHeadsResult::Cycle)
                    }
                    WaitForResult::Running(running) => {
                        tracing::debug!("Ingredient {head:?} is running: {running:?}");
                        Some(TryClaimHeadsResult::Running(RunningCycleHead {
                            inner: running,
                            ingredient,
                        }))
                    }
                    WaitForResult::Available => {
                        self.queue_ingredient_heads(ingredient, head_key_index);
                        Some(TryClaimHeadsResult::Available)
                    }
                }
            }
        }
    }
}

#[cfg(all(not(feature = "shuttle"), target_pointer_width = "64"))]
mod _memory_usage {
    use crate::cycle::CycleRecoveryStrategy;
    use crate::ingredient::Location;
    use crate::plumbing::{IngredientIndices, MemoIngredientSingletonIndex, SalsaStructInDb};
    use crate::zalsa::Zalsa;
    use crate::{CycleRecoveryAction, Database, Id};

    use std::any::TypeId;
    use std::num::NonZeroUsize;

    // Memos are stored a lot; make sure their size doesn't randomly increase.
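    // The `const` below is a compile-time size assertion: the two array types only
    // unify when `Memo` is exactly six words, so an accidental size increase fails
    // the build rather than regressing silently.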
    const _: [(); std::mem::size_of::<super::Memo<'static, DummyConfiguration>>()] =
        [(); std::mem::size_of::<[usize; 6]>()];

    struct DummyStruct;

    impl SalsaStructInDb for DummyStruct {
        type MemoIngredientMap = MemoIngredientSingletonIndex;

        fn lookup_or_create_ingredient_index(_: &Zalsa) -> IngredientIndices {
            unimplemented!()
        }

        fn cast(_: Id, _: TypeId) -> Option<Self> {
            unimplemented!()
        }
    }

    struct DummyConfiguration;

    impl super::Configuration for DummyConfiguration {
        const DEBUG_NAME: &'static str = "";
        const LOCATION: Location = Location { file: "", line: 0 };
        type DbView = dyn Database;
        type SalsaStruct<'db> = DummyStruct;
        type Input<'db> = ();
        type Output<'db> = NonZeroUsize;
        const CYCLE_STRATEGY: CycleRecoveryStrategy = CycleRecoveryStrategy::Panic;

        fn values_equal<'db>(_: &Self::Output<'db>, _: &Self::Output<'db>) -> bool {
            unimplemented!()
        }

        fn id_to_input(_: &Self::DbView, _: Id) -> Self::Input<'_> {
            unimplemented!()
        }

        fn execute<'db>(_: &'db Self::DbView, _: Self::Input<'db>) -> Self::Output<'db> {
            unimplemented!()
        }

        fn cycle_initial<'db>(_: &'db Self::DbView, _: Self::Input<'db>) -> Self::Output<'db> {
            unimplemented!()
        }

        fn recover_from_cycle<'db>(
            _: &'db Self::DbView,
            _: &Self::Output<'db>,
            _: u32,
            _: Self::Input<'db>,
        ) -> CycleRecoveryAction<Self::Output<'db>> {
            unimplemented!()
        }
    }
}
salsa-0.23.0/src/function/specify.rs000064400000000000000000000133671046102023000154470ustar 00000000000000use crate::accumulator::accumulated_map::InputAccumulatedValues;
use crate::function::memo::Memo;
use crate::function::{Configuration, IngredientImpl};
use crate::revision::AtomicRevision;
use crate::sync::atomic::AtomicBool;
use crate::tracked_struct::TrackedStructInDb;
use crate::zalsa::{Zalsa, ZalsaDatabase};
use crate::zalsa_local::{QueryOrigin, QueryOriginRef, QueryRevisions, QueryRevisionsExtra};
use crate::{DatabaseKeyIndex, Id};

impl<C> IngredientImpl<C>
where
    C: Configuration,
{
    /// Specify the value for `key` *and* record that we did so.
    /// Used for explicit calls to `specify`, but not needed for pre-declared tracked struct fields.
    pub fn specify_and_record<'db>(&'db self, db: &'db C::DbView, key: Id, value: C::Output<'db>)
    where
        C::Input<'db>: TrackedStructInDb,
    {
        let (zalsa, zalsa_local) = db.zalsas();

        let (active_query_key, current_deps) = match zalsa_local.active_query() {
            Some(v) => v,
            None => panic!("can only use `specify` inside a tracked function"),
        };

        // `specify` only works if the key is a tracked struct created in the current query.
        //
        // The reason is this. We want to ensure that the same result is reached regardless of
        // the "path" that the user takes through the execution graph.
        // If you permit values to be specified from other queries, you can have a situation like this:
        // * Q0 creates the tracked struct T0
        // * Q1 specifies the value for F(T0)
        // * Q2 invokes F(T0)
        // * Q3 invokes Q1 and then Q2
        // * Q4 invokes Q2 and then Q1
        //
        // Now, if we invoke Q3 first, we get one result for Q2, but if we invoke Q4 first, we get a different value. That's no good.
        let database_key_index = <C::Input<'db>>::database_key_index(zalsa, key);
        if !zalsa_local.is_output_of_active_query(database_key_index) {
            panic!("can only use `specify` on salsa structs created during the current tracked fn");
        }

        // Subtle: we treat the "input" to a set query as if it were
        // volatile.
        //
        // The idea is this. You have the current query C that
        // created the entity E, and it is setting the value F(E) of the function F.
        // When some other query R reads the field F(E), in order to have obtained
        // the entity E, it has to have executed the query C.
        //
        // This will have forced C to either:
        //
        // - not create E this time, in which case R shouldn't have it (some kind of leak has occurred)
        // - assign a value to F(E), in which case `verified_at` will be the current revision and `changed_at` will be updated appropriately
        // - NOT assign a value to F(E), in which case we need to re-execute the function (which typically panics).
        //
        // So, ruling out the case of a leak having occurred, that means that the reader R will either see:
        //
        // - a result that is verified in the current revision, because it was set, which will use the set value
        // - a result that is NOT verified and has untracked inputs, which will re-execute (and likely panic)

        let revision = zalsa.current_revision();
        let mut revisions = QueryRevisions {
            changed_at: current_deps.changed_at,
            durability: current_deps.durability,
            origin: QueryOrigin::assigned(active_query_key),
            accumulated_inputs: Default::default(),
            verified_final: AtomicBool::new(true),
            extra: QueryRevisionsExtra::default(),
        };

        let memo_ingredient_index = self.memo_ingredient_index(zalsa, key);
        if let Some(old_memo) = self.get_memo_from_table_for(zalsa, key, memo_ingredient_index) {
            self.backdate_if_appropriate(old_memo, database_key_index, &mut revisions, &value);
            self.diff_outputs(zalsa, database_key_index, old_memo, &mut revisions);
        }

        let memo = Memo {
            value: Some(value),
            verified_at: AtomicRevision::from(revision),
            revisions,
        };

        tracing::debug!(
            "specify: about to add memo {:#?} for key {:?}",
            memo.tracing_debug(),
            key
        );
        self.insert_memo(zalsa, key, memo, memo_ingredient_index);

        // Record that the current query *specified* a value for this cell.
        let database_key_index = self.database_key_index(key);
        zalsa_local.add_output(database_key_index);
    }

    /// Invoked when the query `executor` has been validated as having green inputs
    /// and `key` is a value that was specified by `executor`.
    /// Marks `key` as valid in the current revision since if `executor` had re-executed,
    /// it would have specified `key` again.
    pub(super) fn validate_specified_value(
        &self,
        zalsa: &Zalsa,
        executor: DatabaseKeyIndex,
        key: Id,
    ) {
        let memo_ingredient_index = self.memo_ingredient_index(zalsa, key);

        let memo = match self.get_memo_from_table_for(zalsa, key, memo_ingredient_index) {
            Some(m) => m,
            None => return,
        };

        // If we are marking this as validated, it must be a value that was
        // assigned by `executor`.
        match memo.revisions.origin.as_ref() {
            QueryOriginRef::Assigned(by_query) => assert_eq!(by_query, executor),
            _ => panic!(
                "expected a query assigned by `{:?}`, not `{:?}`",
                executor, memo.revisions.origin,
            ),
        }

        let database_key_index = self.database_key_index(key);
        memo.mark_as_verified(zalsa, database_key_index);
        memo.revisions
            .accumulated_inputs
            .store(InputAccumulatedValues::Empty);
    }
}
salsa-0.23.0/src/function/sync.rs000064400000000000000000000076311046102023000147560ustar 00000000000000use rustc_hash::FxHashMap;

use crate::key::DatabaseKeyIndex;
use crate::runtime::{BlockResult, Running, WaitResult};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::zalsa::Zalsa;
use crate::{Id, IngredientIndex};

pub(crate) type SyncGuard<'me> = crate::sync::MutexGuard<'me, FxHashMap<Id, SyncState>>;

/// Tracks the keys that are currently being processed; used to coordinate between
/// worker threads.
pub(crate) struct SyncTable {
    syncs: Mutex<FxHashMap<Id, SyncState>>,
    ingredient: IngredientIndex,
}

pub(crate) enum ClaimResult<'a> {
    /// Can't claim the query because it is running on another thread.
    Running(Running<'a>),
    /// Claiming the query results in a cycle.
    Cycle { same_thread: bool },
    /// Successfully claimed the query.
    Claimed(ClaimGuard<'a>),
}

pub(crate) struct SyncState {
    id: ThreadId,

    /// Set to true if any other queries are blocked,
    /// waiting for this query to complete.
    anyone_waiting: bool,
}

impl SyncTable {
    pub(crate) fn new(ingredient: IngredientIndex) -> Self {
        Self {
            syncs: Default::default(),
            ingredient,
        }
    }

    pub(crate) fn try_claim<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> ClaimResult<'me> {
        let mut write = self.syncs.lock();
        match write.entry(key_index) {
            std::collections::hash_map::Entry::Occupied(occupied_entry) => {
                let &mut SyncState {
                    id,
                    ref mut anyone_waiting,
                } = occupied_entry.into_mut();
                // NB: `Ordering::Relaxed` is sufficient here,
                // as there are no loads that are "gated" on this
                // value. Everything that is written is also protected
                // by a lock that must be acquired. The role of this
                // boolean is to decide *whether* to acquire the lock,
                // not to gate future atomic reads.
                *anyone_waiting = true;
                match zalsa.runtime().block(
                    DatabaseKeyIndex::new(self.ingredient, key_index),
                    id,
                    write,
                ) {
                    BlockResult::Running(blocked_on) => ClaimResult::Running(blocked_on),
                    BlockResult::Cycle { same_thread } => ClaimResult::Cycle { same_thread },
                }
            }
            std::collections::hash_map::Entry::Vacant(vacant_entry) => {
                vacant_entry.insert(SyncState {
                    id: thread::current().id(),
                    anyone_waiting: false,
                });
                ClaimResult::Claimed(ClaimGuard {
                    key_index,
                    zalsa,
                    sync_table: self,
                })
            }
        }
    }
}

/// Marks an active 'claim' in the synchronization map. The claim is
/// released when this value is dropped.
#[must_use]
pub(crate) struct ClaimGuard<'me> {
    key_index: Id,
    zalsa: &'me Zalsa,
    sync_table: &'me SyncTable,
}

impl ClaimGuard<'_> {
    fn remove_from_map_and_unblock_queries(&self) {
        let mut syncs = self.sync_table.syncs.lock();

        let SyncState { anyone_waiting, ..
        } = syncs.remove(&self.key_index).expect("key claimed twice?");

        if anyone_waiting {
            self.zalsa.runtime().unblock_queries_blocked_on(
                DatabaseKeyIndex::new(self.sync_table.ingredient, self.key_index),
                if thread::panicking() {
                    WaitResult::Panicked
                } else {
                    WaitResult::Completed
                },
            )
        }
    }
}

impl Drop for ClaimGuard<'_> {
    fn drop(&mut self) {
        self.remove_from_map_and_unblock_queries()
    }
}

impl std::fmt::Debug for SyncTable {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("SyncTable").finish()
    }
}
salsa-0.23.0/src/function.rs000064400000000000000000000337741046102023000140010ustar 00000000000000pub(crate) use maybe_changed_after::VerifyResult;

use std::any::Any;
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::Ordering;

pub(crate) use sync::SyncGuard;

use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues};
use crate::cycle::{
    empty_cycle_heads, CycleHeads, CycleRecoveryAction, CycleRecoveryStrategy, ProvisionalStatus,
};
use crate::function::delete::DeletedEntries;
use crate::function::sync::{ClaimResult, SyncTable};
use crate::ingredient::{Ingredient, WaitForResult};
use crate::key::DatabaseKeyIndex;
use crate::plumbing::MemoIngredientMap;
use crate::salsa_struct::SalsaStructInDb;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::table::Table;
use crate::views::DatabaseDownCaster;
use crate::zalsa::{IngredientIndex, MemoIngredientIndex, Zalsa};
use crate::zalsa_local::QueryOriginRef;
use crate::{Database, Id, Revision};

mod accumulated;
mod backdate;
mod delete;
mod diff_outputs;
mod execute;
mod fetch;
mod inputs;
mod lru;
mod maybe_changed_after;
mod memo;
mod specify;
mod sync;

pub type Memo<C> = memo::Memo<'static, C>;

pub trait Configuration: Any {
    const DEBUG_NAME: &'static str;
    const LOCATION: crate::ingredient::Location;

    /// The database that this function is associated with.
    type DbView: ?Sized + crate::Database;

    /// The "salsa struct type" that this function is associated with.
    /// This can be just `salsa::Id` for functions that intern their arguments
    /// and are not clearly associated with any one salsa struct.
    type SalsaStruct<'db>: SalsaStructInDb;

    /// The input to the function
    type Input<'db>: Send + Sync;

    /// The value computed by the function.
    type Output<'db>: Send + Sync;

    /// Determines whether this function can recover from being a participant in a cycle
    /// (and, if so, how).
    const CYCLE_STRATEGY: CycleRecoveryStrategy;

    /// Invoked after a new result `new_value` has been computed for which an older memoized
    /// value `old_value` existed, or during fixpoint iteration. Returns true if the new value
    /// is equal to the older one.
    ///
    /// This invokes user code in the form of the `Eq` impl.
    fn values_equal<'db>(old_value: &Self::Output<'db>, new_value: &Self::Output<'db>) -> bool;

    // FIXME: This should take a `&Zalsa`
    /// Convert from the id used internally to the value that execute is expecting.
    /// This is a no-op if the input to the function is a salsa struct.
    fn id_to_input(db: &Self::DbView, key: Id) -> Self::Input<'_>;

    /// Returns the size of any heap allocations in the output value, in bytes.
    fn heap_size(_value: &Self::Output<'_>) -> usize {
        0
    }

    /// Invoked when we need to compute the value for the given key, either because we've never
    /// computed it before or because the old one relied on inputs that have changed.
    ///
    /// This invokes the function the user wrote.
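    ///
    /// As a rough illustration (the struct and function names here are hypothetical),
    /// for a tracked function like the following, `execute` corresponds to running its
    /// body with `input` recovered via `id_to_input`:
    ///
    /// ```ignore
    /// #[salsa::tracked]
    /// fn length(db: &dyn salsa::Database, input: MyInput) -> usize {
    ///     input.text(db).len()
    /// }
    /// ```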
    fn execute<'db>(db: &'db Self::DbView, input: Self::Input<'db>) -> Self::Output<'db>;

    /// Get the cycle recovery initial value.
    fn cycle_initial<'db>(db: &'db Self::DbView, input: Self::Input<'db>) -> Self::Output<'db>;

    /// Decide whether to iterate a cycle again or fallback. `value` is the provisional return
    /// value from the latest iteration of this cycle. `count` is the number of cycle iterations
    /// we've already completed.
    fn recover_from_cycle<'db>(
        db: &'db Self::DbView,
        value: &Self::Output<'db>,
        count: u32,
        input: Self::Input<'db>,
    ) -> CycleRecoveryAction<Self::Output<'db>>;
}

/// Function ingredients are the "workhorse" of salsa.
///
/// They are used for tracked functions, for the "value" fields of tracked structs, and for the fields of input structs.
/// The function ingredient is fairly complex and so its code is spread across multiple modules, typically one per method.
/// The main entry points are:
///
/// * the `fetch` method, which is invoked when the function is called by the user's code;
///   it will return a memoized value if one exists, or execute the function otherwise.
/// * the `specify` method, which can only be used when the key is an entity created by the active query.
///   It sets the value of the function imperatively, so that when later fetches occur, they'll return this value.
/// * the `store` method, which can only be invoked with an `&mut` reference, and is to set input fields.
pub struct IngredientImpl<C: Configuration> {
    /// The ingredient index we were assigned in the database.
    /// Used to construct `DatabaseKeyIndex` values.
    index: IngredientIndex,

    /// The index for the memo/sync tables
    ///
    /// This may be a [`crate::memo_ingredient_indices::MemoIngredientSingletonIndex`] or a
    /// [`crate::memo_ingredient_indices::MemoIngredientIndices`], depending on whether the
    /// tracked function's struct is a plain salsa struct or an enum `#[derive(Supertype)]`.
    memo_ingredient_indices: <C::SalsaStruct<'static> as SalsaStructInDb>::MemoIngredientMap,

    /// Used to find memos to throw out when we have too many memoized values.
    lru: lru::Lru,

    /// A downcaster from `dyn Database` to `C::DbView`.
    ///
    /// # Safety
    ///
    /// The supplied database must be the same as the database used to construct the [`Views`]
    /// instances that this downcaster was derived from.
    view_caster: DatabaseDownCaster<C::DbView>,

    sync_table: SyncTable,

    /// When `fetch` and friends execute, they return a reference to the
    /// value stored in the memo that is extended to live as long as the `&self`
    /// reference we start with. This means that whenever we remove something
    /// from `memo_map` with an `&self` reference, there *could* be references to its
    /// internals still in use. Therefore we push the memo into this queue and
    /// only *actually* free up memory when a new revision starts (which means
    /// we have an `&mut` reference to self).
    ///
    /// You might think that we could do this only if the memo was verified in the
    /// current revision: you would be right, but we are being defensive, because
    /// we don't know that we can trust the database to give us the same runtime
    /// every time and so forth.
    deleted_entries: DeletedEntries<C>,
}

impl<C> IngredientImpl<C>
where
    C: Configuration,
{
    pub fn new(
        index: IngredientIndex,
        memo_ingredient_indices: <C::SalsaStruct<'static> as SalsaStructInDb>::MemoIngredientMap,
        lru: usize,
        view_caster: DatabaseDownCaster<C::DbView>,
    ) -> Self {
        Self {
            index,
            memo_ingredient_indices,
            lru: lru::Lru::new(lru),
            deleted_entries: Default::default(),
            view_caster,
            sync_table: SyncTable::new(index),
        }
    }

    #[inline]
    pub fn database_key_index(&self, key: Id) -> DatabaseKeyIndex {
        DatabaseKeyIndex::new(self.index, key)
    }

    pub fn set_capacity(&mut self, capacity: usize) {
        self.lru.set_capacity(capacity);
    }

    /// Returns a reference to the memo value that lives as long as self.
    /// This is UNSAFE: the caller is responsible for ensuring that the
    /// memo will not be released so long as the `&self` is valid.
    /// This is done by (a) ensuring the memo is present in the memo-map
    /// when this function is called and (b) ensuring that any entries
    /// removed from the memo-map are added to `deleted_entries`, which is
    /// only cleared with `&mut self`.
    unsafe fn extend_memo_lifetime<'this>(
        &'this self,
        memo: &memo::Memo<'this, C>,
    ) -> &'this memo::Memo<'this, C> {
        // SAFETY: the caller must guarantee that the memo will not be released before `&self`
        unsafe { std::mem::transmute(memo) }
    }

    fn insert_memo<'db>(
        &'db self,
        zalsa: &'db Zalsa,
        id: Id,
        mut memo: memo::Memo<'db, C>,
        memo_ingredient_index: MemoIngredientIndex,
    ) -> &'db memo::Memo<'db, C> {
        if let Some(tracked_struct_ids) = memo.revisions.tracked_struct_ids_mut() {
            tracked_struct_ids.shrink_to_fit();
        }
        // We convert to a `NonNull` here as soon as possible because we are going to alias
        // into the `Box`, which is a `noalias` type.
        // FIXME: Use `Box::into_non_null` once stable
        let memo = NonNull::from(Box::leak(Box::new(memo)));

        if let Some(old_value) =
            self.insert_memo_into_table_for(zalsa, id, memo, memo_ingredient_index)
        {
            // In case there is a reference to the old memo out there, we have to store it
            // in the deleted entries. This will get cleared when a new revision starts.
            //
            // SAFETY: Once the revision starts, there will be no outstanding borrows to the
            // memo contents, and so it will be safe to free.
            unsafe { self.deleted_entries.push(old_value) };
        }
        // SAFETY: memo has been inserted into the table
        unsafe { self.extend_memo_lifetime(memo.as_ref()) }
    }

    #[inline]
    fn memo_ingredient_index(&self, zalsa: &Zalsa, id: Id) -> MemoIngredientIndex {
        self.memo_ingredient_indices.get_zalsa_id(zalsa, id)
    }
}

impl<C> Ingredient for IngredientImpl<C>
where
    C: Configuration,
{
    fn location(&self) -> &'static crate::ingredient::Location {
        &C::LOCATION
    }

    fn ingredient_index(&self) -> IngredientIndex {
        self.index
    }

    unsafe fn maybe_changed_after(
        &self,
        db: &dyn Database,
        input: Id,
        revision: Revision,
        cycle_heads: &mut CycleHeads,
    ) -> VerifyResult {
        // SAFETY: The `db` belongs to the ingredient as per caller invariant
        let db = unsafe { self.view_caster.downcast_unchecked(db) };
        self.maybe_changed_after(db, input, revision, cycle_heads)
    }

    /// Returns `final` only if the memo has the `verified_final` flag set and the cycle recovery strategy is not `FallbackImmediate`.
    ///
    /// Otherwise, the value is still provisional. For both final and provisional, it also
    /// returns the iteration in which this memo was created (always 0 except for cycle heads).
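    ///
    /// A caller might interpret the result roughly like this (sketch only):
    ///
    /// ```ignore
    /// match ingredient.provisional_status(zalsa, input) {
    ///     None => { /* no memo exists for `input` */ }
    ///     Some(ProvisionalStatus::Final { iteration }) => { /* finalized; safe to reuse */ }
    ///     Some(ProvisionalStatus::Provisional { iteration }) => { /* cycle still iterating */ }
    ///     Some(ProvisionalStatus::FallbackImmediate) => { /* immediate-fallback cycle value */ }
    /// }
    /// ```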
    fn provisional_status(&self, zalsa: &Zalsa, input: Id) -> Option<ProvisionalStatus> {
        let memo =
            self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input))?;

        let iteration = memo.revisions.iteration();
        let verified_final = memo.revisions.verified_final.load(Ordering::Relaxed);

        Some(if verified_final {
            if C::CYCLE_STRATEGY == CycleRecoveryStrategy::FallbackImmediate {
                ProvisionalStatus::FallbackImmediate
            } else {
                ProvisionalStatus::Final { iteration }
            }
        } else {
            ProvisionalStatus::Provisional { iteration }
        })
    }

    fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads {
        self.get_memo_from_table_for(zalsa, input, self.memo_ingredient_index(zalsa, input))
            .map(|memo| memo.cycle_heads())
            .unwrap_or(empty_cycle_heads())
    }

    /// Attempts to claim `key_index` without blocking.
    ///
    /// * [`WaitForResult::Running`] if the `key_index` is running on another thread. It's up to the caller to block on the other thread
    ///   to wait until the result becomes available.
    /// * [`WaitForResult::Available`] It is (or at least was) possible to claim the `key_index`
    /// * [`WaitForResult::Cycle`] Claiming the `key_index` results in a cycle because it's on the current thread's query stack or
    ///   running on another thread that is blocked on this thread.
    fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> {
        match self.sync_table.try_claim(zalsa, key_index) {
            ClaimResult::Running(blocked_on) => WaitForResult::Running(blocked_on),
            ClaimResult::Cycle { same_thread } => WaitForResult::Cycle { same_thread },
            ClaimResult::Claimed(_) => WaitForResult::Available,
        }
    }

    fn origin<'db>(&self, zalsa: &'db Zalsa, key: Id) -> Option<QueryOriginRef<'db>> {
        self.origin(zalsa, key)
    }

    fn mark_validated_output(
        &self,
        zalsa: &Zalsa,
        executor: DatabaseKeyIndex,
        output_key: crate::Id,
    ) {
        self.validate_specified_value(zalsa, executor, output_key);
    }

    fn remove_stale_output(
        &self,
        _zalsa: &Zalsa,
        _executor: DatabaseKeyIndex,
        _stale_output_key: crate::Id,
    ) {
        // This function is invoked when a query Q specifies the value for `stale_output_key` in rev 1,
        // but not in rev 2. We don't do anything in this case; we just leave the (now stale) memo.
        // Since its `verified_at` field has not changed, it will be considered dirty if it is invoked.
    }

    fn requires_reset_for_new_revision(&self) -> bool {
        true
    }

    fn reset_for_new_revision(&mut self, table: &mut Table) {
        self.lru.for_each_evicted(|evict| {
            let ingredient_index = table.ingredient_index(evict);
            Self::evict_value_from_memo_for(
                table.memos_mut(evict),
                self.memo_ingredient_indices.get(ingredient_index),
            )
        });
        self.deleted_entries.clear();
    }

    fn debug_name(&self) -> &'static str {
        C::DEBUG_NAME
    }

    fn memo_table_types(&self) -> Arc<MemoTableTypes> {
        unreachable!("function does not allocate pages")
    }

    fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy {
        C::CYCLE_STRATEGY
    }

    fn accumulated<'db>(
        &'db self,
        db: &'db dyn Database,
        key_index: Id,
    ) -> (Option<&'db AccumulatedMap>, InputAccumulatedValues) {
        let db = self.view_caster.downcast(db);
        self.accumulated_map(db, key_index)
    }
}

impl<C> std::fmt::Debug for IngredientImpl<C>
where
    C: Configuration,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(std::any::type_name::<Self>())
            .field("index", &self.index)
            .finish()
    }
}
salsa-0.23.0/src/hash.rs000064400000000000000000000015741046102023000131000ustar 00000000000000use std::hash::{BuildHasher, Hash, Hasher};

pub(crate) type FxHasher = std::hash::BuildHasherDefault<rustc_hash::FxHasher>;
pub(crate) type FxIndexSet<K> = indexmap::IndexSet<K, FxHasher>;
pub(crate) type FxLinkedHashSet<K> = hashlink::LinkedHashSet<K, FxHasher>;
pub(crate) type FxHashSet<K> = std::collections::HashSet<K, FxHasher>;

pub(crate) fn hash<T: Hash>(t: &T) -> u64 {
    FxHasher::default().hash_one(t)
}

// `TypeId` is a 128-bit hash internally, and its `Hash` implementation
// writes the lower 64 bits. Hashing it again would be unnecessary.
#[derive(Default)]
pub(crate) struct TypeIdHasher(u64);

impl Hasher for TypeIdHasher {
    fn write(&mut self, _: &[u8]) {
        unreachable!("`TypeId` calls `write_u64`");
    }

    #[inline]
    fn write_u64(&mut self, id: u64) {
        self.0 = id;
    }

    #[inline]
    fn finish(&self) -> u64 {
        self.0
    }
}
salsa-0.23.0/src/id.rs000064400000000000000000000110351046102023000125420ustar 00000000000000use std::fmt::Debug;
use std::hash::Hash;
use std::num::NonZeroU32;

use crate::zalsa::Zalsa;

/// The `Id` of a salsa struct in the database [`Table`](`crate::table::Table`).
///
/// The high-order bits of an `Id` store a 32-bit generation counter, while
/// the low-order bits pack a [`PageIndex`](`crate::table::PageIndex`) and
/// [`SlotIndex`](`crate::table::SlotIndex`) within the page.
///
/// The low-order bits of `Id` are a `u32` ranging from `0..Id::MAX_U32`.
/// The maximum range is smaller than a standard `u32` to leave
/// room for niches; currently there is only one niche, so that
/// `Option<Id>` is the same size as an `Id`.
///
/// As an end-user of `Salsa` you will generally not use `Id` directly;
/// it is wrapped in new types.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id {
    index: NonZeroU32,
    generation: u32,
}

impl Id {
    pub const MAX_U32: u32 = u32::MAX - 0xFF;
    pub const MAX_USIZE: usize = Self::MAX_U32 as usize;

    /// Create a `salsa::Id` from a u32 value, without a generation. This
    /// value should be less than [`Self::MAX_U32`].
    ///
    /// In general, you should not need to create salsa ids yourself,
    /// but it can be useful if you are using the type as a general
    /// purpose "identifier" internally.
    ///
    /// # Safety
    ///
    /// The supplied value must be less than [`Self::MAX_U32`].
    #[doc(hidden)]
    #[track_caller]
    #[inline]
    pub const unsafe fn from_index(index: u32) -> Self {
        debug_assert!(index < Self::MAX_U32);

        Id {
            // SAFETY: Caller obligation.
            index: unsafe { NonZeroU32::new_unchecked(index + 1) },
            generation: 0,
        }
    }

    /// Create a `salsa::Id` from a `u64` value.
    ///
    /// This should only be used to recreate an `Id` together with `Id::as_bits`.
    ///
    /// # Safety
    ///
    /// The data bits of the supplied value must represent a valid `Id` returned
    /// by `Id::as_bits`.
    #[doc(hidden)]
    #[track_caller]
    #[inline]
    pub const unsafe fn from_bits(bits: u64) -> Self {
        // SAFETY: Caller obligation.
        let index = unsafe { NonZeroU32::new_unchecked(bits as u32) };
        let generation = (bits >> 32) as u32;

        Id { index, generation }
    }

    /// Return a `u64` representation of this `Id`.
    #[inline]
    pub fn as_bits(self) -> u64 {
        u64::from(self.index.get()) | (u64::from(self.generation) << 32)
    }

    /// Returns a new `Id` with the same index, but the generation incremented by one.
    ///
    /// Returns `None` if the generation would overflow, i.e. the current generation
    /// is `u32::MAX`.
    #[inline]
    pub fn next_generation(self) -> Option<Id> {
        self.generation()
            .checked_add(1)
            .map(|generation| self.with_generation(generation))
    }

    /// Mark the `Id` with a generation.
    ///
    /// This `Id` will refer to the same page and slot in the database,
    /// but will differ from other identifiers of the slot based on the
    /// provided generation.
    #[inline]
    pub const fn with_generation(self, generation: u32) -> Id {
        Id {
            index: self.index,
            generation,
        }
    }

    /// Return the index portion of this `Id`.
    #[inline]
    pub const fn index(self) -> u32 {
        self.index.get() - 1
    }

    /// Return the generation of this `Id`.
    #[inline]
    pub const fn generation(self) -> u32 {
        self.generation
    }
}

impl Debug for Id {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.generation() == 0 {
            write!(f, "Id({:x})", self.index())
        } else {
            write!(f, "Id({:x}g{:x})", self.index(), self.generation())
        }
    }
}

/// Internal salsa trait for types that can be represented as a salsa id.
pub trait AsId: Sized {
    fn as_id(&self) -> Id;
}

/// Internal Salsa trait for types that are just a newtype'd [`Id`][].
pub trait FromId {
    fn from_id(id: Id) -> Self;
}

impl AsId for Id {
    #[inline]
    fn as_id(&self) -> Id {
        *self
    }
}

impl FromId for Id {
    #[inline]
    fn from_id(id: Id) -> Self {
        id
    }
}

/// Enums cannot use [`FromId`] because they need access to the DB to tell the `TypeId` of the variant,
/// so they use this trait instead, which has a blanket implementation for `FromId`.
pub trait FromIdWithDb {
    fn from_id(id: Id, zalsa: &Zalsa) -> Self;
}

impl<T: FromId> FromIdWithDb for T {
    #[inline]
    fn from_id(id: Id, _zalsa: &Zalsa) -> Self {
        FromId::from_id(id)
    }
}
salsa-0.23.0/src/ingredient.rs000064400000000000000000000224701046102023000143030ustar 00000000000000use std::any::{Any, TypeId};
use std::fmt;

use crate::accumulator::accumulated_map::{AccumulatedMap, InputAccumulatedValues};
use crate::cycle::{
    empty_cycle_heads, CycleHeads, CycleRecoveryStrategy, IterationCount, ProvisionalStatus,
};
use crate::function::VerifyResult;
use crate::plumbing::IngredientIndices;
use crate::runtime::Running;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::table::Table;
use crate::zalsa::{transmute_data_mut_ptr, transmute_data_ptr, IngredientIndex, Zalsa};
use crate::zalsa_local::QueryOriginRef;
use crate::{Database, DatabaseKeyIndex, Id, Revision};

/// A "jar" is a group of ingredients that are added atomically.
/// Each type implementing jar can be added to the database at most once.
pub trait Jar: Any {
    /// This creates the ingredient dependencies of this jar.
We need to split this from `create_ingredients()`
    /// because while `create_ingredients()` is called, a lock on the ingredient map is held (to guarantee
    /// atomicity), so other ingredients cannot be created.
    ///
    /// Only tracked fns use this.
    fn create_dependencies(_zalsa: &Zalsa) -> IngredientIndices
    where
        Self: Sized,
    {
        IngredientIndices::empty()
    }

    /// Create the ingredients given the index of the first one.
    /// All subsequent ingredients will be assigned contiguous indices.
    fn create_ingredients(
        zalsa: &Zalsa,
        first_index: IngredientIndex,
        dependencies: IngredientIndices,
    ) -> Vec<Box<dyn Ingredient>>
    where
        Self: Sized;

    /// This returns the [`TypeId`] of the ID struct, that is, the struct that wraps `salsa::Id`
    /// and carries the name of the jar.
    fn id_struct_type_id() -> TypeId
    where
        Self: Sized;
}

pub struct Location {
    pub file: &'static str,
    pub line: u32,
}

pub trait Ingredient: Any + std::fmt::Debug + Send + Sync {
    fn debug_name(&self) -> &'static str;
    fn location(&self) -> &'static Location;

    /// Has the value for `input` in this ingredient changed after `revision`?
    ///
    /// # Safety
    ///
    /// The passed in database needs to be the same one that the ingredient was created with.
    unsafe fn maybe_changed_after<'db>(
        &'db self,
        db: &'db dyn Database,
        input: Id,
        revision: Revision,
        cycle_heads: &mut CycleHeads,
    ) -> VerifyResult;

    /// Returns information about the current provisional status of `input`.
    ///
    /// Is it a provisional value or has it been finalized and in which iteration.
    ///
    /// Returns `None` if `input` doesn't exist.
    fn provisional_status(&self, zalsa: &Zalsa, input: Id) -> Option<ProvisionalStatus> {
        _ = (zalsa, input);
        Some(ProvisionalStatus::Final {
            iteration: IterationCount::initial(),
        })
    }

    /// Returns the cycle heads for this ingredient.
    fn cycle_heads<'db>(&self, zalsa: &'db Zalsa, input: Id) -> &'db CycleHeads {
        _ = (zalsa, input);
        empty_cycle_heads()
    }

    /// Invoked when the current thread needs to wait for a result for the given `key_index`.
    /// This call doesn't block the current thread. Instead, it's up to the caller to block
    /// in case `key_index` is [running](`WaitForResult::Running`) on another thread.
    ///
    /// A return value of [`WaitForResult::Available`] indicates that a result is now available.
    /// A return value of [`WaitForResult::Running`] indicates that `key_index` is currently running
    /// on another thread; it's up to the caller to block until the result becomes available if desired.
    /// A return value of [`WaitForResult::Cycle`] means that a cycle was encountered; the waited-on query is either already claimed
    /// by the current thread, or by a thread waiting on the current thread.
    fn wait_for<'me>(&'me self, zalsa: &'me Zalsa, key_index: Id) -> WaitForResult<'me> {
        _ = (zalsa, key_index);
        WaitForResult::Available
    }

    /// Invoked when the value `output_key` should be marked as valid in the current revision.
    /// This occurs because the value for `executor`, which generated it, was marked as valid
    /// in the current revision.
    fn mark_validated_output(
        &self,
        zalsa: &Zalsa,
        executor: DatabaseKeyIndex,
        output_key: crate::Id,
    ) {
        let _ = (zalsa, executor, output_key);
        unreachable!("only tracked struct and function ingredients can have validatable outputs")
    }

    /// Invoked when the value `stale_output` was output by `executor` in a previous
    /// revision, but was NOT output in the current revision.
    ///
    /// This hook is used to clear out the stale value so others cannot read it.
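    ///
    /// A sketch of the scenario this covers (query and struct names hypothetical):
    ///
    /// ```ignore
    /// // Rev 1: `executor` creates tracked struct `T`, so `T` is recorded as an output.
    /// // Rev 2: `executor` re-executes and no longer creates `T`; this hook then runs
    /// // so that later reads cannot observe the stale `T`.
    /// ```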
    fn remove_stale_output(&self, zalsa: &Zalsa, executor: DatabaseKeyIndex, stale_output_key: Id) {
        let _ = (zalsa, executor, stale_output_key);
        unreachable!("only tracked struct ingredients can have stale outputs")
    }

    /// Returns the [`IngredientIndex`] of this ingredient.
    fn ingredient_index(&self) -> IngredientIndex;

    /// Returns true if `reset_for_new_revision` should be called when new revisions start.
    /// Invoked once when the ingredient is added and not after that.
    fn requires_reset_for_new_revision(&self) -> bool {
        false
    }

    /// Invoked when a new revision is about to start.
    /// This moment is important because it means that we have an `&mut`-reference to the
    /// database, and hence any pre-existing `&`-references must have expired.
    /// Many ingredients, given an `&'db`-reference to the database,
    /// use unsafe code to return `&'db`-references to internal values.
    /// The backing memory for those values can only be freed once an `&mut`-reference to the
    /// database is created.
    ///
    /// **Important:** to actually receive resets, the ingredient must return `true` from
    /// `requires_reset_for_new_revision`.
    fn reset_for_new_revision(&mut self, table: &mut Table) {
        _ = table;
        panic!(
            "Ingredient `{}` set `Ingredient::requires_reset_for_new_revision` to true but does \
            not overwrite `Ingredient::reset_for_new_revision`",
            self.debug_name()
        );
    }

    fn memo_table_types(&self) -> Arc<MemoTableTypes>;

    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_index(self.debug_name(), index, fmt)
    }

    // Function ingredient methods

    /// If this ingredient is a participant in a cycle, what is its cycle recovery strategy?
    /// (Really only relevant to [`crate::function::FunctionIngredient`],
    /// since only function ingredients push themselves onto the active query stack.)
    fn cycle_recovery_strategy(&self) -> CycleRecoveryStrategy {
        unreachable!("only function ingredients can be part of a cycle")
    }

    /// What were the inputs (if any) that were used to create the value at `key_index`.
    fn origin<'db>(&self, zalsa: &'db Zalsa, key_index: Id) -> Option<QueryOriginRef<'db>> {
        let _ = (zalsa, key_index);
        unreachable!("only function ingredients have origins")
    }

    /// What values were accumulated during the creation of the value at `key_index`
    /// (if any).
    fn accumulated<'db>(
        &'db self,
        db: &'db dyn Database,
        key_index: Id,
    ) -> (Option<&'db AccumulatedMap>, InputAccumulatedValues) {
        let _ = (db, key_index);
        (None, InputAccumulatedValues::Empty)
    }

    /// Returns memory usage information about any instances of the ingredient,
    /// if applicable.
    #[cfg(feature = "salsa_unstable")]
    fn memory_usage(&self, _db: &dyn Database) -> Option<Vec<crate::database::SlotInfo>> {
        None
    }
}

impl dyn Ingredient {
    /// Equivalent to the `downcast` methods on `Any`.
    /// Because we do not have dyn-upcasting support, we need this workaround.
    pub fn assert_type<T: Any>(&self) -> &T {
        assert_eq!(
            self.type_id(),
            TypeId::of::<T>(),
            "ingredient `{self:?}` is not of type `{}`",
            std::any::type_name::<T>()
        );

        // SAFETY: We know that the underlying data pointer
        // refers to a value of type T because of the `TypeId` check above.
        unsafe { transmute_data_ptr(self) }
    }

    /// Equivalent to the `downcast` methods on `Any`.
    /// Because we do not have dyn-upcasting support, we need this workaround.
    pub fn assert_type_mut<T: Any>(&mut self) -> &mut T {
        assert_eq!(
            Any::type_id(self),
            TypeId::of::<T>(),
            "ingredient `{self:?}` is not of type `{}`",
            std::any::type_name::<T>()
        );

        // SAFETY: We know that the underlying data pointer
        // refers to a value of type T because of the `TypeId` check above.
        unsafe { transmute_data_mut_ptr(self) }
    }
}

/// A helper function for human-readable formatting of an ingredient's key.
pub(crate) fn fmt_index(debug_name: &str, id: Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(fmt, "{debug_name}({id:?})")
}

pub enum WaitForResult<'me> {
    Running(Running<'me>),
    Available,
    Cycle { same_thread: bool },
}

impl WaitForResult<'_> {
    /// Returns `true` if waiting for this input results in a cycle with another thread.
    pub const fn is_cycle_with_other_thread(&self) -> bool {
        matches!(self, WaitForResult::Cycle { same_thread: false })
    }
}
salsa-0.23.0/src/input/input_field.rs000064400000000000000000000051371046102023000156150ustar 00000000000000use std::fmt;
use std::marker::PhantomData;

use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::ingredient::Ingredient;
use crate::input::{Configuration, IngredientImpl, Value};
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::zalsa::IngredientIndex;
use crate::{Database, Id, Revision};

/// Ingredient used to represent the fields of a `#[salsa::input]`.
///
/// These fields can only be mutated by a call to a setter with an `&mut`
/// reference to the database, and therefore cannot be mutated during a tracked
/// function or in parallel.
/// However, for on-demand inputs to work, the fields must be able to be set via
/// a shared reference, so some locking is required.
/// Altogether this makes the implementation somewhat simpler than tracked
/// structs.
pub struct FieldIngredientImpl<C: Configuration> {
    index: IngredientIndex,
    field_index: usize,
    phantom: PhantomData<fn() -> Value<C>>,
}

impl<C> FieldIngredientImpl<C>
where
    C: Configuration,
{
    pub(super) fn new(struct_index: IngredientIndex, field_index: usize) -> Self {
        Self {
            index: struct_index.successor(field_index),
            field_index,
            phantom: PhantomData,
        }
    }
}

impl<C> Ingredient for FieldIngredientImpl<C>
where
    C: Configuration,
{
    fn location(&self) -> &'static crate::ingredient::Location {
        &C::LOCATION
    }

    fn ingredient_index(&self) -> IngredientIndex {
        self.index
    }

    unsafe fn maybe_changed_after(
        &self,
        db: &dyn Database,
        input: Id,
        revision: Revision,
        _cycle_heads: &mut CycleHeads,
    ) -> VerifyResult {
        let zalsa = db.zalsa();
        let value = <IngredientImpl<C>>::data(zalsa, input);
        VerifyResult::changed_if(value.revisions[self.field_index] > revision)
    }

    fn fmt_index(&self, index: crate::Id, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            fmt,
            "{input}.{field}({id:?})",
            input = C::DEBUG_NAME,
            field = C::FIELD_DEBUG_NAMES[self.field_index],
            id = index
        )
    }

    fn debug_name(&self) -> &'static str {
        C::FIELD_DEBUG_NAMES[self.field_index]
    }

    fn memo_table_types(&self) -> Arc<MemoTableTypes> {
        unreachable!("input fields do not allocate pages")
    }
}

impl<C> std::fmt::Debug for FieldIngredientImpl<C>
where
    C: Configuration,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(std::any::type_name::<Self>())
            .field("index", &self.index)
            .finish()
    }
}
salsa-0.23.0/src/input/setter.rs000064400000000000000000000033261046102023000146170ustar 00000000000000use std::marker::PhantomData;

use crate::input::{Configuration, IngredientImpl};
use crate::{Durability, Runtime};

/// Setter for a field of an input.
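///
/// As a rough usage sketch (the input struct and its `text` field are hypothetical;
/// `set_text` is the generated setter):
///
/// ```ignore
/// #[salsa::input]
/// struct MyInput {
///     text: String,
/// }
///
/// // Setters require `&mut db`, so no queries can be running concurrently.
/// input.set_text(&mut db)
///     .with_durability(Durability::HIGH)
///     .to("hello".to_string());
/// ```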
pub trait Setter: Sized { type FieldTy; fn with_durability(self, durability: Durability) -> Self; fn to(self, value: Self::FieldTy) -> Self::FieldTy; } #[must_use] pub struct SetterImpl<'setter, C: Configuration, S, F> { runtime: &'setter mut Runtime, id: C::Struct, ingredient: &'setter mut IngredientImpl, durability: Option, field_index: usize, setter: S, phantom: PhantomData, } impl<'setter, C, S, F> SetterImpl<'setter, C, S, F> where C: Configuration, S: FnOnce(&mut C::Fields, F) -> F, { pub fn new( runtime: &'setter mut Runtime, id: C::Struct, field_index: usize, ingredient: &'setter mut IngredientImpl, setter: S, ) -> Self { SetterImpl { runtime, id, field_index, ingredient, durability: None, setter, phantom: PhantomData, } } } impl Setter for SetterImpl<'_, C, S, F> where C: Configuration, S: FnOnce(&mut C::Fields, F) -> F, { type FieldTy = F; fn with_durability(mut self, durability: Durability) -> Self { self.durability = Some(durability); self } fn to(self, value: F) -> F { let Self { runtime, id, ingredient, durability, field_index, setter, phantom: _, } = self; ingredient.set_field(runtime, id, field_index, durability, |tuple| { setter(tuple, value) }) } } salsa-0.23.0/src/input/singleton.rs000064400000000000000000000027021046102023000153100ustar 00000000000000use crate::sync::atomic::{AtomicU64, Ordering}; use crate::Id; mod sealed { pub trait Sealed {} } pub trait SingletonChoice: sealed::Sealed + Default { fn with_scope(&self, cb: impl FnOnce() -> Id) -> Id; fn index(&self) -> Option; } pub struct Singleton { index: AtomicU64, } impl sealed::Sealed for Singleton {} impl SingletonChoice for Singleton { fn with_scope(&self, cb: impl FnOnce() -> Id) -> Id { if self.index.load(Ordering::Acquire) != 0 { panic!("singleton struct may not be duplicated"); } let id = cb(); if self .index .compare_exchange(0, id.as_bits(), Ordering::AcqRel, Ordering::Acquire) .is_err() { panic!("singleton struct may not be duplicated"); } id } fn index(&self) -> Option { match self.index.load(Ordering::Acquire) { 0 => None, // SAFETY: Our u64 is derived from an ID and thus safe to convert back. id => Some(unsafe { Id::from_bits(id) }), } } } impl Default for Singleton { fn default() -> Self { Self { index: AtomicU64::new(0), } } } #[derive(Default)] pub struct NotSingleton; impl sealed::Sealed for NotSingleton {} impl SingletonChoice for NotSingleton { fn with_scope(&self, cb: impl FnOnce() -> Id) -> Id { cb() } fn index(&self) -> Option { None } } salsa-0.23.0/src/input.rs000064400000000000000000000242631046102023000133140ustar 00000000000000use std::any::{Any, TypeId}; use std::fmt; use std::ops::IndexMut; pub mod input_field; pub mod setter; pub mod singleton; use input_field::FieldIngredientImpl; use crate::cycle::CycleHeads; use crate::function::VerifyResult; use crate::id::{AsId, FromId, FromIdWithDb}; use crate::ingredient::Ingredient; use crate::input::singleton::{Singleton, SingletonChoice}; use crate::key::DatabaseKeyIndex; use crate::plumbing::Jar; use crate::sync::Arc; use crate::table::memo::{MemoTable, MemoTableTypes}; use crate::table::{Slot, Table}; use crate::zalsa::{IngredientIndex, Zalsa}; use crate::{Database, Durability, Id, Revision, Runtime}; pub trait Configuration: Any { const DEBUG_NAME: &'static str; const FIELD_DEBUG_NAMES: &'static [&'static str]; const LOCATION: crate::ingredient::Location; /// The singleton state for this input if any. 
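    /// This is [`Singleton`] for inputs declared with `#[salsa::input(singleton)]`
    /// and [`NotSingleton`] otherwise. A sketch of a singleton input, assuming a
    /// hypothetical `Config` struct:
    ///
    /// ```ignore
    /// #[salsa::input(singleton)]
    /// struct Config {
    ///     flags: u32,
    /// }
    /// ```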
    type Singleton: SingletonChoice + Send + Sync;

    /// The input struct (which wraps an `Id`)
    type Struct: FromId + AsId + 'static + Send + Sync;

    /// A (possibly empty) tuple of the fields for this struct.
    type Fields: Send + Sync;

    /// An array of [`Revision`], one for each of the value fields.
    type Revisions: Send + Sync + fmt::Debug + IndexMut<usize, Output = Revision>;

    /// An array of [`Durability`], one for each of the value fields.
    type Durabilities: Send + Sync + fmt::Debug + IndexMut<usize, Output = Durability>;
}

pub struct JarImpl<C: Configuration> {
    _phantom: std::marker::PhantomData<C>,
}

impl<C: Configuration> Default for JarImpl<C> {
    fn default() -> Self {
        Self {
            _phantom: Default::default(),
        }
    }
}

impl<C: Configuration> Jar for JarImpl<C> {
    fn create_ingredients(
        _zalsa: &Zalsa,
        struct_index: crate::zalsa::IngredientIndex,
        _dependencies: crate::memo_ingredient_indices::IngredientIndices,
    ) -> Vec<Box<dyn Ingredient>> {
        let struct_ingredient: IngredientImpl<C> = IngredientImpl::new(struct_index);

        std::iter::once(Box::new(struct_ingredient) as _)
            .chain((0..C::FIELD_DEBUG_NAMES.len()).map(|field_index| {
                Box::new(<FieldIngredientImpl<C>>::new(struct_index, field_index)) as _
            }))
            .collect()
    }

    fn id_struct_type_id() -> TypeId {
        TypeId::of::<C::Struct>()
    }
}

pub struct IngredientImpl<C: Configuration> {
    ingredient_index: IngredientIndex,
    singleton: C::Singleton,
    memo_table_types: Arc<MemoTableTypes>,
    _phantom: std::marker::PhantomData<C::Struct>,
}

impl<C: Configuration> IngredientImpl<C> {
    pub fn new(index: IngredientIndex) -> Self {
        Self {
            ingredient_index: index,
            singleton: Default::default(),
            memo_table_types: Arc::new(MemoTableTypes::default()),
            _phantom: std::marker::PhantomData,
        }
    }

    fn data(zalsa: &Zalsa, id: Id) -> &Value<C> {
        zalsa.table().get(id)
    }

    fn data_raw(table: &Table, id: Id) -> *mut Value<C> {
        table.get_raw(id)
    }

    pub fn database_key_index(&self, id: C::Struct) -> DatabaseKeyIndex {
        DatabaseKeyIndex::new(self.ingredient_index, id.as_id())
    }

    pub fn new_input(
        &self,
        db: &dyn Database,
        fields: C::Fields,
        revisions: C::Revisions,
        durabilities: C::Durabilities,
    ) -> C::Struct {
        let (zalsa, zalsa_local) = db.zalsas();

        let id = self.singleton.with_scope(|| {
            zalsa_local.allocate(zalsa, self.ingredient_index, |_| Value::<C> {
                fields,
                revisions,
                durabilities,
                memos: Default::default(),
            })
        });

        FromIdWithDb::from_id(id, zalsa)
    }

    /// Change the value of the field `field_index` to a new value.
    ///
    /// # Parameters
    ///
    /// * `runtime`, the salsa runtime
    /// * `id`, id of the input struct
    /// * `field_index`, index of the field that will be changed
    /// * `durability`, durability of the new value. If omitted, uses the durability of the previous value.
    /// * `setter`, function that modifies the fields tuple; should only modify the element for `field_index`
    pub fn set_field<R>(
        &mut self,
        runtime: &mut Runtime,
        id: C::Struct,
        field_index: usize,
        durability: Option<Durability>,
        setter: impl FnOnce(&mut C::Fields) -> R,
    ) -> R {
        let id: Id = id.as_id();

        let data_raw = Self::data_raw(runtime.table(), id);

        // SAFETY: We hold `&mut` on the runtime so no `&`-references can be active.
        // Also, we don't access any other data from the table while `r` is active.
        let data = unsafe { &mut *data_raw };

        data.revisions[field_index] = runtime.current_revision();
        let field_durability = &mut data.durabilities[field_index];
        if *field_durability != Durability::MIN {
            runtime.report_tracked_write(*field_durability);
        }
        *field_durability = durability.unwrap_or(*field_durability);

        setter(&mut data.fields)
    }

    /// Get the singleton input previously created (if any).
    #[doc(hidden)]
    pub fn get_singleton_input(&self, zalsa: &Zalsa) -> Option<C::Struct>
    where
        C: Configuration,
    {
        self.singleton
            .index()
            .map(|id| FromIdWithDb::from_id(id, zalsa))
    }

    /// Access field of an input.
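    /// This is the method behind the generated field getters. A sketch of the
    /// user-facing API, assuming a hypothetical input `File` with a `contents`
    /// field:
    ///
    /// ```ignore
    /// let text = file.contents(&db); // records a read of the `contents` field
    /// ```
    ///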
/// Note that this function returns the entire tuple of value fields. /// The caller is responsible for selecting the appropriate element. pub fn field<'db>( &'db self, db: &'db dyn crate::Database, id: C::Struct, field_index: usize, ) -> &'db C::Fields { let (zalsa, zalsa_local) = db.zalsas(); let field_ingredient_index = self.ingredient_index.successor(field_index); let id = id.as_id(); let value = Self::data(zalsa, id); let durability = value.durabilities[field_index]; let revision = value.revisions[field_index]; zalsa_local.report_tracked_read_simple( DatabaseKeyIndex::new(field_ingredient_index, id), durability, revision, ); &value.fields } #[cfg(feature = "salsa_unstable")] /// Returns all data corresponding to the input struct. pub fn entries<'db>( &'db self, db: &'db dyn crate::Database, ) -> impl Iterator> { db.zalsa().table().slots_of::>() } /// Peek at the field values without recording any read dependency. /// Used for debug printouts. pub fn leak_fields<'db>(&'db self, db: &'db dyn Database, id: C::Struct) -> &'db C::Fields { let zalsa = db.zalsa(); let id = id.as_id(); let value = Self::data(zalsa, id); &value.fields } } impl Ingredient for IngredientImpl { fn location(&self) -> &'static crate::ingredient::Location { &C::LOCATION } fn ingredient_index(&self) -> IngredientIndex { self.ingredient_index } unsafe fn maybe_changed_after( &self, _db: &dyn Database, _input: Id, _revision: Revision, _cycle_heads: &mut CycleHeads, ) -> VerifyResult { // Input ingredients are just a counter, they store no data, they are immortal. // Their *fields* are stored in function ingredients elsewhere. VerifyResult::unchanged() } fn debug_name(&self) -> &'static str { C::DEBUG_NAME } fn memo_table_types(&self) -> Arc { self.memo_table_types.clone() } /// Returns memory usage information about any inputs. #[cfg(feature = "salsa_unstable")] fn memory_usage(&self, db: &dyn Database) -> Option> { let memory_usage = self .entries(db) // SAFETY: The memo table belongs to a value that we allocated, so it // has the correct type. .map(|value| unsafe { value.memory_usage(&self.memo_table_types) }) .collect(); Some(memory_usage) } } impl std::fmt::Debug for IngredientImpl { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct(std::any::type_name::()) .field("index", &self.ingredient_index) .finish() } } #[derive(Debug)] pub struct Value where C: Configuration, { /// Fields of this input struct. /// /// They can change across revisions, but they do not change within /// a particular revision. fields: C::Fields, /// Revisions of the fields. revisions: C::Revisions, /// Durabilities of the fields. durabilities: C::Durabilities, /// Memos memos: MemoTable, } impl Value where C: Configuration, { /// Fields of this tracked struct. /// /// They can change across revisions, but they do not change within /// a particular revision. #[cfg(feature = "salsa_unstable")] pub fn fields(&self) -> &C::Fields { &self.fields } /// Returns memory usage information about the input. /// /// # Safety /// /// The `MemoTable` must belong to a `Value` of the correct type. #[cfg(feature = "salsa_unstable")] unsafe fn memory_usage(&self, memo_table_types: &MemoTableTypes) -> crate::database::SlotInfo { // SAFETY: The caller guarantees this is the correct types table. 
        let memos = unsafe { memo_table_types.attach_memos(&self.memos) };
        crate::database::SlotInfo {
            debug_name: C::DEBUG_NAME,
            size_of_metadata: std::mem::size_of::<Self>() - std::mem::size_of::<C::Fields>(),
            size_of_fields: std::mem::size_of::<C::Fields>(),
            memos: memos.memory_usage(),
        }
    }
}

pub trait HasBuilder {
    type Builder;
}

// SAFETY: `Value` is our private type branded over the unique configuration `C`.
unsafe impl<C> Slot for Value<C>
where
    C: Configuration,
{
    #[inline(always)]
    unsafe fn memos(&self, _current_revision: Revision) -> &crate::table::memo::MemoTable {
        &self.memos
    }

    #[inline(always)]
    fn memos_mut(&mut self) -> &mut crate::table::memo::MemoTable {
        &mut self.memos
    }
}
salsa-0.23.0/src/interned.rs000064400000000000000000001217361046102023000137660ustar 00000000000000use std::any::TypeId;
use std::cell::{Cell, UnsafeCell};
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};

use crossbeam_utils::CachePadded;
use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink, UnsafeRef};
use rustc_hash::FxBuildHasher;

use crate::cycle::CycleHeads;
use crate::durability::Durability;
use crate::function::VerifyResult;
use crate::id::{AsId, FromId};
use crate::ingredient::Ingredient;
use crate::plumbing::{IngredientIndices, Jar, ZalsaLocal};
use crate::revision::AtomicRevision;
use crate::sync::{Arc, Mutex, OnceLock};
use crate::table::memo::{MemoTable, MemoTableTypes, MemoTableWithTypesMut};
use crate::table::Slot;
use crate::zalsa::{IngredientIndex, Zalsa};
use crate::{Database, DatabaseKeyIndex, Event, EventKind, Id, Revision};

/// Trait that defines the key properties of an interned struct.
///
/// Implemented by the `#[salsa::interned]` macro when applied to
/// a struct.
pub trait Configuration: Sized + 'static {
    const LOCATION: crate::ingredient::Location;
    const DEBUG_NAME: &'static str;

    // The minimum number of revisions that must pass before a stale value is garbage collected.
    #[cfg(not(test))]
    const REVISIONS: NonZeroUsize = NonZeroUsize::new(3).unwrap();
    // More aggressive garbage collection by default when testing.
    #[cfg(test)]
    const REVISIONS: NonZeroUsize = NonZeroUsize::new(1).unwrap();

    /// The fields of the struct being interned.
    type Fields<'db>: InternedData;

    /// The end user struct
    type Struct<'db>: Copy + FromId + AsId;
}

pub trait InternedData: Sized + Eq + Hash + Clone + Sync + Send {}
impl<T: Eq + Hash + Clone + Sync + Send> InternedData for T {}

pub struct JarImpl<C: Configuration> {
    phantom: PhantomData<C>,
}

/// The interned ingredient hashes values of type `C::Fields` to produce an `Id`.
///
/// It is used to store interned structs, but also the ID fields of tracked structs.
/// Interned values are garbage collected and their memory reused based on an LRU heuristic.
pub struct IngredientImpl<C: Configuration> {
    /// Index of this ingredient in the database (used to construct database-IDs, etc).
    ingredient_index: IngredientIndex,

    /// A hasher for the sharded ID maps.
    hasher: FxBuildHasher,

    /// A shift used to determine the shard for a given hash.
    shift: u32,

    /// Sharded data that can only be accessed through a lock.
    shards: Box<[CachePadded<Mutex<IngredientShard<C>>>]>,

    /// A queue of recent revisions in which values were interned.
    revision_queue: RevisionQueue<C>,

    memo_table_types: Arc<MemoTableTypes>,

    _marker: PhantomData<fn() -> C>,
}

struct IngredientShard<C: Configuration> {
    /// Maps from data to the existing interned ID for that data.
    ///
    /// This doesn't hold the fields themselves to save memory, instead it points
    /// to the slot ID.
    key_map: hashbrown::HashTable<Id>,

    /// An intrusive linked list for LRU.
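    ///
    /// Reusable values are kept in most-recently-used order; the list is
    /// intrusive so that a value can be unlinked in O(1) from a raw pointer,
    /// without any auxiliary allocation.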
lru: LinkedList>, } impl Default for IngredientShard { fn default() -> Self { Self { lru: LinkedList::default(), key_map: hashbrown::HashTable::new(), } } } // SAFETY: `LinkedListLink` is `!Sync`, however, the linked list is only accessed through the // ingredient lock, and values are only ever linked to a single list on the ingredient. unsafe impl Sync for Value {} intrusive_adapter!(ValueAdapter = UnsafeRef>: Value { link: LinkedListLink } where C: Configuration); /// Struct storing the interned fields. pub struct Value where C: Configuration, { /// The index of the shard containing this value. shard: u16, /// An intrusive linked list for LRU. link: LinkedListLink, /// The interned fields for this value. /// /// These are valid for read-only access as long as the lock is held /// or the value has been validated in the current revision. fields: UnsafeCell>, /// Memos attached to this interned value. /// /// This is valid for read-only access as long as the lock is held /// or the value has been validated in the current revision. memos: UnsafeCell, /// Data that can only be accessed while holding the lock for the /// `key_map` shard containing the value ID. shared: UnsafeCell, } /// Shared value data can only be read through the lock. #[repr(Rust, packed)] // Allow `durability` to be stored in the padding of the outer `Value` struct. struct ValueShared { /// The interned ID for this value. /// /// Storing this on the value itself is necessary to identify slots /// from the LRU list, as well as keep track of the generation. /// /// Values that are reused increment the ID generation, as if they had /// allocated a new slot. This eliminates the need for dependency edges /// on queries that *read* from an interned value, as any memos dependent /// on the previous value will not match the new ID. /// /// However, reusing a slot invalidates the previous ID, so dependency edges /// on queries that *create* an interned value are still required to ensure /// the value is re-interned with a new ID. id: Id, /// The revision the value was most-recently interned in. last_interned_at: Revision, /// The minimum durability of all inputs consumed by the creator /// query prior to creating this interned struct. If any of those /// inputs changes, then the creator query may create this struct /// with different values. durability: Durability, } impl ValueShared { /// Returns `true` if this value slot can be reused when interning, and should be added to the LRU. fn is_reusable(&self) -> bool { // Garbage collection is disabled. if C::REVISIONS == IMMORTAL { return false; } // Collecting higher durability values requires invalidating the revision for their // durability (see `Database::synthetic_write`, which requires a mutable reference to // the database) to avoid short-circuiting calls to `maybe_changed_after`. This is // necessary because `maybe_changed_after` for interned values is not "pure"; it updates // the `last_interned_at` field before validating a given value to ensure that it is not // reused after read in the current revision. self.durability == Durability::LOW } } impl Value where C: Configuration, { /// Fields of this interned struct. #[cfg(feature = "salsa_unstable")] pub fn fields(&self) -> &C::Fields<'static> { // SAFETY: The fact that this function is safe is technically unsound. However, interned // values are only exposed if they have been validated in the current revision, which // ensures that they are not reused while being accessed. 
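        // (This is also why `fields` lives in an `UnsafeCell`: readers rely on
        // revision validation, or on holding the shard lock, rather than on
        // `&mut` exclusivity.)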
unsafe { &*self.fields.get() } } /// Returns memory usage information about the interned value. /// /// # Safety /// /// The `MemoTable` must belong to a `Value` of the correct type. Additionally, the /// lock must be held for the shard containing the value. #[cfg(all(not(feature = "shuttle"), feature = "salsa_unstable"))] unsafe fn memory_usage(&self, memo_table_types: &MemoTableTypes) -> crate::database::SlotInfo { // SAFETY: The caller guarantees we hold the lock for the shard containing the value, so we // have at-least read-only access to the value's memos. let memos = unsafe { &*self.memos.get() }; // SAFETY: The caller guarantees this is the correct types table. let memos = unsafe { memo_table_types.attach_memos(memos) }; crate::database::SlotInfo { debug_name: C::DEBUG_NAME, size_of_metadata: std::mem::size_of::() - std::mem::size_of::>(), size_of_fields: std::mem::size_of::>(), memos: memos.memory_usage(), } } } impl Default for JarImpl { fn default() -> Self { Self { phantom: PhantomData, } } } impl Jar for JarImpl { fn create_ingredients( _zalsa: &Zalsa, first_index: IngredientIndex, _dependencies: IngredientIndices, ) -> Vec> { vec![Box::new(IngredientImpl::::new(first_index)) as _] } fn id_struct_type_id() -> TypeId { TypeId::of::>() } } impl IngredientImpl where C: Configuration, { pub fn new(ingredient_index: IngredientIndex) -> Self { static SHARDS: OnceLock = OnceLock::new(); let shards = *SHARDS.get_or_init(|| { let num_cpus = std::thread::available_parallelism() .map(usize::from) .unwrap_or(1); (num_cpus * 4).next_power_of_two() }); Self { ingredient_index, hasher: FxBuildHasher, memo_table_types: Arc::new(MemoTableTypes::default()), revision_queue: RevisionQueue::default(), shift: usize::BITS - shards.trailing_zeros(), shards: (0..shards).map(|_| Default::default()).collect(), _marker: PhantomData, } } /// Returns the shard for a given hash. /// /// Note that this value is guaranteed to be in-bounds for `self.shards`. #[inline] fn shard(&self, hash: u64) -> usize { // https://github.com/xacrimon/dashmap/blob/366ce7e7872866a06de66eb95002fa6cf2c117a7/src/lib.rs#L421 ((hash as usize) << 7) >> self.shift } /// # Safety /// /// The `from_internal_data` function must be called to restore the correct lifetime /// before access. unsafe fn to_internal_data<'db>(&'db self, data: C::Fields<'db>) -> C::Fields<'static> { // SAFETY: Guaranteed by caller. unsafe { std::mem::transmute(data) } } fn from_internal_data<'db>(data: &'db C::Fields<'static>) -> &'db C::Fields<'db> { // SAFETY: It's sound to go from `Data<'static>` to `Data<'db>`. We shrink the // lifetime here to use a single lifetime in `Lookup::eq(&StructKey<'db>, &C::Data<'db>)` unsafe { std::mem::transmute(data) } } /// Intern data to a unique reference. /// /// If `key` is already interned, returns the existing [`Id`] for the interned data without /// invoking `assemble`. /// /// Otherwise, invokes `assemble` with the given `key` and the [`Id`] to be allocated for this /// interned value. The resulting [`C::Data`] will then be interned. /// /// Note: Using the database within the `assemble` function may result in a deadlock if /// the database ends up trying to intern or allocate a new value. pub fn intern<'db, Key>( &'db self, db: &'db dyn crate::Database, key: Key, assemble: impl FnOnce(Id, Key) -> C::Fields<'db>, ) -> C::Struct<'db> where Key: Hash, C::Fields<'db>: HashEqLike, { FromId::from_id(self.intern_id(db, key, assemble)) } /// Intern data to a unique reference. 
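    /// A sketch of the user-facing API that bottoms out here, assuming a
    /// hypothetical interned struct `Word`:
    ///
    /// ```ignore
    /// #[salsa::interned]
    /// struct Word<'db> {
    ///     text: String,
    /// }
    ///
    /// let a = Word::new(db, String::from("hi"));
    /// let b = Word::new(db, String::from("hi"));
    /// assert_eq!(a, b); // equal fields intern to the same ID
    /// ```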
/// /// If `key` is already interned, returns the existing [`Id`] for the interned data without /// invoking `assemble`. /// /// Otherwise, invokes `assemble` with the given `key` and the [`Id`] to be allocated for this /// interned value. The resulting [`C::Data`] will then be interned. /// /// Note: Using the database within the `assemble` function may result in a deadlock if /// the database ends up trying to intern or allocate a new value. pub fn intern_id<'db, Key>( &'db self, db: &'db dyn crate::Database, key: Key, assemble: impl FnOnce(Id, Key) -> C::Fields<'db>, ) -> crate::Id where Key: Hash, // We'd want the following predicate, but this currently implies `'static` due to a rustc // bug // for<'db> C::Data<'db>: HashEqLike, // so instead we go with this and transmute the lifetime in the `eq` closure C::Fields<'db>: HashEqLike, { let (zalsa, zalsa_local) = db.zalsas(); // Record the current revision as active. let current_revision = zalsa.current_revision(); self.revision_queue.record(current_revision); // Hash the value before acquiring the lock. let hash = self.hasher.hash_one(&key); let shard_index = self.shard(hash); // SAFETY: `shard_index` is guaranteed to be in-bounds for `self.shards`. let shard = unsafe { &mut *self.shards.get_unchecked(shard_index).lock() }; let found_value = Cell::new(None); // SAFETY: We hold the lock for the shard containing the value. let eq = |id: &_| unsafe { Self::value_eq(*id, &key, zalsa, &found_value) }; // Attempt a fast-path lookup of already interned data. if let Some(&id) = shard.key_map.find(hash, eq) { let value = found_value .get() .expect("found the interned value, so `found_value` should be set"); let index = self.database_key_index(id); // SAFETY: We hold the lock for the shard containing the value. let value_shared = unsafe { &mut *value.shared.get() }; // Validate the value in this revision to avoid reuse. if { value_shared.last_interned_at } < current_revision { value_shared.last_interned_at = current_revision; zalsa.event(&|| { Event::new(EventKind::DidValidateInternedValue { key: index, revision: current_revision, }) }); if value_shared.is_reusable::() { // Move the value to the front of the LRU list. // // SAFETY: We hold the lock for the shard containing the value, and `value` is // a reusable value that was previously interned, so is in the list. unsafe { shard.lru.cursor_mut_from_ptr(value).remove() }; // SAFETY: The value pointer is valid for the lifetime of the database // and never accessed mutably directly. unsafe { shard.lru.push_front(UnsafeRef::from_raw(value)) }; } } if let Some((_, stamp)) = zalsa_local.active_query() { let was_reusable = value_shared.is_reusable::(); // Record the maximum durability across all queries that intern this value. value_shared.durability = std::cmp::max(value_shared.durability, stamp.durability); // If the value is no longer reusable, i.e. the durability increased, remove it // from the LRU. if was_reusable && !value_shared.is_reusable::() { // SAFETY: We hold the lock for the shard containing the value, and `value` // was previously reusable, so is in the list. unsafe { shard.lru.cursor_mut_from_ptr(value).remove() }; } } // Record a dependency on the value. // // See `intern_id_cold` for why we need to use `current_revision` here. Note that just // because this value was previously interned does not mean it was previously interned // by *our query*, so the same considerations apply. 
zalsa_local.report_tracked_read_simple( index, value_shared.durability, current_revision, ); return value_shared.id; } // Fill up the table for the first few revisions without attempting garbage collection. if !self.revision_queue.is_primed() { return self.intern_id_cold( db, key, zalsa, zalsa_local, assemble, shard, shard_index, hash, ); } // Otherwise, try to reuse a stale slot. let mut cursor = shard.lru.back_mut(); while let Some(value) = cursor.get() { // SAFETY: We hold the lock for the shard containing the value. let value_shared = unsafe { &mut *value.shared.get() }; // The value must not have been read in the current revision to be collected // soundly, but we also do not want to collect values that have been read recently. // // Note that the list is sorted by LRU, so if the tail of the list is not stale, we // will not find any stale slots. if !self.revision_queue.is_stale(value_shared.last_interned_at) { break; } // We should never reuse a value that was accessed in the current revision. debug_assert!({ value_shared.last_interned_at } < current_revision); // Record the durability of the current query on the interned value. let (durability, last_interned_at) = zalsa_local .active_query() .map(|(_, stamp)| (stamp.durability, current_revision)) // If there is no active query this durability does not actually matter. // `last_interned_at` needs to be `Revision::MAX`, see the `intern_access_in_different_revision` test. .unwrap_or((Durability::MAX, Revision::max())); let old_id = value_shared.id; // Increment the generation of the ID, as if we allocated a new slot. // // If the ID is at its maximum generation, we are forced to leak the slot. let Some(new_id) = value_shared.id.next_generation() else { // Remove the value from the LRU list as we will never be able to // collect it. cursor.remove().unwrap(); // Retry with the previous element. cursor = shard.lru.back_mut(); continue; }; // Mark the slot as reused. *value_shared = ValueShared { id: new_id, durability, last_interned_at, }; let index = self.database_key_index(value_shared.id); // Record a dependency on the new value. // // See `intern_id_cold` for why we need to use `current_revision` here. zalsa_local.report_tracked_read_simple( index, value_shared.durability, current_revision, ); zalsa.event(&|| { Event::new(EventKind::DidReuseInternedValue { key: index, revision: current_revision, }) }); // Remove the value from the LRU list. // // SAFETY: The value pointer is valid for the lifetime of the database. let value = unsafe { &*UnsafeRef::into_raw(cursor.remove().unwrap()) }; // SAFETY: We hold the lock for the shard containing the value, and the // value has not been interned in the current revision, so no references to // it can exist. let old_fields = unsafe { &mut *value.fields.get() }; // Remove the previous value from the ID map. // // Note that while the ID stays the same when a slot is reused, the fields, // and thus the hash, will change, so we need to re-insert the value into the // map. Crucially, we know that the hashes for the old and new fields both map // to the same shard, because we determined the initial shard based on the new // fields and only accessed the LRU list for that shard. let old_hash = self.hasher.hash_one(&*old_fields); shard .key_map .find_entry(old_hash, |found_id: &Id| *found_id == old_id) .expect("interned value in LRU so must be in key_map") .remove(); // Update the fields. // // SAFETY: We call `from_internal_data` to restore the correct lifetime before access. 
*old_fields = unsafe { self.to_internal_data(assemble(new_id, key)) }; // SAFETY: We hold the lock for the shard containing the value. let hasher = |id: &_| unsafe { self.value_hash(*id, zalsa) }; // Insert the new value into the ID map. shard.key_map.insert_unique(hash, new_id, hasher); // Free the memos associated with the previous interned value. // // SAFETY: We hold the lock for the shard containing the value, and the // value has not been interned in the current revision, so no references to // it can exist. let mut memo_table = unsafe { std::mem::take(&mut *value.memos.get()) }; // SAFETY: The memo table belongs to a value that we allocated, so it has the // correct type. unsafe { self.clear_memos(zalsa, &mut memo_table, new_id) }; if value_shared.is_reusable::() { // Move the value to the front of the LRU list. // // SAFETY: The value pointer is valid for the lifetime of the database. // and never accessed mutably directly. shard.lru.push_front(unsafe { UnsafeRef::from_raw(value) }); } return new_id; } // If we could not find any stale slots, we are forced to allocate a new one. self.intern_id_cold( db, key, zalsa, zalsa_local, assemble, shard, shard_index, hash, ) } /// The cold path for interning a value, allocating a new slot. /// /// Returns `true` if the current thread interned the value. #[allow(clippy::too_many_arguments)] fn intern_id_cold<'db, Key>( &'db self, _db: &'db dyn crate::Database, key: Key, zalsa: &Zalsa, zalsa_local: &ZalsaLocal, assemble: impl FnOnce(Id, Key) -> C::Fields<'db>, shard: &mut IngredientShard, shard_index: usize, hash: u64, ) -> crate::Id where Key: Hash, C::Fields<'db>: HashEqLike, { let current_revision = zalsa.current_revision(); // Record the durability of the current query on the interned value. let (durability, last_interned_at) = zalsa_local .active_query() .map(|(_, stamp)| (stamp.durability, current_revision)) // If there is no active query this durability does not actually matter. // `last_interned_at` needs to be `Revision::MAX`, see the `intern_access_in_different_revision` test. .unwrap_or((Durability::MAX, Revision::max())); // Allocate the value slot. let id = zalsa_local.allocate(zalsa, self.ingredient_index, |id| Value:: { shard: shard_index as u16, link: LinkedListLink::new(), memos: UnsafeCell::new(MemoTable::default()), // SAFETY: We call `from_internal_data` to restore the correct lifetime before access. fields: UnsafeCell::new(unsafe { self.to_internal_data(assemble(id, key)) }), shared: UnsafeCell::new(ValueShared { id, durability, last_interned_at, }), }); let value = zalsa.table().get::>(id); // SAFETY: We hold the lock for the shard containing the value. let value_shared = unsafe { &mut *value.shared.get() }; if value_shared.is_reusable::() { // Add the value to the front of the LRU list. // // SAFETY: The value pointer is valid for the lifetime of the database // and never accessed mutably directly. shard.lru.push_front(unsafe { UnsafeRef::from_raw(value) }); } // SAFETY: We hold the lock for the shard containing the value. let hasher = |id: &_| unsafe { self.value_hash(*id, zalsa) }; // Insert the value into the ID map. shard.key_map.insert_unique(hash, id, hasher); debug_assert_eq!(hash, { let value = zalsa.table().get::>(id); // SAFETY: We hold the lock for the shard containing the value. unsafe { self.hasher.hash_one(&*value.fields.get()) } }); let index = self.database_key_index(id); // Record a dependency on the newly interned value. 
// // Note that the ID is unique to this use of the interned slot, so it seems logical to use // `Revision::start()` here. However, it is possible that the ID we read is different from // the previous execution of this query if the previous slot has been reused. In that case, // the query has changed without a corresponding input changing. Using `current_revision` // for dependencies on interned values encodes the fact that interned IDs are not stable // across revisions. zalsa_local.report_tracked_read_simple(index, durability, current_revision); zalsa.event(&|| { Event::new(EventKind::DidInternValue { key: index, revision: current_revision, }) }); id } /// Clears the given memo table. /// /// # Safety /// /// The `MemoTable` must belong to a `Value` of the correct type. pub(crate) unsafe fn clear_memos(&self, zalsa: &Zalsa, memo_table: &mut MemoTable, id: Id) { // SAFETY: The caller guarantees this is the correct types table. let table = unsafe { self.memo_table_types.attach_memos_mut(memo_table) }; // `Database::salsa_event` is a user supplied callback which may panic // in that case we need a drop guard to free the memo table struct TableDropGuard<'a>(MemoTableWithTypesMut<'a>); impl Drop for TableDropGuard<'_> { fn drop(&mut self) { // SAFETY: We have `&mut MemoTable`, so no more references to these memos exist and we are good // to drop them. unsafe { self.0.drop() }; } } let mut table_guard = TableDropGuard(table); // SAFETY: We have `&mut MemoTable`, so no more references to these memos exist and we are good // to drop them. unsafe { table_guard.0.take_memos(|memo_ingredient_index, memo| { let ingredient_index = zalsa.ingredient_index_for_memo(self.ingredient_index, memo_ingredient_index); let executor = DatabaseKeyIndex::new(ingredient_index, id); zalsa.event(&|| Event::new(EventKind::DidDiscard { key: executor })); for stale_output in memo.origin().outputs() { stale_output.remove_stale_output(zalsa, executor); } }) }; std::mem::forget(table_guard); } // Hashes the value by its fields. // // # Safety // // The lock must be held for the shard containing the value. unsafe fn value_hash<'db>(&'db self, id: Id, zalsa: &'db Zalsa) -> u64 { // This closure is only called if the table is resized. So while it's expensive // to lookup all values, it will only happen rarely. let value = zalsa.table().get::>(id); // SAFETY: We hold the lock for the shard containing the value. unsafe { self.hasher.hash_one(&*value.fields.get()) } } // Compares the value by its fields to the given key. // // # Safety // // The lock must be held for the shard containing the value. unsafe fn value_eq<'db, Key>( id: Id, key: &Key, zalsa: &'db Zalsa, found_value: &Cell>>, ) -> bool where C::Fields<'db>: HashEqLike, { let value = zalsa.table().get::>(id); found_value.set(Some(value)); // SAFETY: We hold the lock for the shard containing the value. let fields = unsafe { &*value.fields.get() }; HashEqLike::eq(Self::from_internal_data(fields), key) } /// Returns the database key index for an interned value with the given id. #[inline] pub fn database_key_index(&self, id: Id) -> DatabaseKeyIndex { DatabaseKeyIndex::new(self.ingredient_index, id) } /// Lookup the data for an interned value based on its ID. pub fn data<'db>(&'db self, db: &'db dyn Database, id: Id) -> &'db C::Fields<'db> { let zalsa = db.zalsa(); let value = zalsa.table().get::>(id); debug_assert!( { let _shard = self.shards[value.shard as usize].lock(); // SAFETY: We hold the lock for the shard containing the value. 
let value_shared = unsafe { &mut *value.shared.get() }; let last_changed_revision = zalsa.last_changed_revision(value_shared.durability); ({ value_shared.last_interned_at }) >= last_changed_revision }, "Data was not interned in the latest revision for its durability." ); // SAFETY: Interned values are only exposed if they have been validated in the // current revision, as checked by the assertion above, which ensures that they // are not reused while being accessed. unsafe { Self::from_internal_data(&*value.fields.get()) } } /// Lookup the fields from an interned struct. /// /// Note that this is not "leaking" since no dependency edge is required. pub fn fields<'db>(&'db self, db: &'db dyn Database, s: C::Struct<'db>) -> &'db C::Fields<'db> { self.data(db, AsId::as_id(&s)) } pub fn reset(&mut self, db: &mut dyn Database) { _ = db.zalsa_mut(); for shard in self.shards.iter() { // We can clear the key maps now that we have cancelled all other handles. shard.lock().key_map.clear(); } } #[cfg(feature = "salsa_unstable")] /// Returns all data corresponding to the interned struct. pub fn entries<'db>( &'db self, db: &'db dyn crate::Database, ) -> impl Iterator> { db.zalsa().table().slots_of::>() } } impl Ingredient for IngredientImpl where C: Configuration, { fn location(&self) -> &'static crate::ingredient::Location { &C::LOCATION } fn ingredient_index(&self) -> IngredientIndex { self.ingredient_index } unsafe fn maybe_changed_after( &self, db: &dyn Database, input: Id, _revision: Revision, _cycle_heads: &mut CycleHeads, ) -> VerifyResult { let zalsa = db.zalsa(); // Record the current revision as active. let current_revision = zalsa.current_revision(); self.revision_queue.record(current_revision); let value = zalsa.table().get::>(input); // SAFETY: `value.shard` is guaranteed to be in-bounds for `self.shards`. let _shard = unsafe { self.shards.get_unchecked(value.shard as usize) }.lock(); // SAFETY: We hold the lock for the shard containing the value. let value_shared = unsafe { &mut *value.shared.get() }; // The slot was reused. if value_shared.id.generation() > input.generation() { return VerifyResult::Changed; } // Validate the value for the current revision to avoid reuse. value_shared.last_interned_at = current_revision; zalsa.event(&|| { let index = self.database_key_index(input); Event::new(EventKind::DidValidateInternedValue { key: index, revision: current_revision, }) }); // Any change to an interned value results in a new ID generation. VerifyResult::unchanged() } fn debug_name(&self) -> &'static str { C::DEBUG_NAME } fn memo_table_types(&self) -> Arc { self.memo_table_types.clone() } /// Returns memory usage information about any interned values. #[cfg(all(not(feature = "shuttle"), feature = "salsa_unstable"))] fn memory_usage(&self, db: &dyn Database) -> Option> { use parking_lot::lock_api::RawMutex; for shard in self.shards.iter() { // SAFETY: We do not hold any active mutex guards. unsafe { shard.raw().lock() }; } let memory_usage = self .entries(db) // SAFETY: The memo table belongs to a value that we allocated, so it // has the correct type. Additionally, we are holding the locks for all shards. .map(|value| unsafe { value.memory_usage(&self.memo_table_types) }) .collect(); for shard in self.shards.iter() { // SAFETY: We acquired the locks for all shards. 
unsafe { shard.raw().unlock() }; } Some(memory_usage) } } impl std::fmt::Debug for IngredientImpl where C: Configuration, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct(std::any::type_name::()) .field("index", &self.ingredient_index) .finish() } } // SAFETY: `Value` is our private type branded over the unique configuration `C`. unsafe impl Slot for Value where C: Configuration, { #[inline(always)] unsafe fn memos(&self, _current_revision: Revision) -> &MemoTable { // SAFETY: The fact that we have a reference to the `Value` means it must // have been interned, and thus validated, in the current revision. unsafe { &*self.memos.get() } } #[inline(always)] fn memos_mut(&mut self) -> &mut MemoTable { self.memos.get_mut() } } /// Keep track of revisions in which interned values were read, to determine staleness. /// /// An interned value is considered stale if it has not been read in the past `REVS` /// revisions. However, we only consider revisions in which interned values were actually /// read, as revisions may be created in bursts. struct RevisionQueue { lock: Mutex<()>, // Once `feature(generic_const_exprs)` is stable this can just be an array. revisions: Box<[AtomicRevision]>, _configuration: PhantomData C>, } // `#[salsa::interned(revisions = usize::MAX)]` disables garbage collection. const IMMORTAL: NonZeroUsize = NonZeroUsize::MAX; impl Default for RevisionQueue { fn default() -> RevisionQueue { let revisions = if C::REVISIONS == IMMORTAL { Box::default() } else { (0..C::REVISIONS.get()) .map(|_| AtomicRevision::start()) .collect() }; RevisionQueue { lock: Mutex::new(()), revisions, _configuration: PhantomData, } } } impl RevisionQueue { /// Record the given revision as active. #[inline] fn record(&self, revision: Revision) { // Garbage collection is disabled. if C::REVISIONS == IMMORTAL { return; } // Fast-path: We already recorded this revision. if self.revisions[0].load() >= revision { return; } self.record_cold(revision); } #[cold] fn record_cold(&self, revision: Revision) { let _lock = self.lock.lock(); // Otherwise, update the queue, maintaining sorted order. // // Note that this should only happen once per revision. for i in (1..C::REVISIONS.get()).rev() { self.revisions[i].store(self.revisions[i - 1].load()); } self.revisions[0].store(revision); } /// Returns `true` if the given revision is old enough to be considered stale. #[inline] fn is_stale(&self, revision: Revision) -> bool { // Garbage collection is disabled. if C::REVISIONS == IMMORTAL { return false; } let oldest = self.revisions[C::REVISIONS.get() - 1].load(); // If we have not recorded `REVS` revisions yet, nothing can be stale. if oldest == Revision::start() { return false; } revision < oldest } /// Returns `true` if `C::REVISIONS` revisions have been recorded as active, /// i.e. enough data has been recorded to start garbage collection. #[inline] fn is_primed(&self) -> bool { // Garbage collection is disabled. if C::REVISIONS == IMMORTAL { return false; } self.revisions[C::REVISIONS.get() - 1].load() > Revision::start() } } /// A trait for types that hash and compare like `O`. pub trait HashEqLike { fn hash(&self, h: &mut H); fn eq(&self, data: &O) -> bool; } /// The `Lookup` trait is a more flexible variant on [`std::borrow::Borrow`] /// and [`std::borrow::ToOwned`]. /// /// It is implemented by "some type that can be used as the lookup key for `O`". /// This means that `self` can be hashed and compared for equality with values /// of type `O` without actually creating an owned value. 
If `self` needs to be interned,
/// it can be converted into an equivalent value of type `O`.
///
/// The canonical example is `&str: Lookup<String>`. However, this example
/// alone can be handled by [`std::borrow::Borrow`][]. In our case, we may have
/// multiple keys accumulated into a struct, like `ViewStruct: Lookup<(K1, ...)>`,
/// where `struct ViewStruct<L1: Lookup<K1>...>(K1...)`. The `Borrow` trait
/// requires that `&(K1...)` be convertible to `&ViewStruct` which just isn't
/// possible. `Lookup` instead offers direct `hash` and `eq` methods.
pub trait Lookup<O> {
    fn into_owned(self) -> O;
}

impl<T> Lookup<T> for T {
    fn into_owned(self) -> T {
        self
    }
}

impl<T> HashEqLike<T> for T
where
    T: Hash + Eq,
{
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, &mut *h);
    }
    fn eq(&self, data: &T) -> bool {
        self == data
    }
}

impl<T> HashEqLike<T> for &T
where
    T: Hash + Eq,
{
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(*self, &mut *h);
    }
    fn eq(&self, data: &T) -> bool {
        **self == *data
    }
}

impl<T> HashEqLike<&T> for T
where
    T: Hash + Eq,
{
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, &mut *h);
    }
    fn eq(&self, data: &&T) -> bool {
        *self == **data
    }
}

impl<T> Lookup<T> for &T
where
    T: Clone,
{
    fn into_owned(self) -> T {
        Clone::clone(self)
    }
}

impl<'a, T> HashEqLike<&'a T> for Box<T>
where
    T: ?Sized + Hash + Eq,
    Box<T>: From<&'a T>,
{
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, &mut *h)
    }
    fn eq(&self, data: &&T) -> bool {
        **self == **data
    }
}

impl<'a, T> Lookup<Box<T>> for &'a T
where
    T: ?Sized + Hash + Eq,
    Box<T>: From<&'a T>,
{
    fn into_owned(self) -> Box<T> {
        Box::from(self)
    }
}

impl<'a, T> HashEqLike<&'a T> for Arc<T>
where
    T: ?Sized + Hash + Eq,
    Arc<T>: From<&'a T>,
{
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(&**self, &mut *h)
    }
    fn eq(&self, data: &&T) -> bool {
        **self == **data
    }
}

impl<'a, T> Lookup<Arc<T>> for &'a T
where
    T: ?Sized + Hash + Eq,
    Arc<T>: From<&'a T>,
{
    fn into_owned(self) -> Arc<T> {
        Arc::from(self)
    }
}

impl Lookup<String> for &str {
    fn into_owned(self) -> String {
        self.to_owned()
    }
}

impl HashEqLike<&str> for String {
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, &mut *h)
    }
    fn eq(&self, data: &&str) -> bool {
        self == *data
    }
}

impl<A, T: Hash + Eq + PartialEq<A>> HashEqLike<&[A]> for Vec<T> {
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, h);
    }
    fn eq(&self, data: &&[A]) -> bool {
        self.len() == data.len() && data.iter().enumerate().all(|(i, a)| &self[i] == a)
    }
}

impl<A: Hash + Eq + PartialEq<T> + Clone + Lookup<T>, T> Lookup<Vec<T>> for &[A] {
    fn into_owned(self) -> Vec<T> {
        self.iter().map(|a| Lookup::into_owned(a.clone())).collect()
    }
}

impl<const N: usize, A, T: Hash + Eq + PartialEq<A>> HashEqLike<[A; N]> for Vec<T> {
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, h);
    }
    fn eq(&self, data: &[A; N]) -> bool {
        self.len() == data.len() && data.iter().enumerate().all(|(i, a)| &self[i] == a)
    }
}

impl<const N: usize, A: Hash + Eq + PartialEq<T> + Clone + Lookup<T>, T> Lookup<Vec<T>> for [A; N] {
    fn into_owned(self) -> Vec<T> {
        self.into_iter()
            .map(|a| Lookup::into_owned(a.clone()))
            .collect()
    }
}

impl HashEqLike<&Path> for PathBuf {
    fn hash<H: Hasher>(&self, h: &mut H) {
        Hash::hash(self, h);
    }
    fn eq(&self, data: &&Path) -> bool {
        self == data
    }
}

impl Lookup<PathBuf> for &Path {
    fn into_owned(self) -> PathBuf {
        self.to_owned()
    }
}
salsa-0.23.0/src/key.rs000064400000000000000000000047261046102023000127470ustar 00000000000000use core::fmt;

use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::zalsa::{IngredientIndex, Zalsa};
use crate::{Database, Id};

// ANCHOR: DatabaseKeyIndex
/// An integer that uniquely identifies a particular query instance within the
/// database. Used to track input and output dependencies between queries. Fully
/// ordered and equatable but those orderings are arbitrary, and meant to be used
/// only for inserting into maps and the like.
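///
/// A sketch of constructing and inspecting a key (using the accessors defined
/// below):
///
/// ```ignore
/// let key = DatabaseKeyIndex::new(ingredient_index, id);
/// assert_eq!(key.ingredient_index(), ingredient_index);
/// assert_eq!(key.key_index(), id);
/// ```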
#[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct DatabaseKeyIndex { key_index: Id, ingredient_index: IngredientIndex, } // ANCHOR_END: DatabaseKeyIndex impl DatabaseKeyIndex { #[inline] pub(crate) fn new(ingredient_index: IngredientIndex, key_index: Id) -> Self { Self { key_index, ingredient_index, } } pub const fn ingredient_index(self) -> IngredientIndex { self.ingredient_index } pub const fn key_index(self) -> Id { self.key_index } pub(crate) fn maybe_changed_after( &self, db: &dyn Database, zalsa: &Zalsa, last_verified_at: crate::Revision, cycle_heads: &mut CycleHeads, ) -> VerifyResult { // SAFETY: The `db` belongs to the ingredient unsafe { zalsa .lookup_ingredient(self.ingredient_index()) .maybe_changed_after(db, self.key_index(), last_verified_at, cycle_heads) } } pub(crate) fn remove_stale_output(&self, zalsa: &Zalsa, executor: DatabaseKeyIndex) { zalsa .lookup_ingredient(self.ingredient_index()) .remove_stale_output(zalsa, executor, self.key_index()) } pub(crate) fn mark_validated_output( &self, zalsa: &Zalsa, database_key_index: DatabaseKeyIndex, ) { zalsa .lookup_ingredient(self.ingredient_index()) .mark_validated_output(zalsa, database_key_index, self.key_index()) } } impl fmt::Debug for DatabaseKeyIndex { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { crate::attach::with_attached_database(|db| { let ingredient = db.zalsa().lookup_ingredient(self.ingredient_index()); ingredient.fmt_index(self.key_index(), f) }) .unwrap_or_else(|| { f.debug_tuple("DatabaseKeyIndex") .field(&self.ingredient_index()) .field(&self.key_index()) .finish() }) } } salsa-0.23.0/src/lib.rs000064400000000000000000000104571046102023000127230ustar 00000000000000#![deny(clippy::undocumented_unsafe_blocks)] #![forbid(unsafe_op_in_unsafe_fn)] mod accumulator; mod active_query; mod attach; mod cancelled; mod cycle; mod database; mod database_impl; mod durability; mod event; mod function; mod hash; mod id; mod ingredient; mod input; mod interned; mod key; mod memo_ingredient_indices; mod nonce; #[cfg(feature = "rayon")] mod parallel; mod return_mode; mod revision; mod runtime; mod salsa_struct; mod storage; mod sync; mod table; mod tracked_struct; mod update; mod views; mod zalsa; mod zalsa_local; #[cfg(feature = "rayon")] pub use parallel::{join, par_map}; #[cfg(feature = "macros")] pub use salsa_macros::{accumulator, db, input, interned, tracked, Supertype, Update}; #[cfg(feature = "salsa_unstable")] pub use self::database::IngredientInfo; pub use self::accumulator::Accumulator; pub use self::active_query::Backtrace; pub use self::cancelled::Cancelled; pub use self::cycle::CycleRecoveryAction; pub use self::database::{AsDynDatabase, Database}; pub use self::database_impl::DatabaseImpl; pub use self::durability::Durability; pub use self::event::{Event, EventKind}; pub use self::id::Id; pub use self::input::setter::Setter; pub use self::key::DatabaseKeyIndex; pub use self::return_mode::SalsaAsDeref; pub use self::return_mode::SalsaAsRef; pub use self::revision::Revision; pub use self::runtime::Runtime; pub use self::storage::{Storage, StorageHandle}; pub use self::update::Update; pub use self::zalsa::IngredientIndex; pub use crate::attach::{attach, with_attached_database}; pub mod prelude { pub use crate::{Accumulator, Database, Setter}; } /// Internal names used by salsa macros. /// /// # WARNING /// /// The contents of this module are NOT subject to semver. 
#[doc(hidden)] pub mod plumbing { pub use std::any::TypeId; pub use std::option::Option::{self, None, Some}; pub use salsa_macro_rules::{ macro_if, maybe_backdate, maybe_default, maybe_default_tt, return_mode_expression, return_mode_ty, setup_accumulator_impl, setup_input_struct, setup_interned_struct, setup_tracked_assoc_fn_body, setup_tracked_fn, setup_tracked_method_body, setup_tracked_struct, unexpected_cycle_initial, unexpected_cycle_recovery, }; pub use crate::accumulator::Accumulator; pub use crate::attach::{attach, with_attached_database}; pub use crate::cycle::{CycleRecoveryAction, CycleRecoveryStrategy}; pub use crate::database::{current_revision, Database}; pub use crate::durability::Durability; pub use crate::id::{AsId, FromId, FromIdWithDb, Id}; pub use crate::ingredient::{Ingredient, Jar, Location}; pub use crate::key::DatabaseKeyIndex; pub use crate::memo_ingredient_indices::{ IngredientIndices, MemoIngredientIndices, MemoIngredientMap, MemoIngredientSingletonIndex, NewMemoIngredientIndices, }; pub use crate::revision::Revision; pub use crate::runtime::{stamp, Runtime, Stamp}; pub use crate::salsa_struct::SalsaStructInDb; pub use crate::storage::{HasStorage, Storage}; pub use crate::tracked_struct::TrackedStructInDb; pub use crate::update::helper::{Dispatch as UpdateDispatch, Fallback as UpdateFallback}; pub use crate::update::{always_update, Update}; pub use crate::zalsa::{ transmute_data_ptr, views, IngredientCache, IngredientIndex, Zalsa, ZalsaDatabase, }; pub use crate::zalsa_local::ZalsaLocal; pub mod accumulator { pub use crate::accumulator::{IngredientImpl, JarImpl}; } pub mod input { pub use crate::input::input_field::FieldIngredientImpl; pub use crate::input::setter::SetterImpl; pub use crate::input::singleton::{NotSingleton, Singleton}; pub use crate::input::{Configuration, HasBuilder, IngredientImpl, JarImpl, Value}; } pub mod interned { pub use crate::interned::{ Configuration, HashEqLike, IngredientImpl, JarImpl, Lookup, Value, }; } pub mod function { pub use crate::function::Configuration; pub use crate::function::IngredientImpl; pub use crate::function::Memo; pub use crate::table::memo::MemoEntryType; } pub mod tracked_struct { pub use crate::tracked_struct::tracked_field::FieldIngredientImpl; pub use crate::tracked_struct::{Configuration, IngredientImpl, JarImpl, Value}; } } salsa-0.23.0/src/memo_ingredient_indices.rs000064400000000000000000000137501046102023000170170ustar 00000000000000use crate::sync::Arc; use crate::table::memo::{MemoEntryType, MemoTableTypes}; use crate::zalsa::{MemoIngredientIndex, Zalsa}; use crate::{Id, IngredientIndex}; /// An ingredient has an [ingredient index][IngredientIndex]. However, Salsa also supports /// enums of salsa structs (and other salsa enums), and those don't have a constant ingredient index, /// because they are not ingredients by themselves but rather composed of them. However, an enum can /// be viewed as a *set* of [`IngredientIndex`], where each instance of the enum can belong /// to one, potentially different, index. This is what this type represents: a set of /// `IngredientIndex`. 
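///
/// A sketch of where such a set arises, assuming hypothetical tracked structs
/// `Literal` and `Call`:
///
/// ```ignore
/// #[derive(salsa::Supertype)]
/// enum Expr<'db> {
///     Literal(Literal<'db>),
///     Call(Call<'db>),
/// }
/// ```
///
/// A tracked function over `Expr<'db>` is then associated with the ingredient
/// indices of both variants.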
#[derive(Clone)] pub struct IngredientIndices { indices: Box<[IngredientIndex]>, } impl From for IngredientIndices { #[inline] fn from(value: IngredientIndex) -> Self { Self { indices: Box::new([value]), } } } impl IngredientIndices { #[inline] pub fn empty() -> Self { Self { indices: Box::default(), } } pub fn merge(iter: impl IntoIterator) -> Self { let mut indices = Vec::new(); for index in iter { indices.extend(index.indices); } indices.sort_unstable(); indices.dedup(); Self { indices: indices.into_boxed_slice(), } } } pub trait NewMemoIngredientIndices { /// # Safety /// /// The memo types must be correct. unsafe fn create( zalsa: &Zalsa, struct_indices: IngredientIndices, ingredient: IngredientIndex, memo_type: MemoEntryType, intern_ingredient_memo_types: Option>, ) -> Self; } impl NewMemoIngredientIndices for MemoIngredientIndices { /// # Safety /// /// The memo types must be correct. unsafe fn create( zalsa: &Zalsa, struct_indices: IngredientIndices, ingredient: IngredientIndex, memo_type: MemoEntryType, _intern_ingredient_memo_types: Option>, ) -> Self { debug_assert!( _intern_ingredient_memo_types.is_none(), "intern ingredient can only have a singleton memo ingredient" ); let Some(&last) = struct_indices.indices.last() else { unreachable!("Attempting to construct struct memo mapping for non tracked function?") }; let mut indices = Vec::new(); indices.resize( (last.as_u32() as usize) + 1, MemoIngredientIndex::from_usize((u32::MAX - 1) as usize), ); for &struct_ingredient in &struct_indices.indices { let memo_types = zalsa .lookup_ingredient(struct_ingredient) .memo_table_types(); let mi = zalsa.next_memo_ingredient_index(struct_ingredient, ingredient); memo_types.set(mi, &memo_type); indices[struct_ingredient.as_u32() as usize] = mi; } MemoIngredientIndices { indices: indices.into_boxed_slice(), } } } /// This type is to [`MemoIngredientIndex`] what [`IngredientIndices`] is to [`IngredientIndex`]: /// since enums can contain different ingredient indices, they can also have different memo indices, /// so we need to keep track of them. /// /// This acts a map from [`IngredientIndex`] to [`MemoIngredientIndex`] but implemented /// via a slice for fast lookups, trading memory for speed. With these changes, lookups are `O(1)` /// instead of `O(n)`. /// /// A database tends to have few ingredients (i), less function ingredients and even less /// function ingredients targeting `#[derive(Supertype)]` enums (e). /// While this is bounded as `O(i * e)` memory usage, the average case is significantly smaller: a /// function ingredient targeting enums only stores a slice whose length corresponds to the largest /// ingredient index's _value_. For example, if we have the ingredient indices `[2, 6, 17]`, then we /// will allocate a slice whose length is `17 + 1`. /// /// Assuming a heavy example scenario of 1000 ingredients (500 of which are function ingredients, 100 /// of which are enum targeting functions) this would come out to a maximum possibly memory usage of /// 4bytes * 1000 * 100 ~= 0.38MB which is negligible. 
pub struct MemoIngredientIndices { indices: Box<[MemoIngredientIndex]>, } impl MemoIngredientMap for MemoIngredientIndices { #[inline(always)] fn get_zalsa_id(&self, zalsa: &Zalsa, id: Id) -> MemoIngredientIndex { self.get(zalsa.ingredient_index(id)) } #[inline(always)] fn get(&self, index: IngredientIndex) -> MemoIngredientIndex { self.indices[index.as_u32() as usize] } } #[derive(Debug)] pub struct MemoIngredientSingletonIndex(MemoIngredientIndex); impl MemoIngredientMap for MemoIngredientSingletonIndex { #[inline(always)] fn get_zalsa_id(&self, _: &Zalsa, _: Id) -> MemoIngredientIndex { self.0 } #[inline(always)] fn get(&self, _: IngredientIndex) -> MemoIngredientIndex { self.0 } } impl NewMemoIngredientIndices for MemoIngredientSingletonIndex { #[inline] unsafe fn create( zalsa: &Zalsa, indices: IngredientIndices, ingredient: IngredientIndex, memo_type: MemoEntryType, intern_ingredient_memo_types: Option>, ) -> Self { let &[struct_ingredient] = &*indices.indices else { unreachable!("Attempting to construct struct memo mapping from enum?") }; let memo_types = intern_ingredient_memo_types.unwrap_or_else(|| { zalsa .lookup_ingredient(struct_ingredient) .memo_table_types() }); let mi = zalsa.next_memo_ingredient_index(struct_ingredient, ingredient); memo_types.set(mi, &memo_type); Self(mi) } } pub trait MemoIngredientMap: Send + Sync { fn get_zalsa_id(&self, zalsa: &Zalsa, id: Id) -> MemoIngredientIndex; fn get(&self, index: IngredientIndex) -> MemoIngredientIndex; } salsa-0.23.0/src/nonce.rs000064400000000000000000000024201046102023000132460ustar 00000000000000use crate::sync::atomic::{AtomicU32, Ordering}; use std::marker::PhantomData; use std::num::NonZeroU32; /// A type to generate nonces. Store it in a static and each nonce it produces will be unique from other nonces. /// The type parameter `T` just serves to distinguish different kinds of nonces. pub(crate) struct NonceGenerator { value: AtomicU32, phantom: PhantomData, } /// A "nonce" is a value that gets created exactly once. /// We use it to mark the database storage so we can be sure we're seeing the same database. 
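///
/// A sketch of the intended use, assuming a hypothetical marker type
/// `StorageNonce`:
///
/// ```ignore
/// struct StorageNonce;
/// static NONCES: NonceGenerator<StorageNonce> = NonceGenerator::new();
///
/// let a = NONCES.nonce();
/// let b = NONCES.nonce();
/// assert!(a != b); // every generated nonce is distinct
/// ```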
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Nonce<T>(NonZeroU32, PhantomData<T>);

impl<T> NonceGenerator<T> {
    pub(crate) const fn new() -> Self {
        Self {
            // start at 1 so we can detect rollover more easily
            value: AtomicU32::new(1),
            phantom: PhantomData,
        }
    }

    pub(crate) fn nonce(&self) -> Nonce<T> {
        let value = self.value.fetch_add(1, Ordering::Relaxed);
        assert!(value != 0, "nonce rolled over");
        Nonce(NonZeroU32::new(value).unwrap(), self.phantom)
    }
}

impl<T> Nonce<T> {
    pub(crate) fn into_u32(self) -> NonZeroU32 {
        self.0
    }

    pub(crate) fn from_u32(u32: NonZeroU32) -> Self {
        Self(u32, PhantomData)
    }
}
salsa-0.23.0/src/parallel.rs000064400000000000000000000022251046102023000137430ustar 00000000000000use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};

use crate::Database;

pub fn par_map<Db, F, T, R, C>(db: &Db, inputs: impl IntoParallelIterator<Item = T>, op: F) -> C
where
    Db: Database + ?Sized,
    F: Fn(&Db, T) -> R + Sync + Send,
    T: Send,
    R: Send + Sync,
    C: FromParallelIterator<R>,
{
    inputs
        .into_par_iter()
        .map_with(DbForkOnClone(db.fork_db()), |db, element| {
            op(db.0.as_view::<Db>(), element)
        })
        .collect()
}

struct DbForkOnClone(Box<dyn Database>);

impl Clone for DbForkOnClone {
    fn clone(&self) -> Self {
        DbForkOnClone(self.0.fork_db())
    }
}

pub fn join<Db: Database + ?Sized, A, B, RA, RB>(db: &Db, a: A, b: B) -> (RA, RB)
where
    A: FnOnce(&Db) -> RA + Send,
    B: FnOnce(&Db) -> RB + Send,
    RA: Send,
    RB: Send,
{
    // we need to fork eagerly, as `rayon::join_context` gives us no option to tell whether we get
    // moved to another thread before the closure is executed
    let db_a = db.fork_db();
    let db_b = db.fork_db();
    rayon::join(
        move || a(db_a.as_view::<Db>()),
        move || b(db_b.as_view::<Db>()),
    )
}
salsa-0.23.0/src/return_mode.rs000064400000000000000000000033521046102023000144740ustar 00000000000000//! User-implementable salsa traits for refining the return type via `returns(as_ref)` and `returns(as_deref)`.

use std::ops::Deref;

/// Used to determine the return type and value for tracked fields and functions annotated with `returns(as_ref)`.
pub trait SalsaAsRef {
    // The type returned by tracked fields and functions annotated with `returns(as_ref)`.
    type AsRef<'a>
    where
        Self: 'a;

    // The value returned by tracked fields and functions annotated with `returns(as_ref)`.
    fn as_ref(&self) -> Self::AsRef<'_>;
}

impl<T> SalsaAsRef for Option<T> {
    type AsRef<'a> = Option<&'a T> where Self: 'a;

    fn as_ref(&self) -> Self::AsRef<'_> {
        self.as_ref()
    }
}

impl<T, E> SalsaAsRef for Result<T, E> {
    type AsRef<'a> = Result<&'a T, &'a E> where Self: 'a;

    fn as_ref(&self) -> Self::AsRef<'_> {
        self.as_ref()
    }
}

/// Used to determine the return type and value for tracked fields and functions annotated with `returns(as_deref)`.
pub trait SalsaAsDeref {
    // The type returned by tracked fields and functions annotated with `returns(as_deref)`.
    type AsDeref<'a>
    where
        Self: 'a;

    // The value returned by tracked fields and functions annotated with `returns(as_deref)`.
    fn as_deref(&self) -> Self::AsDeref<'_>;
}

impl<T: Deref> SalsaAsDeref for Option<T> {
    type AsDeref<'a> = Option<&'a T::Target> where Self: 'a;

    fn as_deref(&self) -> Self::AsDeref<'_> {
        self.as_deref()
    }
}

impl<T: Deref, E> SalsaAsDeref for Result<T, E> {
    type AsDeref<'a> = Result<&'a T::Target, &'a E> where Self: 'a;

    fn as_deref(&self) -> Self::AsDeref<'_> {
        self.as_deref()
    }
}
salsa-0.23.0/src/revision.rs000064400000000000000000000105221046102023000140040ustar 00000000000000use std::num::NonZeroUsize;

use crate::sync::atomic::{AtomicUsize, Ordering};

/// Value of the initial revision, as a usize. We don't use 0
/// because we want to use a `NonZeroUsize`.
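///
/// One payoff of `NonZeroUsize` (a general guarantee of the niche optimization, shown as a sketch):
///
/// ```ignore
/// // `Option<Revision>` is no bigger than `Revision` thanks to the zero niche
/// assert_eq!(size_of::<Option<Revision>>(), size_of::<Revision>());
/// ```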
const START: usize = 1;

/// A unique identifier for the current version of the database.
///
/// Each time an input is changed, the revision number is incremented.
/// `Revision` is used internally to track which values may need to be
/// recomputed, but is not something you should have to interact with
/// directly as a user of salsa.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Revision {
    generation: NonZeroUsize,
}

impl Revision {
    #[inline]
    pub(crate) fn max() -> Self {
        Self::from(usize::MAX)
    }

    #[inline]
    pub(crate) const fn start() -> Self {
        Self {
            // SAFETY: `START` is non-zero.
            generation: unsafe { NonZeroUsize::new_unchecked(START) },
        }
    }

    #[inline]
    pub(crate) fn from(g: usize) -> Self {
        Self {
            generation: NonZeroUsize::new(g).unwrap(),
        }
    }

    #[inline]
    pub(crate) fn from_opt(g: usize) -> Option<Self> {
        NonZeroUsize::new(g).map(|generation| Self { generation })
    }

    #[inline]
    pub(crate) fn next(self) -> Revision {
        Self::from(self.generation.get() + 1)
    }

    #[inline]
    pub(crate) fn as_usize(self) -> usize {
        self.generation.get()
    }
}

impl std::fmt::Debug for Revision {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(fmt, "R{}", self.generation)
    }
}

#[derive(Debug)]
pub(crate) struct AtomicRevision {
    data: AtomicUsize,
}

impl From<Revision> for AtomicRevision {
    fn from(value: Revision) -> Self {
        Self {
            data: AtomicUsize::new(value.as_usize()),
        }
    }
}

impl AtomicRevision {
    pub(crate) const fn start() -> Self {
        Self {
            data: AtomicUsize::new(START),
        }
    }

    pub(crate) fn load(&self) -> Revision {
        Revision {
            // SAFETY: We know that the value is non-zero because we only ever store `START`, which is 1, or a
            // Revision which is guaranteed to be non-zero.
            generation: unsafe { NonZeroUsize::new_unchecked(self.data.load(Ordering::Acquire)) },
        }
    }

    pub(crate) fn store(&self, r: Revision) {
        self.data.store(r.as_usize(), Ordering::Release);
    }
}

#[derive(Debug)]
pub(crate) struct OptionalAtomicRevision {
    data: AtomicUsize,
}

impl From<Revision> for OptionalAtomicRevision {
    fn from(value: Revision) -> Self {
        Self {
            data: AtomicUsize::new(value.as_usize()),
        }
    }
}

impl OptionalAtomicRevision {
    pub(crate) fn new(revision: Option<Revision>) -> Self {
        Self {
            data: AtomicUsize::new(revision.map_or(0, |r| r.as_usize())),
        }
    }

    pub(crate) fn load(&self) -> Option<Revision> {
        Revision::from_opt(self.data.load(Ordering::Acquire))
    }

    pub(crate) fn swap(&self, val: Option<Revision>) -> Option<Revision> {
        Revision::from_opt(
            self.data
                .swap(val.map_or(0, |r| r.as_usize()), Ordering::AcqRel),
        )
    }

    pub(crate) fn compare_exchange(
        &self,
        current: Option<Revision>,
        new: Option<Revision>,
    ) -> Result<Option<Revision>, Option<Revision>> {
        self.data
            .compare_exchange(
                current.map_or(0, |r| r.as_usize()),
                new.map_or(0, |r| r.as_usize()),
                Ordering::AcqRel,
                Ordering::Acquire,
            )
            .map(Revision::from_opt)
            .map_err(Revision::from_opt)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn optional_atomic_revision() {
        let val = OptionalAtomicRevision::new(Some(Revision::start()));
        assert_eq!(val.load(), Some(Revision::start()));
        assert_eq!(val.swap(None), Some(Revision::start()));
        assert_eq!(val.load(), None);
        assert_eq!(val.swap(Some(Revision::start())), None);
        assert_eq!(val.load(), Some(Revision::start()));
        assert_eq!(
            val.compare_exchange(Some(Revision::start()), None),
            Ok(Some(Revision::start()))
        );
        assert_eq!(
            val.compare_exchange(Some(Revision::start()), None),
            Err(None)
        );
    }
}
salsa-0.23.0/src/runtime/dependency_graph.rs000064400000000000000000000147361046102023000171410ustar 00000000000000use std::pin::Pin;

use rustc_hash::FxHashMap;
use smallvec::SmallVec;

use crate::key::DatabaseKeyIndex;
use
crate::runtime::dependency_graph::edge::EdgeCondvar;
use crate::runtime::WaitResult;
use crate::sync::thread::ThreadId;
use crate::sync::MutexGuard;

#[derive(Debug, Default)]
pub(super) struct DependencyGraph {
    /// A `(K -> V)` pair in this map indicates that the runtime
    /// `K` is blocked on some query executing in the runtime `V`.
    /// This encodes a graph that must be acyclic (or else deadlock
    /// will result).
    edges: FxHashMap<ThreadId, edge::Edge>,

    /// Encodes the `ThreadId` that are blocked waiting for the result
    /// of a given query.
    query_dependents: FxHashMap<DatabaseKeyIndex, SmallVec<[ThreadId; 4]>>,

    /// When a key K completes which had dependent queries Qs blocked on it,
    /// it stores its `WaitResult` here. As they wake up, each query Q in Qs will
    /// come here to fetch their results.
    wait_results: FxHashMap<ThreadId, WaitResult>,
}

impl DependencyGraph {
    /// True if `from_id` depends on `to_id`.
    ///
    /// (i.e., there is a path from `from_id` to `to_id` in the graph.)
    pub(super) fn depends_on(&self, from_id: ThreadId, to_id: ThreadId) -> bool {
        let mut p = from_id;
        while let Some(q) = self.edges.get(&p).map(|edge| edge.blocked_on_id) {
            if q == to_id {
                return true;
            }
            p = q;
        }
        p == to_id
    }

    /// Modifies the graph so that `from_id` is blocked
    /// on `database_key`, which is being computed by
    /// `to_id`.
    ///
    /// For this to be reasonable, the lock on the
    /// results table for `database_key` must be held.
    /// This ensures that computing `database_key` doesn't
    /// complete before `block_on` executes.
    ///
    /// Preconditions:
    /// * No path from `to_id` to `from_id`
    ///   (i.e., `me.depends_on(to_id, from_id)` is false)
    /// * `held_mutex` is a read lock (or stronger) on `database_key`
    pub(super) fn block_on<QueryMutexGuard>(
        mut me: MutexGuard<'_, Self>,
        from_id: ThreadId,
        database_key: DatabaseKeyIndex,
        to_id: ThreadId,
        query_mutex_guard: QueryMutexGuard,
    ) -> WaitResult {
        let cvar = std::pin::pin!(EdgeCondvar::default());
        let cvar = cvar.as_ref();
        // SAFETY: We are blocking until the result is removed from `DependencyGraph::wait_results`
        // at which point the `edge` won't signal the condvar anymore.
        // As such we are keeping the cond var alive until the reference in the edge drops.
        unsafe { me.add_edge(from_id, database_key, to_id, cvar) };

        // Release the mutex that prevents `database_key`
        // from completing, now that the edge has been added.
        drop(query_mutex_guard);

        loop {
            if let Some(result) = me.wait_results.remove(&from_id) {
                debug_assert!(!me.edges.contains_key(&from_id));
                return result;
            }
            me = cvar.wait(me);
        }
    }

    /// Helper for `block_on`: performs actual graph modification
    /// to add a dependency edge from `from_id` to `to_id`, which is
    /// computing `database_key`.
    ///
    /// # Safety
    ///
    /// The caller needs to keep the referent of `cvar` alive until the corresponding
    /// [`Self::wait_results`] entry has been inserted.
    unsafe fn add_edge(
        &mut self,
        from_id: ThreadId,
        database_key: DatabaseKeyIndex,
        to_id: ThreadId,
        cvar: Pin<&EdgeCondvar>,
    ) {
        assert_ne!(from_id, to_id);
        debug_assert!(!self.edges.contains_key(&from_id));
        debug_assert!(!self.depends_on(to_id, from_id));

        // SAFETY: The caller is responsible for ensuring that the `EdgeCondvar` outlives the `Edge`.
        let edge = unsafe { edge::Edge::new(to_id, cvar) };
        self.edges.insert(from_id, edge);
        self.query_dependents
            .entry(database_key)
            .or_default()
            .push(from_id);
    }

    /// Invoked when runtime `to_id` completes executing
    /// `database_key`.
    pub(super) fn unblock_runtimes_blocked_on(
        &mut self,
        database_key: DatabaseKeyIndex,
        wait_result: WaitResult,
    ) {
        let dependents = self
            .query_dependents
            .remove(&database_key)
            .unwrap_or_default();

        for from_id in dependents {
            self.unblock_runtime(from_id, wait_result);
        }
    }

    /// Unblock the runtime with the given id with the given wait-result.
    /// This will cause it to resume execution (though it will have to grab
    /// the lock on this data structure first, to recover the wait result).
    fn unblock_runtime(&mut self, id: ThreadId, wait_result: WaitResult) {
        let edge = self.edges.remove(&id).expect("not blocked");
        self.wait_results.insert(id, wait_result);

        // Now that we have inserted the `wait_results`,
        // notify the thread.
        edge.notify();
    }
}

mod edge {
    use crate::sync::thread::ThreadId;
    use crate::sync::{Condvar, MutexGuard};
    use std::pin::Pin;

    #[derive(Default, Debug)]
    pub(super) struct EdgeCondvar {
        condvar: Condvar,
        _phantom_pin: std::marker::PhantomPinned,
    }

    impl EdgeCondvar {
        #[inline]
        pub(super) fn wait<'a, T>(&self, mutex_guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
            self.condvar.wait(mutex_guard)
        }
    }

    #[derive(Debug)]
    pub(super) struct Edge {
        pub(super) blocked_on_id: ThreadId,
        /// Signalled whenever a query with dependents completes.
        /// Allows those dependents to check if they are ready to unblock.
        // condvar: unsafe<'stack_frame> Pin<&'stack_frame Condvar>,
        condvar: Pin<&'static EdgeCondvar>,
    }

    impl Edge {
        /// # SAFETY
        ///
        /// The caller must ensure that the [`EdgeCondvar`] is kept alive until the [`Edge`] is dropped.
        pub(super) unsafe fn new(blocked_on_id: ThreadId, condvar: Pin<&EdgeCondvar>) -> Self {
            Self {
                blocked_on_id,
                // SAFETY: The caller is responsible for ensuring that the `EdgeCondvar` outlives the `Edge`.
                condvar: unsafe {
                    std::mem::transmute::<Pin<&EdgeCondvar>, Pin<&'static EdgeCondvar>>(condvar)
                },
            }
        }

        #[inline]
        pub(super) fn notify(self) {
            self.condvar.condvar.notify_one();
        }
    }
}
salsa-0.23.0/src/runtime.rs000064400000000000000000000212321046102023000136310ustar 00000000000000use self::dependency_graph::DependencyGraph;
use crate::durability::Durability;
use crate::function::SyncGuard;
use crate::key::DatabaseKeyIndex;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::thread::{self, ThreadId};
use crate::sync::Mutex;
use crate::table::Table;
use crate::zalsa::Zalsa;
use crate::{Cancelled, Event, EventKind, Revision};

mod dependency_graph;

pub struct Runtime {
    /// Set to true when the current revision has been canceled.
    /// This is done when an input is being changed. The flag
    /// is set back to false once the input has been changed.
    revision_canceled: AtomicBool,

    /// Stores the "last change" revision for values of each durability.
    /// This vector is always of length at least 1 (for Durability 0)
    /// but its total length depends on the number of durabilities. The
    /// element at index 0 is special as it represents the "current
    /// revision". In general, we have the invariant that revisions
    /// in here are *declining* -- that is, `revisions[i] >=
    /// revisions[i + 1]`, for all `i`. This is because when you
    /// modify a value with durability D, that implies that values
    /// with durability less than D may have changed too.
    revisions: [Revision; Durability::LEN],

    /// The dependency graph tracks which runtimes are blocked on one
    /// another, waiting for queries to terminate.
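    ///
    /// For example (sketch): if thread A blocks on a query that thread B is
    /// computing, the graph records the edge `A -> B`; before B would ever
    /// block on A, the would-be cycle is detected instead of deadlocking:
    ///
    /// ```ignore
    /// assert!(dg.depends_on(a, b)); // A is (transitively) waiting on B,
    /// // so an attempt by B to block on A reports a cycle, not a deadlock
    /// ```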
    dependency_graph: Mutex<DependencyGraph>,

    /// Data for instances
    table: Table,
}

#[derive(Copy, Clone, Debug)]
pub(super) enum WaitResult {
    Completed,
    Panicked,
}

#[derive(Debug)]
pub(crate) enum BlockResult<'me> {
    /// The query is running on another thread.
    Running(Running<'me>),

    /// Blocking resulted in a cycle.
    ///
    /// The lock is held by the current thread, or there's another thread that is waiting on the current thread,
    /// and blocking this thread on the other thread would result in a deadlock/cycle.
    Cycle { same_thread: bool },
}

pub struct Running<'me>(Box<BlockedOnInner<'me>>);

struct BlockedOnInner<'me> {
    dg: crate::sync::MutexGuard<'me, DependencyGraph>,
    query_mutex_guard: SyncGuard<'me>,
    database_key: DatabaseKeyIndex,
    other_id: ThreadId,
    thread_id: ThreadId,
}

impl Running<'_> {
    pub(crate) fn database_key(&self) -> DatabaseKeyIndex {
        self.0.database_key
    }

    /// Blocks on the other thread to complete the computation.
    pub(crate) fn block_on(self, zalsa: &Zalsa) {
        let BlockedOnInner {
            dg,
            query_mutex_guard,
            database_key,
            other_id,
            thread_id,
        } = *self.0;

        zalsa.event(&|| {
            Event::new(EventKind::WillBlockOn {
                other_thread_id: other_id,
                database_key,
            })
        });

        tracing::debug!(
            "block_on: thread {thread_id:?} is blocking on {database_key:?} in thread {other_id:?}",
        );

        let result =
            DependencyGraph::block_on(dg, thread_id, database_key, other_id, query_mutex_guard);

        match result {
            WaitResult::Panicked => {
                // If the other thread panicked, then we consider this thread
                // cancelled. The assumption is that the panic will be detected
                // by the other thread and responded to appropriately.
                Cancelled::PropagatedPanic.throw()
            }
            WaitResult::Completed => {}
        }
    }
}

impl std::fmt::Debug for Running<'_> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        fmt.debug_struct("Running")
            .field("database_key", &self.0.database_key)
            .field("other_id", &self.0.other_id)
            .field("thread_id", &self.0.thread_id)
            .finish()
    }
}

#[derive(Copy, Clone, Debug)]
pub struct Stamp {
    pub durability: Durability,
    pub changed_at: Revision,
}

pub fn stamp(revision: Revision, durability: Durability) -> Stamp {
    Stamp {
        durability,
        changed_at: revision,
    }
}

impl Default for Runtime {
    fn default() -> Self {
        Runtime {
            revisions: [Revision::start(); Durability::LEN],
            revision_canceled: Default::default(),
            dependency_graph: Default::default(),
            table: Default::default(),
        }
    }
}

impl std::fmt::Debug for Runtime {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        fmt.debug_struct("Runtime")
            .field("revisions", &self.revisions)
            .field("revision_canceled", &self.revision_canceled)
            .field("dependency_graph", &self.dependency_graph)
            .finish()
    }
}

impl Runtime {
    #[inline]
    pub(crate) fn current_revision(&self) -> Revision {
        self.revisions[0]
    }

    /// Reports that an input with durability `durability` changed.
    /// This will update the 'last changed at' values for every durability
    /// less than or equal to `durability` to the current revision.
    pub(crate) fn report_tracked_write(&mut self, durability: Durability) {
        let new_revision = self.current_revision();
        self.revisions[1..=durability.index()].fill(new_revision);
    }

    /// The revision in which values with durability `d` may have last
    /// changed. For D0, this is just the current revision. But for
    /// higher levels of durability, this value may lag behind the
    /// current revision. If we encounter a value of durability Di,
    /// then, we can check this function to get a "bound" on when the
    /// value may have changed, which allows us to skip walking its
    /// dependencies.
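    ///
    /// A sketch of how a caller can use this bound (hypothetical names):
    ///
    /// ```ignore
    /// // A memo verified at `verified_at` whose inputs all have durability
    /// // `HIGH` is still valid if nothing of that durability changed since:
    /// if runtime.last_changed_revision(Durability::HIGH) <= verified_at {
    ///     // skip walking the memo's dependencies
    /// }
    /// ```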
    #[inline]
    pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision {
        self.revisions[d.index()]
    }

    pub(crate) fn load_cancellation_flag(&self) -> bool {
        self.revision_canceled.load(Ordering::Acquire)
    }

    pub(crate) fn set_cancellation_flag(&self) {
        tracing::trace!("set_cancellation_flag");
        self.revision_canceled.store(true, Ordering::Release);
    }

    pub(crate) fn reset_cancellation_flag(&mut self) {
        *self.revision_canceled.get_mut() = false;
    }

    /// Returns the [`Table`] used to store the value of salsa structs
    #[inline]
    pub(crate) fn table(&self) -> &Table {
        &self.table
    }

    pub(crate) fn table_mut(&mut self) -> &mut Table {
        &mut self.table
    }

    /// Increments the "current revision" counter and clears
    /// the cancellation flag.
    ///
    /// This should only be done by the storage when the state is "quiescent".
    pub(crate) fn new_revision(&mut self) -> Revision {
        let r_old = self.current_revision();
        let r_new = r_old.next();
        self.revisions[0] = r_new;
        tracing::debug!("new_revision: {r_old:?} -> {r_new:?}");
        r_new
    }

    /// Block until `other_id` completes executing `database_key`, or return `BlockResult::Cycle`
    /// immediately in case of a cycle.
    ///
    /// `query_mutex_guard` is the guard for the current query's state;
    /// it will be dropped after we have successfully registered the
    /// dependency.
    ///
    /// # Propagating panics
    ///
    /// If the thread `other_id` panics, then our thread is considered
    /// cancelled, so this function will panic with a `Cancelled` value.
    pub(crate) fn block<'a>(
        &'a self,
        database_key: DatabaseKeyIndex,
        other_id: ThreadId,
        query_mutex_guard: SyncGuard<'a>,
    ) -> BlockResult<'a> {
        let thread_id = thread::current().id();
        // Cycle in the same thread.
        if thread_id == other_id {
            return BlockResult::Cycle { same_thread: true };
        }

        let dg = self.dependency_graph.lock();

        if dg.depends_on(other_id, thread_id) {
            tracing::debug!("block_on: cycle detected for {database_key:?} in thread {thread_id:?} on {other_id:?}");
            return BlockResult::Cycle { same_thread: false };
        }

        BlockResult::Running(Running(Box::new(BlockedOnInner {
            dg,
            query_mutex_guard,
            database_key,
            other_id,
            thread_id,
        })))
    }

    /// Invoked when this runtime completed computing `database_key` with
    /// the given result `wait_result` (`wait_result` should be
    /// `WaitResult::Panicked` if computing `database_key` panicked and could
    /// not complete). This function unblocks any dependent queries and allows
    /// them to continue executing.
    pub(crate) fn unblock_queries_blocked_on(
        &self,
        database_key: DatabaseKeyIndex,
        wait_result: WaitResult,
    ) {
        self.dependency_graph
            .lock()
            .unblock_runtimes_blocked_on(database_key, wait_result);
    }
}
salsa-0.23.0/src/salsa_struct.rs000064400000000000000000000052231046102023000146570ustar 00000000000000use std::any::TypeId;

use crate::memo_ingredient_indices::{IngredientIndices, MemoIngredientMap};
use crate::zalsa::Zalsa;
use crate::Id;

pub trait SalsaStructInDb: Sized {
    type MemoIngredientMap: MemoIngredientMap;

    /// Lookup or create ingredient indices.
    ///
    /// Note that this method does *not* create the ingredients themselves, this is handled by
    /// [`crate::zalsa::JarEntry::get_or_create`]. This method only creates
    /// or looks up the indices corresponding to the ingredients.
    ///
    /// While implementors of this trait may call [`crate::zalsa::JarEntry::get_or_create`]
    /// to create the ingredient, they aren't required to. For example, supertypes recursively
    /// call [`crate::zalsa::JarEntry::get_or_create`] for their variants and combine them.
    fn lookup_or_create_ingredient_index(zalsa: &Zalsa) -> IngredientIndices;

    /// Plumbing to support nested salsa supertypes.
    ///
    /// In the example below, there are two supertypes: `InnerEnum` and `OuterEnum`,
    /// where the former is a supertype of `Input` and `Interned1` and the latter
    /// is a supertype of `InnerEnum` and `Interned2`.
    ///
    /// ```ignore
    /// #[salsa::input]
    /// struct Input {}
    ///
    /// #[salsa::interned]
    /// struct Interned1 {}
    ///
    /// #[salsa::interned]
    /// struct Interned2 {}
    ///
    /// #[derive(Debug, salsa::Enum)]
    /// enum InnerEnum {
    ///     Input(Input),
    ///     Interned1(Interned1),
    /// }
    ///
    /// #[derive(Debug, salsa::Enum)]
    /// enum OuterEnum {
    ///     InnerEnum(InnerEnum),
    ///     Interned2(Interned2),
    /// }
    /// ```
    ///
    /// Imagine `OuterEnum` got a [`salsa::Id`][Id] and it wants to know which variant it belongs to.
    ///
    /// `OuterEnum` cannot ask each variant "what is your ingredient index?" and compare because `InnerEnum`
    /// has *multiple* possible ingredient indices. Alternatively, `OuterEnum` could ask each variant
    /// "is this value yours?" and then invoke [`FromId`][crate::id::FromId] with the correct variant,
    /// but this duplicates work: now, `InnerEnum` will have to repeat this check-and-cast for *its*
    /// variants.
    ///
    /// Instead, the implementor keeps track of the [`std::any::TypeId`] of the ID struct, and asks each
    /// variant to "cast" to it. If it succeeds, `cast` returns that value; if not, we
    /// go to the next variant.
    ///
    /// Why `TypeId` and not `IngredientIndex`? Because it's cheaper and easier: the `TypeId` is readily
    /// available at compile time, while the `IngredientIndex` requires a runtime lookup.
    fn cast(id: Id, type_id: TypeId) -> Option<Self>;
}
salsa-0.23.0/src/storage.rs000064400000000000000000000152221046102023000136140ustar 00000000000000//! Public API facades for the implementation details of [`Zalsa`] and [`ZalsaLocal`].

use std::marker::PhantomData;
use std::panic::RefUnwindSafe;

use crate::sync::{Arc, Condvar, Mutex};
use crate::zalsa::{Zalsa, ZalsaDatabase};
use crate::zalsa_local::{self, ZalsaLocal};
use crate::{Database, Event, EventKind};

/// A handle to non-local database state.
pub struct StorageHandle<Db> {
    // Note: Drop order is important, zalsa_impl needs to drop before coordinate
    /// Reference to the database.
    zalsa_impl: Arc<Zalsa>,

    // Note: Drop order is important, coordinate needs to drop after zalsa_impl
    /// Coordination data for cancellation of other handles when `zalsa_mut` is called.
    /// This could be stored in Zalsa but it makes things marginally cleaner to keep it separate.
    coordinate: CoordinateDrop,

    /// We store references to `Db`
    phantom: PhantomData<fn() -> Db>,
}

impl<Db> Clone for StorageHandle<Db> {
    fn clone(&self) -> Self {
        *self.coordinate.clones.lock() += 1;

        Self {
            zalsa_impl: self.zalsa_impl.clone(),
            coordinate: CoordinateDrop(Arc::clone(&self.coordinate)),
            phantom: PhantomData,
        }
    }
}

impl<Db: Database> Default for StorageHandle<Db> {
    fn default() -> Self {
        Self::new(None)
    }
}

impl<Db: Database> StorageHandle<Db> {
    pub fn new(event_callback: Option<Box<dyn Fn(crate::Event) + Send + Sync + 'static>>) -> Self {
        Self {
            zalsa_impl: Arc::new(Zalsa::new::<Db>(event_callback)),
            coordinate: CoordinateDrop(Arc::new(Coordinate {
                clones: Mutex::new(1),
                cvar: Default::default(),
            })),
            phantom: PhantomData,
        }
    }

    pub fn into_storage(self) -> Storage<Db> {
        Storage {
            handle: self,
            zalsa_local: ZalsaLocal::new(),
        }
    }
}

/// Access the "storage" of a Salsa database: this is an internal plumbing trait
/// automatically implemented by `#[salsa::db]` applied to a struct.
///
/// # Safety
///
/// The `storage` and `storage_mut` fields must both return a reference to the same
/// storage field which must be owned by `self`.
pub unsafe trait HasStorage: Database + Clone + Sized {
    fn storage(&self) -> &Storage<Self>;
    fn storage_mut(&mut self) -> &mut Storage<Self>;
}

/// Concrete implementation of the [`Database`] trait with local state that can be used to drive computations.
pub struct Storage<Db> {
    handle: StorageHandle<Db>,

    /// Per-thread state
    zalsa_local: zalsa_local::ZalsaLocal,
}

impl<Db> Drop for Storage<Db> {
    fn drop(&mut self) {
        self.zalsa_local
            .record_unfilled_pages(self.handle.zalsa_impl.table());
    }
}

struct Coordinate {
    /// Counter of the number of clones of actor. Begins at 1.
    /// Incremented when cloned, decremented when dropped.
    clones: Mutex<usize>,
    cvar: Condvar,
}

// We cannot panic while holding a lock to `clones: Mutex<usize>` and therefore we cannot enter an
// inconsistent state.
impl RefUnwindSafe for Coordinate {}

impl<Db: Database> Default for Storage<Db> {
    fn default() -> Self {
        Self::new(None)
    }
}

impl<Db: Database> Storage<Db> {
    /// Create a new database storage.
    ///
    /// The `event_callback` function is invoked by the salsa runtime at various points during execution.
    pub fn new(event_callback: Option<Box<dyn Fn(crate::Event) + Send + Sync + 'static>>) -> Self {
        Self {
            handle: StorageHandle::new(event_callback),
            zalsa_local: ZalsaLocal::new(),
        }
    }

    /// Convert this instance of [`Storage`] into a [`StorageHandle`].
    ///
    /// This will discard the local state of this [`Storage`], thereby returning a value that
    /// is both [`Sync`] and [`std::panic::UnwindSafe`].
    pub fn into_zalsa_handle(mut self) -> StorageHandle<Db> {
        self.zalsa_local
            .record_unfilled_pages(self.handle.zalsa_impl.table());

        let Self {
            handle,
            zalsa_local: _,
        } = &self; // Avoid rust's annoying destructure prevention rules for `Drop` types

        // SAFETY: We forget `Self` afterwards to discard the original copy, and the destructure
        // above makes sure we won't forget to take into account newly added fields.
        let handle = unsafe { std::ptr::read(handle) };
        std::mem::forget::<Self>(self);

        handle
    }

    // ANCHOR: cancel_other_workers
    /// Sets cancellation flag and blocks until all other workers with access
    /// to this storage have completed.
    ///
    /// This could deadlock if there is a single worker with two handles to the
    /// same database!
    ///
    /// Needs to be paired with a call to `reset_cancellation_flag`.
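    ///
    /// In outline, the protocol is (a sketch of the body below):
    ///
    /// ```ignore
    /// runtime.set_cancellation_flag();     // 1. cancel in-flight work
    /// while *clones != 1 {                 // 2. wait for all other handles
    ///     clones = cvar.wait(clones);
    /// }
    /// // 3. unique access now; the flag is reset via `reset_cancellation_flag`
    /// ```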
    fn cancel_others(&mut self) -> &mut Zalsa {
        debug_assert!(
            self.zalsa_local
                .try_with_query_stack(|stack| stack.is_empty())
                == Some(true),
            "attempted to cancel within query computation, this is a deadlock"
        );
        self.handle.zalsa_impl.runtime().set_cancellation_flag();

        self.handle
            .zalsa_impl
            .event(&|| Event::new(EventKind::DidSetCancellationFlag));

        let mut clones = self.handle.coordinate.clones.lock();
        while *clones != 1 {
            clones = self.handle.coordinate.cvar.wait(clones);
        }

        // The ref count on the `Arc` should now be 1
        let zalsa = Arc::get_mut(&mut self.handle.zalsa_impl).unwrap();

        // cancellation is done, so reset the flag
        zalsa.runtime_mut().reset_cancellation_flag();
        zalsa
    }
    // ANCHOR_END: cancel_other_workers
}

#[allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety
unsafe impl<T: HasStorage> ZalsaDatabase for T {
    #[inline(always)]
    fn zalsa(&self) -> &Zalsa {
        &self.storage().handle.zalsa_impl
    }

    fn zalsa_mut(&mut self) -> &mut Zalsa {
        self.storage_mut().cancel_others()
    }

    #[inline(always)]
    fn zalsa_local(&self) -> &ZalsaLocal {
        &self.storage().zalsa_local
    }

    #[inline(always)]
    fn fork_db(&self) -> Box<dyn Database> {
        Box::new(self.clone())
    }
}

impl<Db: Database> Clone for Storage<Db> {
    fn clone(&self) -> Self {
        Self {
            handle: self.handle.clone(),
            zalsa_local: ZalsaLocal::new(),
        }
    }
}

struct CoordinateDrop(Arc<Coordinate>);

impl std::ops::Deref for CoordinateDrop {
    type Target = Arc<Coordinate>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl Drop for CoordinateDrop {
    fn drop(&mut self) {
        *self.0.clones.lock() -= 1;
        self.0.cvar.notify_all();
    }
}
salsa-0.23.0/src/sync.rs000064400000000000000000000156431046102023000131230ustar 00000000000000pub use shim::*;

#[cfg(feature = "shuttle")]
pub mod shim {
    pub use shuttle::sync::*;
    pub use shuttle::{thread, thread_local};

    pub mod papaya {
        use std::hash::{BuildHasher, Hash};
        use std::marker::PhantomData;

        pub struct HashMap<K, V, S>(super::Mutex<std::collections::HashMap<K, V, S>>);

        impl<K, V, S: Default> Default for HashMap<K, V, S> {
            fn default() -> Self {
                Self(super::Mutex::default())
            }
        }

        pub struct LocalGuard<'a>(PhantomData<&'a ()>);

        impl<K, V, S> HashMap<K, V, S>
        where
            K: Eq + Hash,
            V: Clone,
            S: BuildHasher,
        {
            pub fn guard(&self) -> LocalGuard<'_> {
                LocalGuard(PhantomData)
            }

            pub fn get(&self, key: &K, _guard: &LocalGuard<'_>) -> Option<V> {
                self.0.lock().get(key).cloned()
            }

            pub fn insert(&self, key: K, value: V, _guard: &LocalGuard<'_>) {
                self.0.lock().insert(key, value);
            }
        }
    }

    /// A wrapper around shuttle's `Mutex` to mirror parking-lot's API.
    #[derive(Default, Debug)]
    pub struct Mutex<T>(shuttle::sync::Mutex<T>);

    impl<T> Mutex<T> {
        pub const fn new(value: T) -> Mutex<T> {
            Mutex(shuttle::sync::Mutex::new(value))
        }

        pub fn lock(&self) -> MutexGuard<'_, T> {
            self.0.lock().unwrap()
        }

        pub fn get_mut(&mut self) -> &mut T {
            self.0.get_mut().unwrap()
        }
    }

    /// A wrapper around shuttle's `RwLock` to mirror parking-lot's API.
    #[derive(Default, Debug)]
    pub struct RwLock<T>(shuttle::sync::RwLock<T>);

    impl<T> RwLock<T> {
        pub fn read(&self) -> RwLockReadGuard<'_, T> {
            self.0.read().unwrap()
        }

        pub fn write(&self) -> RwLockWriteGuard<'_, T> {
            self.0.write().unwrap()
        }

        pub fn get_mut(&mut self) -> &mut T {
            self.0.get_mut().unwrap()
        }
    }

    /// A wrapper around shuttle's `Condvar` to mirror parking-lot's API.
    #[derive(Default, Debug)]
    pub struct Condvar(shuttle::sync::Condvar);

    impl Condvar {
        // We cannot match parking-lot identically because shuttle's version takes ownership of the `MutexGuard`.
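        // A caller therefore re-binds the returned guard, e.g. (sketch):
        //
        //     let mut guard = mutex.lock();
        //     while !ready { guard = condvar.wait(guard); }
        //
        // whereas parking-lot's native `wait` takes `&mut MutexGuard` instead.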
        pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
            self.0.wait(guard).unwrap()
        }

        pub fn notify_one(&self) {
            self.0.notify_one();
        }

        pub fn notify_all(&self) {
            self.0.notify_all();
        }
    }

    use std::cell::UnsafeCell;
    use std::mem::MaybeUninit;

    /// A polyfill for `std::sync::OnceLock`.
    pub struct OnceLock<T>(Mutex<bool>, UnsafeCell<MaybeUninit<T>>);

    impl<T> Default for OnceLock<T> {
        fn default() -> Self {
            OnceLock::new()
        }
    }

    impl<T> OnceLock<T> {
        pub const fn new() -> OnceLock<T> {
            OnceLock(Mutex::new(false), UnsafeCell::new(MaybeUninit::uninit()))
        }

        pub fn get(&self) -> Option<&T> {
            let initialized = self.0.lock();
            if *initialized {
                // SAFETY: The value is initialized and write-once.
                Some(unsafe { (*self.1.get()).assume_init_ref() })
            } else {
                None
            }
        }

        pub fn get_or_init<F>(&self, f: F) -> &T
        where
            F: FnOnce() -> T,
        {
            let _ = self.set_with(f);
            self.get().unwrap()
        }

        pub fn set(&self, value: T) -> Result<(), T> {
            self.set_with(|| value).map_err(|f| f())
        }

        fn set_with<F>(&self, f: F) -> Result<(), F>
        where
            F: FnOnce() -> T,
        {
            let mut initialized = self.0.lock();
            if *initialized {
                return Err(f);
            }
            // SAFETY: We hold the lock.
            unsafe { self.1.get().write(MaybeUninit::new(f())) }
            *initialized = true;
            Ok(())
        }
    }

    impl<T> From<T> for OnceLock<T> {
        fn from(value: T) -> OnceLock<T> {
            OnceLock(Mutex::new(true), UnsafeCell::new(MaybeUninit::new(value)))
        }
    }

    // SAFETY: Mirroring `std::sync::OnceLock`.
    unsafe impl<T: Send> Send for OnceLock<T> {}
    // SAFETY: Mirroring `std::sync::OnceLock`.
    unsafe impl<T: Send + Sync> Sync for OnceLock<T> {}
}

#[cfg(not(feature = "shuttle"))]
pub mod shim {
    pub use parking_lot::{Mutex, MutexGuard, RwLock};
    pub use std::sync::*;
    pub use std::{thread, thread_local};

    pub mod atomic {
        pub use portable_atomic::AtomicU64;
        pub use std::sync::atomic::*;
    }

    pub mod papaya {
        use std::hash::{BuildHasher, Hash};

        pub use papaya::LocalGuard;

        pub struct HashMap<K, V, S>(papaya::HashMap<K, V, S>);

        impl<K, V, S: Default> Default for HashMap<K, V, S> {
            fn default() -> Self {
                Self(
                    papaya::HashMap::builder()
                        .capacity(256) // A relatively large capacity to hopefully avoid resizing.
                        .resize_mode(papaya::ResizeMode::Blocking)
                        .hasher(S::default())
                        .build(),
                )
            }
        }

        impl<K, V, S> HashMap<K, V, S>
        where
            K: Eq + Hash,
            V: Clone,
            S: BuildHasher,
        {
            #[inline]
            pub fn guard(&self) -> LocalGuard<'_> {
                self.0.guard()
            }

            #[inline]
            pub fn get(&self, key: &K, guard: &LocalGuard<'_>) -> Option<V> {
                self.0.get(key, guard).cloned()
            }

            #[inline]
            pub fn insert(&self, key: K, value: V, guard: &LocalGuard<'_>) {
                self.0.insert(key, value, guard);
            }
        }
    }

    /// A wrapper around parking-lot's `Condvar` to mirror shuttle's API.
    pub struct Condvar(parking_lot::Condvar);

    // this is not derived because it confuses rust-analyzer ... https://github.com/rust-lang/rust-analyzer/issues/19755
    #[allow(clippy::derivable_impls)]
    impl Default for Condvar {
        fn default() -> Self {
            Self(Default::default())
        }
    }

    // this is not derived because it confuses rust-analyzer ...
    // https://github.com/rust-lang/rust-analyzer/issues/19755
    impl std::fmt::Debug for Condvar {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            f.debug_tuple("Condvar").field(&self.0).finish()
        }
    }

    impl Condvar {
        pub fn wait<'a, T>(&self, mut guard: MutexGuard<'a, T>) -> MutexGuard<'a, T> {
            self.0.wait(&mut guard);
            guard
        }

        pub fn notify_one(&self) {
            self.0.notify_one();
        }

        pub fn notify_all(&self) {
            self.0.notify_all();
        }
    }
}
salsa-0.23.0/src/table/memo.rs000064400000000000000000000324501046102023000141760ustar 00000000000000use std::any::{Any, TypeId};
use std::fmt::Debug;
use std::mem;
use std::ptr::{self, NonNull};

use portable_atomic::hint::spin_loop;
use thin_vec::ThinVec;

use crate::sync::atomic::{AtomicPtr, Ordering};
use crate::sync::{OnceLock, RwLock};
use crate::{zalsa::MemoIngredientIndex, zalsa_local::QueryOriginRef};

/// The "memo table" stores the memoized results of tracked function calls.
/// Every tracked function must take a salsa struct as its first argument
/// and memo tables are attached to those salsa structs as auxiliary data.
#[derive(Default)]
pub(crate) struct MemoTable {
    memos: RwLock<ThinVec<MemoEntry>>,
}

pub trait Memo: Any + Send + Sync {
    /// Returns the `origin` of this memo
    fn origin(&self) -> QueryOriginRef<'_>;

    /// Returns memory usage information about the memoized value.
    #[cfg(feature = "salsa_unstable")]
    fn memory_usage(&self) -> crate::database::MemoInfo;
}

/// Data for a memoized entry.
/// This is a type-erased `Box<M>`, where `M` is the type of memo associated
/// with that particular ingredient index.
///
/// # Implementation note
///
/// Every entry is associated with some ingredient that has been added to the database.
/// That ingredient has a fixed type of values that it produces etc.
/// Therefore, once a given entry goes from `Empty` to `Full`,
/// the type-id associated with that entry should never change.
///
/// We take advantage of this and use an `AtomicPtr` to store the actual memo.
/// This allows us to store into the memo-entry without acquiring a write-lock.
/// However, using `AtomicPtr` means we cannot use a `Box<dyn Memo>` or any other wide pointer.
/// Therefore, we hide the type by transmuting to `DummyMemo`; but we must then be very careful
/// when freeing `MemoEntryData` values to transmute things back. See the `Drop` impl for
/// [`MemoEntry`][] for details.
#[derive(Default)]
struct MemoEntry {
    /// An [`AtomicPtr`][] to a `Box<M>` for the erased memo type `M`
    atomic_memo: AtomicPtr<DummyMemo>,
}

#[derive(Default)]
pub struct MemoEntryType {
    data: OnceLock<MemoEntryTypeData>,
}

#[derive(Clone, Copy, Debug)]
struct MemoEntryTypeData {
    /// The `type_id` of the erased memo type `M`
    type_id: TypeId,

    /// A type-coercion function for the erased memo type `M`
    to_dyn_fn: fn(NonNull<DummyMemo>) -> NonNull<dyn Memo>,
}

impl MemoEntryType {
    fn to_dummy<M: Memo>(memo: NonNull<M>) -> NonNull<DummyMemo> {
        memo.cast()
    }

    unsafe fn from_dummy<M: Memo>(memo: NonNull<DummyMemo>) -> NonNull<M> {
        memo.cast()
    }

    const fn to_dyn_fn<M: Memo>() -> fn(NonNull<DummyMemo>) -> NonNull<dyn Memo> {
        let f: fn(NonNull<M>) -> NonNull<dyn Memo> = |x| x;

        // SAFETY: `M: Sized` and `DummyMemo: Sized`, as such they are ABI compatible behind a
        // `NonNull` making it safe to do type erasure.
        unsafe {
            mem::transmute::<
                fn(NonNull<M>) -> NonNull<dyn Memo>,
                fn(NonNull<DummyMemo>) -> NonNull<dyn Memo>,
            >(f)
        }
    }

    #[inline]
    pub fn of<M: Memo>() -> Self {
        Self {
            data: OnceLock::from(MemoEntryTypeData {
                type_id: TypeId::of::<M>(),
                to_dyn_fn: Self::to_dyn_fn::<M>(),
            }),
        }
    }

    #[inline]
    fn load(&self) -> Option<&MemoEntryTypeData> {
        self.data.get()
    }
}

/// Dummy placeholder type that we use when erasing the memo type `M` in [`MemoEntryData`][].
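///
/// The round-trip through the erased type, in outline (a sketch):
///
/// ```ignore
/// let erased: NonNull<DummyMemo> = MemoEntryType::to_dummy(memo); // NonNull<M> -> erased
/// let back: NonNull<M> = unsafe { MemoEntryType::from_dummy(erased) }; // erased -> NonNull<M>
/// ```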
#[derive(Debug)]
struct DummyMemo;

impl Memo for DummyMemo {
    fn origin(&self) -> QueryOriginRef<'_> {
        unreachable!("should not get here")
    }

    #[cfg(feature = "salsa_unstable")]
    fn memory_usage(&self) -> crate::database::MemoInfo {
        crate::database::MemoInfo {
            debug_name: "dummy",
            output: crate::database::SlotInfo {
                debug_name: "dummy",
                size_of_metadata: 0,
                size_of_fields: 0,
                memos: Vec::new(),
            },
        }
    }
}

#[derive(Default)]
pub struct MemoTableTypes {
    types: boxcar::Vec<MemoEntryType>,
}

impl MemoTableTypes {
    pub(crate) fn set(
        &self,
        memo_ingredient_index: MemoIngredientIndex,
        memo_type: &MemoEntryType,
    ) {
        let memo_ingredient_index = memo_ingredient_index.as_usize();

        // Try to create our entry if it has not already been created.
        if memo_ingredient_index >= self.types.count() {
            while self.types.push(MemoEntryType::default()) < memo_ingredient_index {}
        }

        loop {
            let Some(memo_entry_type) = self.types.get(memo_ingredient_index) else {
                // It's possible that someone else began pushing to our index but has not
                // completed the entry's initialization yet, as `boxcar` is lock-free. This
                // is extremely unlikely given initialization is just a handful of instructions.
                // Additionally, this function is generally only called on startup, so we can
                // just spin here.
                spin_loop();
                continue;
            };

            memo_entry_type
                .data
                .set(
                    *memo_type.data.get().expect(
                        "cannot provide an empty `MemoEntryType` for `MemoEntryType::set()`",
                    ),
                )
                .expect("memo type should only be set once");
            break;
        }
    }

    /// # Safety
    ///
    /// The types table must be the correct one of `memos`.
    #[inline]
    pub(crate) unsafe fn attach_memos<'a>(
        &'a self,
        memos: &'a MemoTable,
    ) -> MemoTableWithTypes<'a> {
        MemoTableWithTypes { types: self, memos }
    }

    /// # Safety
    ///
    /// The types table must be the correct one of `memos`.
    #[inline]
    pub(crate) unsafe fn attach_memos_mut<'a>(
        &'a self,
        memos: &'a mut MemoTable,
    ) -> MemoTableWithTypesMut<'a> {
        MemoTableWithTypesMut { types: self, memos }
    }
}

pub(crate) struct MemoTableWithTypes<'a> {
    types: &'a MemoTableTypes,
    memos: &'a MemoTable,
}

impl MemoTableWithTypes<'_> {
    pub(crate) fn insert<M: Memo>(
        self,
        memo_ingredient_index: MemoIngredientIndex,
        memo: NonNull<M>,
    ) -> Option<NonNull<M>> {
        // The type must already exist, we insert it when creating the memo ingredient.
        assert_eq!(
            self.types
                .types
                .get(memo_ingredient_index.as_usize())
                .and_then(MemoEntryType::load)?
                .type_id,
            TypeId::of::<M>(),
            "inconsistent type-id for `{memo_ingredient_index:?}`"
        );

        // If the memo slot is already occupied, it must already have the
        // right type info etc, and we only need the read-lock.
        if let Some(MemoEntry { atomic_memo }) = self
            .memos
            .memos
            .read()
            .get(memo_ingredient_index.as_usize())
        {
            let old_memo =
                atomic_memo.swap(MemoEntryType::to_dummy(memo).as_ptr(), Ordering::AcqRel);

            let old_memo = NonNull::new(old_memo);

            // SAFETY: `type_id` check asserted above
            return old_memo.map(|old_memo| unsafe { MemoEntryType::from_dummy(old_memo) });
        }

        // Otherwise we need the write lock.
        self.insert_cold(memo_ingredient_index, memo)
    }

    #[cold]
    fn insert_cold<M: Memo>(
        self,
        memo_ingredient_index: MemoIngredientIndex,
        memo: NonNull<M>,
    ) -> Option<NonNull<M>> {
        let memo_ingredient_index = memo_ingredient_index.as_usize();
        let mut memos = self.memos.memos.write();

        // Grow the table if needed.
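        // (`MemoEntry` is not `Clone`, so growth pushes fresh default entries
        // one by one rather than using `resize`.)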
        if memos.len() <= memo_ingredient_index {
            let additional_len = memo_ingredient_index - memos.len() + 1;
            memos.reserve(additional_len);
            while memos.len() <= memo_ingredient_index {
                memos.push(MemoEntry::default());
            }
        }

        let old_entry = mem::replace(
            memos[memo_ingredient_index].atomic_memo.get_mut(),
            MemoEntryType::to_dummy(memo).as_ptr(),
        );

        // SAFETY: The `TypeId` is asserted in `insert()`.
        NonNull::new(old_entry).map(|memo| unsafe { MemoEntryType::from_dummy(memo) })
    }

    #[inline]
    pub(crate) fn get<M: Memo>(
        self,
        memo_ingredient_index: MemoIngredientIndex,
    ) -> Option<NonNull<M>> {
        let read = self.memos.memos.read();
        let memo = read.get(memo_ingredient_index.as_usize())?;
        let type_ = self
            .types
            .types
            .get(memo_ingredient_index.as_usize())
            .and_then(MemoEntryType::load)?;
        assert_eq!(
            type_.type_id,
            TypeId::of::<M>(),
            "inconsistent type-id for `{memo_ingredient_index:?}`"
        );
        let memo = NonNull::new(memo.atomic_memo.load(Ordering::Acquire))?;

        // SAFETY: `type_id` check asserted above
        Some(unsafe { MemoEntryType::from_dummy(memo) })
    }

    #[cfg(feature = "salsa_unstable")]
    pub(crate) fn memory_usage(&self) -> Vec<crate::database::MemoInfo> {
        let mut memory_usage = Vec::new();
        let memos = self.memos.memos.read();
        for (index, memo) in memos.iter().enumerate() {
            let Some(memo) = NonNull::new(memo.atomic_memo.load(Ordering::Acquire)) else {
                continue;
            };

            let Some(type_) = self.types.types.get(index).and_then(MemoEntryType::load) else {
                continue;
            };

            // SAFETY: The `TypeId` is asserted in `insert()`.
            let dyn_memo: &dyn Memo = unsafe { (type_.to_dyn_fn)(memo).as_ref() };
            memory_usage.push(dyn_memo.memory_usage());
        }

        memory_usage
    }
}

pub(crate) struct MemoTableWithTypesMut<'a> {
    types: &'a MemoTableTypes,
    memos: &'a mut MemoTable,
}

impl MemoTableWithTypesMut<'_> {
    /// Calls `f` on the memo at `memo_ingredient_index`.
    ///
    /// If the memo is not present, `f` is not called.
    pub(crate) fn map_memo<M: Memo>(
        self,
        memo_ingredient_index: MemoIngredientIndex,
        f: impl FnOnce(&mut M),
    ) {
        let Some(type_) = self
            .types
            .types
            .get(memo_ingredient_index.as_usize())
            .and_then(MemoEntryType::load)
        else {
            return;
        };
        assert_eq!(
            type_.type_id,
            TypeId::of::<M>(),
            "inconsistent type-id for `{memo_ingredient_index:?}`"
        );

        // If the memo slot is already occupied, it must already have the
        // right type info etc, and we only need the read-lock.
        let memos = self.memos.memos.get_mut();
        let Some(MemoEntry { atomic_memo }) = memos.get_mut(memo_ingredient_index.as_usize())
        else {
            return;
        };
        let Some(memo) = NonNull::new(*atomic_memo.get_mut()) else {
            return;
        };

        // SAFETY: `type_id` check asserted above
        f(unsafe { MemoEntryType::from_dummy(memo).as_mut() });
    }

    /// To drop an entry, we need its type, so we don't implement `Drop`, and instead have this method.
    ///
    /// Note that calling this multiple times is safe, dropping an uninitialized entry is a no-op.
    ///
    /// # Safety
    ///
    /// The caller needs to make sure to not call this function until no more references into
    /// the database exist as there may be outstanding borrows into the pointer contents.
    #[inline]
    pub unsafe fn drop(&mut self) {
        let types = self.types.types.iter();
        for ((_, type_), memo) in std::iter::zip(types, self.memos.memos.get_mut()) {
            // SAFETY: The types match as per our constructor invariant.
            unsafe { memo.take(type_) };
        }
    }

    /// # Safety
    ///
    /// The caller needs to make sure to not call this function until no more references into
    /// the database exist as there may be outstanding borrows into the pointer contents.
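    ///
    /// A sketch of the intended call pattern (hypothetical caller):
    ///
    /// ```ignore
    /// // during teardown of a slot, once no `&`-references remain:
    /// unsafe { memos.take_memos(|index, boxed_memo| recycle(index, boxed_memo)) };
    /// ```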
    pub(crate) unsafe fn take_memos(
        &mut self,
        mut f: impl FnMut(MemoIngredientIndex, Box<dyn Memo>),
    ) {
        let memos = self.memos.memos.get_mut();
        memos
            .iter_mut()
            .zip(self.types.types.iter())
            .enumerate()
            .filter_map(|(index, (memo, (_, type_)))| {
                // SAFETY: The types match as per our constructor invariant.
                let memo = unsafe { memo.take(type_)? };
                Some((MemoIngredientIndex::from_usize(index), memo))
            })
            .for_each(|(index, memo)| f(index, memo));
    }
}

impl MemoEntry {
    /// # Safety
    ///
    /// The type must match.
    #[inline]
    unsafe fn take(&mut self, type_: &MemoEntryType) -> Option<Box<dyn Memo>> {
        let memo = mem::replace(self.atomic_memo.get_mut(), ptr::null_mut());
        let memo = NonNull::new(memo)?;
        let type_ = type_.load()?;
        // SAFETY: Our preconditions.
        Some(unsafe { Box::from_raw((type_.to_dyn_fn)(memo).as_ptr()) })
    }
}

impl Drop for DummyMemo {
    fn drop(&mut self) {
        unreachable!("should never get here")
    }
}

impl std::fmt::Debug for MemoTable {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MemoTable").finish_non_exhaustive()
    }
}
salsa-0.23.0/src/table.rs000064400000000000000000000343311046102023000132410ustar 00000000000000use std::alloc::Layout;
use std::any::{Any, TypeId};
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::{self, MaybeUninit};
use std::ptr::{self, NonNull};
use std::slice;

use memo::MemoTable;
use rustc_hash::FxHashMap;

use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sync::{Arc, Mutex};
use crate::table::memo::{MemoTableTypes, MemoTableWithTypes, MemoTableWithTypesMut};
use crate::{Id, IngredientIndex, Revision};

pub(crate) mod memo;

const PAGE_LEN_BITS: usize = 10;
const PAGE_LEN_MASK: usize = PAGE_LEN - 1;
const PAGE_LEN: usize = 1 << PAGE_LEN_BITS;
const MAX_PAGES: usize = 1 << (u32::BITS as usize - PAGE_LEN_BITS);

/// A typed [`Page`] view.
pub(crate) struct PageView<'p, T: Slot>(&'p Page, PhantomData<&'p T>);

pub struct Table {
    pages: boxcar::Vec<Page>,
    /// Map from ingredient to non-full pages that are up for grabs
    non_full_pages: Mutex<FxHashMap<IngredientIndex, Vec<PageIndex>>>,
}

/// # Safety
///
/// Implementors of this trait need to make sure that their type is unique with respect to
/// their owning ingredient as the allocation strategy relies on this.
pub(crate) unsafe trait Slot: Any + Send + Sync {
    /// Access the [`MemoTable`][] for this slot.
    ///
    /// # Safety condition
    ///
    /// The current revision MUST be the current revision of the database containing this slot.
    unsafe fn memos(&self, current_revision: Revision) -> &MemoTable;

    /// Mutably access the [`MemoTable`] for this slot.
    fn memos_mut(&mut self) -> &mut MemoTable;
}

/// [Slot::memos]
type SlotMemosFnRaw = unsafe fn(*const (), current_revision: Revision) -> *const MemoTable;
/// [Slot::memos]
type SlotMemosFn<T> = unsafe fn(&T, current_revision: Revision) -> &MemoTable;
/// [Slot::memos_mut]
type SlotMemosMutFnRaw = unsafe fn(*mut ()) -> *mut MemoTable;
/// [Slot::memos_mut]
type SlotMemosMutFn<T> = fn(&mut T) -> &mut MemoTable;

struct SlotVTable {
    layout: Layout,
    /// [`Slot`] methods
    memos: SlotMemosFnRaw,
    memos_mut: SlotMemosMutFnRaw,
    /// A drop impl to call when the own page drops
    /// SAFETY: The caller is required to supply a correct data pointer to a `Box<PageData<T>>` and initialized length,
    /// and correct memo types.
    drop_impl: unsafe fn(data: *mut (), initialized: usize, memo_types: &MemoTableTypes),
}

impl SlotVTable {
    const fn of<T: Slot>() -> &'static Self {
        const {
            &Self {
                drop_impl: |data, initialized, memo_types|
                    // SAFETY: The caller is required to supply a correct data pointer and initialized length
                    unsafe {
                        let data = Box::from_raw(data.cast::<PageData<T>>());
                        for i in 0..initialized {
                            let item = data[i].get().cast::<T>();
                            memo_types.attach_memos_mut((*item).memos_mut()).drop();
                            ptr::drop_in_place(item);
                        }
                    },
                layout: Layout::new::<T>(),
                // SAFETY: The signatures are compatible
                memos: unsafe { mem::transmute::<SlotMemosFn<T>, SlotMemosFnRaw>(T::memos) },
                // SAFETY: The signatures are compatible
                memos_mut: unsafe {
                    mem::transmute::<SlotMemosMutFn<T>, SlotMemosMutFnRaw>(T::memos_mut)
                },
            }
        }
    }
}

type PageDataEntry<T> = UnsafeCell<MaybeUninit<T>>;
type PageData<T> = [PageDataEntry<T>; PAGE_LEN];

struct Page {
    /// The ingredient for elements on this page.
    ingredient: IngredientIndex,

    /// Number of elements of `data` that are initialized.
    allocated: AtomicUsize,

    /// The "allocation lock" is held when we allocate a new entry.
    ///
    /// It ensures that we can load the index, initialize it, and then update the length atomically
    /// with respect to other allocations.
    ///
    /// We could avoid it if we wanted, we'd just have to be a bit fancier in our reasoning
    /// (for example, the bounds check in `Page::get` no longer suffices to truly guarantee
    /// that the data is initialized).
    allocation_lock: Mutex<()>,

    /// The potentially uninitialized data of this page. As we initialize new entries, we increment `allocated`.
    /// This is a box allocated `PageData<T>`
    data: NonNull<()>,

    /// A vtable for the slot type stored in this page.
    slot_vtable: &'static SlotVTable,

    /// The type id of what is stored as entries in data.
    // FIXME: Move this into SlotVTable once const stable
    slot_type_id: TypeId,

    /// The type name of what is stored as entries in data.
    // FIXME: Move this into SlotVTable once const stable
    slot_type_name: &'static str,

    memo_types: Arc<MemoTableTypes>,
}

// SAFETY: `Page` is `Send` as we make sure to only ever store `Slot` types in it which
// requires `Send`.
unsafe impl Send for Page /* where for M: Send */ {}
// SAFETY: `Page` is `Sync` as we make sure to only ever store `Slot` types in it which
// requires `Sync`.
unsafe impl Sync for Page /* where for M: Sync */ {}

#[derive(Copy, Clone, Debug)]
pub struct PageIndex(usize);

impl PageIndex {
    #[inline]
    fn new(idx: usize) -> Self {
        debug_assert!(idx < MAX_PAGES);
        Self(idx)
    }
}

#[derive(Copy, Clone, Debug)]
struct SlotIndex(usize);

impl SlotIndex {
    #[inline]
    fn new(idx: usize) -> Self {
        debug_assert!(idx < PAGE_LEN);
        Self(idx)
    }
}

impl Default for Table {
    fn default() -> Self {
        Self {
            pages: boxcar::Vec::new(),
            non_full_pages: Default::default(),
        }
    }
}

impl Table {
    /// Returns the [`IngredientIndex`] for an [`Id`].
    #[inline]
    pub fn ingredient_index(&self, id: Id) -> IngredientIndex {
        let (page_idx, _) = split_id(id);
        self.pages[page_idx.0].ingredient
    }

    /// Get a reference to the data for `id`, which must have been allocated from this table with type `T`.
    ///
    /// # Panics
    ///
    /// If `id` is out of bounds or does not have the type `T`.
    pub(crate) fn get<T: Slot>(&self, id: Id) -> &T {
        let (page, slot) = split_id(id);
        let page_ref = self.page::<T>(page);
        &page_ref.data()[slot.0]
    }

    /// Get a raw pointer to the data for `id`, which must have been allocated from this table.
    ///
    /// # Panics
    ///
    /// If `id` is out of bounds or does not have the type `T`.
    ///
    /// # Safety
    ///
    /// See [`Page::get_raw`][].
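    ///
    /// A sketch of careful usage (hypothetical caller):
    ///
    /// ```ignore
    /// let ptr: *mut MySlot = table.get_raw::<MySlot>(id);
    /// // the caller must uphold the aliasing rules before dereferencing `ptr`
    /// ```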
    pub(crate) fn get_raw<T: Slot>(&self, id: Id) -> *mut T {
        let (page, slot) = split_id(id);
        let page_ref = self.page::<T>(page);
        page_ref.page_data()[slot.0].get().cast::<T>()
    }

    /// Gets a reference to the page which has slots of type `T`
    ///
    /// # Panics
    ///
    /// If `page` is out of bounds or the type `T` is incorrect.
    #[inline]
    pub(crate) fn page<T: Slot>(&self, page: PageIndex) -> PageView<'_, T> {
        self.pages[page.0].assert_type::<T>()
    }

    /// Allocate a new page for the given ingredient and with slots of type `T`
    #[inline]
    pub(crate) fn push_page<T: Slot>(
        &self,
        ingredient: IngredientIndex,
        memo_types: Arc<MemoTableTypes>,
    ) -> PageIndex {
        PageIndex::new(self.pages.push(Page::new::<T>(ingredient, memo_types)))
    }

    /// Get the memo table associated with `id`
    ///
    /// # Safety condition
    ///
    /// The parameter `current_revision` MUST be the current revision
    /// of the owner of database owning this table.
    pub(crate) unsafe fn memos(
        &self,
        id: Id,
        current_revision: Revision,
    ) -> MemoTableWithTypes<'_> {
        let (page, slot) = split_id(id);
        let page = &self.pages[page.0];
        // SAFETY: We supply a proper slot pointer and the caller is required to pass the `current_revision`.
        let memos = unsafe { &*(page.slot_vtable.memos)(page.get(slot), current_revision) };
        // SAFETY: The `Page` keeps the correct memo types.
        unsafe { page.memo_types.attach_memos(memos) }
    }

    /// Get the memo table associated with `id`
    pub(crate) fn memos_mut(&mut self, id: Id) -> MemoTableWithTypesMut<'_> {
        let (page, slot) = split_id(id);
        let page_index = page.0;
        let page = self
            .pages
            .get_mut(page_index)
            .unwrap_or_else(|| panic!("index `{page_index}` is uninitialized"));
        // SAFETY: We supply a proper slot pointer and the caller is required to pass the `current_revision`.
        let memos = unsafe { &mut *(page.slot_vtable.memos_mut)(page.get(slot)) };
        // SAFETY: The `Page` keeps the correct memo types.
        unsafe { page.memo_types.attach_memos_mut(memos) }
    }

    #[cfg(feature = "salsa_unstable")]
    pub(crate) fn slots_of<T: Slot>(&self) -> impl Iterator<Item = &T> + '_ {
        self.pages
            .iter()
            .filter_map(|(_, page)| page.cast_type::<T>())
            .flat_map(|view| view.data())
    }

    pub(crate) fn fetch_or_push_page<T: Slot>(
        &self,
        ingredient: IngredientIndex,
        memo_types: impl FnOnce() -> Arc<MemoTableTypes>,
    ) -> PageIndex {
        if let Some(page) = self
            .non_full_pages
            .lock()
            .get_mut(&ingredient)
            .and_then(Vec::pop)
        {
            return page;
        }
        self.push_page::<T>(ingredient, memo_types())
    }

    pub(crate) fn record_unfilled_page(&self, ingredient: IngredientIndex, page: PageIndex) {
        self.non_full_pages
            .lock()
            .entry(ingredient)
            .or_default()
            .push(page);
    }
}

impl<'p, T: Slot> PageView<'p, T> {
    #[inline]
    fn page_data(&self) -> &'p [PageDataEntry<T>] {
        let len = self.0.allocated.load(Ordering::Acquire);
        // SAFETY: `len` is the initialized length of the page
        unsafe { slice::from_raw_parts(self.0.data.cast::<PageDataEntry<T>>().as_ptr(), len) }
    }

    #[inline]
    fn data(&self) -> &'p [T] {
        let len = self.0.allocated.load(Ordering::Acquire);
        // SAFETY: `len` is the initialized length of the page
        unsafe { slice::from_raw_parts(self.0.data.cast::<T>().as_ptr(), len) }
    }

    pub(crate) fn allocate<V>(&self, page: PageIndex, value: V) -> Result<Id, V>
    where
        V: FnOnce(Id) -> T,
    {
        let _guard = self.0.allocation_lock.lock();
        let index = self.0.allocated.load(Ordering::Acquire);
        if index >= PAGE_LEN {
            return Err(value);
        }

        // Initialize entry `index`
        let id = make_id(page, SlotIndex::new(index));
        let data = self.0.data.cast::<PageDataEntry<T>>();
        // SAFETY: `index` is also guaranteed to be in bounds as per the check above.
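        // (The `Release` store of `allocated` below pairs with the `Acquire`
        // loads in `page_data`/`data`, so readers never observe this slot
        // before its contents have been written.)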
        let entry = unsafe { &*data.as_ptr().add(index) };
        // SAFETY: We acquired the allocation lock, so we have unique access to the UnsafeCell
        // interior
        unsafe { (*entry.get()).write(value(id)) };

        // Update the length (this must be done after initialization as otherwise an uninitialized
        // read could occur!)
        self.0.allocated.store(index + 1, Ordering::Release);

        Ok(id)
    }
}

impl Page {
    #[inline]
    fn new<T: Slot>(ingredient: IngredientIndex, memo_types: Arc<MemoTableTypes>) -> Self {
        #[cfg(not(feature = "shuttle"))]
        let data: Box<PageData<T>> =
            Box::new([const { UnsafeCell::new(MaybeUninit::uninit()) }; PAGE_LEN]);

        #[cfg(feature = "shuttle")]
        let data = {
            // Avoid stack overflows when using larger shuttle types.
            let data = (0..PAGE_LEN)
                .map(|_| UnsafeCell::new(MaybeUninit::uninit()))
                .collect::<Box<[PageDataEntry<T>]>>();
            let data: *mut [PageDataEntry<T>] = Box::into_raw(data);
            // SAFETY: `*mut PageDataEntry<T>` and `*mut [PageDataEntry<T>; N]` have the same layout.
            unsafe { Box::from_raw(data.cast::<PageDataEntry<T>>().cast::<PageData<T>>()) }
        };

        Self {
            slot_vtable: SlotVTable::of::<T>(),
            slot_type_id: TypeId::of::<T>(),
            slot_type_name: std::any::type_name::<T>(),
            ingredient,
            allocated: Default::default(),
            allocation_lock: Default::default(),
            data: NonNull::from(Box::leak(data)).cast::<()>(),
            memo_types,
        }
    }

    /// Retrieves the pointer for the given slot.
    ///
    /// # Panics
    ///
    /// If slot is out of bounds
    fn get(&self, slot: SlotIndex) -> *mut () {
        let len = self.allocated.load(Ordering::Acquire);
        assert!(
            slot.0 < len,
            "out of bounds access `{slot:?}` (maximum slot `{len}`)"
        );

        // SAFETY: We have checked that the resulting pointer will be within bounds.
        unsafe {
            self.data
                .as_ptr()
                .byte_add(slot.0 * self.slot_vtable.layout.size())
        }
    }

    #[inline]
    fn assert_type<T: Slot>(&self) -> PageView<'_, T> {
        assert_eq!(
            self.slot_type_id,
            TypeId::of::<T>(),
            "page has slot type `{:?}` but `{:?}` was expected",
            self.slot_type_name,
            std::any::type_name::<T>(),
        );

        PageView(self, PhantomData)
    }

    #[cfg(feature = "salsa_unstable")]
    fn cast_type<T: Slot>(&self) -> Option<PageView<'_, T>> {
        if self.slot_type_id == TypeId::of::<T>() {
            Some(PageView(self, PhantomData))
        } else {
            None
        }
    }
}

impl Drop for Page {
    fn drop(&mut self) {
        let len = *self.allocated.get_mut();
        // SAFETY: We supply the data pointer and the initialized length
        unsafe { (self.slot_vtable.drop_impl)(self.data.as_ptr(), len, &self.memo_types) };
    }
}

fn make_id(page: PageIndex, slot: SlotIndex) -> Id {
    let page = page.0 as u32;
    let slot = slot.0 as u32;
    // SAFETY: `slot` is guaranteed to be small enough that the resulting Id won't be bigger than `Id::MAX_U32`
    unsafe { Id::from_index((page << PAGE_LEN_BITS) | slot) }
}

#[inline]
fn split_id(id: Id) -> (PageIndex, SlotIndex) {
    let index = id.index() as usize;
    let slot = index & PAGE_LEN_MASK;
    let page = index >> PAGE_LEN_BITS;
    (PageIndex::new(page), SlotIndex::new(slot))
}
salsa-0.23.0/src/tracked_struct/tracked_field.rs000064400000000000000000000057541046102023000177540ustar 00000000000000use std::marker::PhantomData;

use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::ingredient::Ingredient;
use crate::sync::Arc;
use crate::table::memo::MemoTableTypes;
use crate::tracked_struct::{Configuration, Value};
use crate::zalsa::IngredientIndex;
use crate::{Database, Id};

/// Created for each tracked struct.
///
/// This ingredient only stores the "id" fields.
/// It is a kind of "dressed up" interner;
/// the active query + values of id fields are hashed to create the tracked struct id.
/// The value fields are stored in [`crate::function::IngredientImpl`] instances keyed by the tracked struct id.
/// Unlike normal interners, tracked struct indices can be deleted and reused aggressively:
/// when a tracked function re-executes,
/// any tracked structs that it created before but did not create this time can be deleted.
pub struct FieldIngredientImpl<C>
where
    C: Configuration,
{
    /// Index of this ingredient in the database (used to construct database-ids, etc).
    ingredient_index: IngredientIndex,
    /// The index of this field on the tracked struct relative to all other tracked fields.
    field_index: usize,
    phantom: PhantomData<fn() -> Value<C>>,
}

impl<C> FieldIngredientImpl<C>
where
    C: Configuration,
{
    pub(super) fn new(field_index: usize, ingredient_index: IngredientIndex) -> Self {
        Self {
            field_index,
            ingredient_index,
            phantom: PhantomData,
        }
    }
}

impl<C> Ingredient for FieldIngredientImpl<C>
where
    C: Configuration,
{
    fn location(&self) -> &'static crate::ingredient::Location {
        &C::LOCATION
    }

    fn ingredient_index(&self) -> IngredientIndex {
        self.ingredient_index
    }

    unsafe fn maybe_changed_after<'db>(
        &'db self,
        db: &'db dyn Database,
        input: Id,
        revision: crate::Revision,
        _cycle_heads: &mut CycleHeads,
    ) -> VerifyResult {
        let zalsa = db.zalsa();
        let data = <super::IngredientImpl<C>>::data(zalsa.table(), input);
        let field_changed_at = data.revisions[self.field_index];
        VerifyResult::changed_if(field_changed_at > revision)
    }

    fn fmt_index(&self, index: crate::Id, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            fmt,
            "{}.{}({:?})",
            C::DEBUG_NAME,
            C::TRACKED_FIELD_NAMES[self.field_index],
            index
        )
    }

    fn debug_name(&self) -> &'static str {
        C::TRACKED_FIELD_NAMES[self.field_index]
    }

    fn memo_table_types(&self) -> Arc<MemoTableTypes> {
        unreachable!("tracked field does not allocate pages")
    }
}

impl<C> std::fmt::Debug for FieldIngredientImpl<C>
where
    C: Configuration,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct(std::any::type_name::<Self>())
            .field("ingredient_index", &self.ingredient_index)
            .field("field_index", &self.field_index)
            .finish()
    }
}
salsa-0.23.0/src/tracked_struct.rs000064400000000000000000001141661046102023000151660ustar 00000000000000#![allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety

use std::any::TypeId;
use std::hash::Hash;
use std::marker::PhantomData;
use std::ops::Index;
use std::{fmt, mem};

use crossbeam_queue::SegQueue;
use thin_vec::ThinVec;
use tracked_field::FieldIngredientImpl;

use crate::cycle::CycleHeads;
use crate::function::VerifyResult;
use crate::id::{AsId, FromId};
use crate::ingredient::{Ingredient, Jar};
use crate::key::DatabaseKeyIndex;
use crate::plumbing::ZalsaLocal;
use crate::revision::OptionalAtomicRevision;
use crate::runtime::Stamp;
use crate::salsa_struct::SalsaStructInDb;
use crate::sync::Arc;
use crate::table::memo::{MemoTable, MemoTableTypes, MemoTableWithTypesMut};
use crate::table::{Slot, Table};
use crate::zalsa::{IngredientIndex, Zalsa};
use crate::{Database, Durability, Event, EventKind, Id, Revision};

pub mod tracked_field;

// ANCHOR: Configuration
/// Trait that defines the key properties of a tracked struct.
///
/// Implemented by the `#[salsa::tracked]` macro when applied
/// to a struct.
pub trait Configuration: Sized + 'static {
    const LOCATION: crate::ingredient::Location;

    /// The debug name of the tracked struct.
    const DEBUG_NAME: &'static str;

    /// The debug names of any tracked fields.
    const TRACKED_FIELD_NAMES: &'static [&'static str];

    /// The relative indices of any tracked fields.
    const TRACKED_FIELD_INDICES: &'static [usize];

    /// A (possibly empty) tuple of the fields for this struct.
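    ///
    /// For a hypothetical `#[salsa::tracked] struct File { path: String, len: usize }`,
    /// this would be the tuple type `(String, usize)` (a sketch; the macro generates
    /// the actual associated type).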
    type Fields<'db>: Send + Sync;

    /// An array of [`Revision`][] values, one for each of the tracked value fields.
    /// When a struct is re-created in a new revision, the corresponding
    /// entries for each field are updated to the new revision if their
    /// values have changed (or if the field is marked as `#[no_eq]`).
    type Revisions: Send + Sync + Index<usize, Output = Revision>;

    type Struct<'db>: Copy + FromId + AsId;

    fn untracked_fields(fields: &Self::Fields<'_>) -> impl Hash;

    /// Create a new value revision array where each element is set to `current_revision`.
    fn new_revisions(current_revision: Revision) -> Self::Revisions;

    /// Update the field data and, if the value has changed,
    /// the appropriate entry in the `revisions` array (tracked fields only).
    ///
    /// Returns `true` if any untracked field was updated and
    /// the struct should be considered re-created.
    ///
    /// # Safety
    ///
    /// Requires the same conditions as the `maybe_update`
    /// method on [the `Update` trait](`crate::update::Update`).
    ///
    /// In short, requires that `old_fields` be a pointer into
    /// storage from a previous revision.
    /// It must meet its validity invariant.
    /// Owned content must meet its safety invariant.
    /// `*mut` here is not strictly needed;
    /// it is used to signal that the content
    /// is not guaranteed to recursively meet
    /// its safety invariant and
    /// hence this must be dereferenced with caution.
    ///
    /// Ensures that `old_fields` is fully updated and valid
    /// after it returns and that `revisions` has been updated
    /// for any field that changed.
    unsafe fn update_fields<'db>(
        current_revision: Revision,
        revisions: &mut Self::Revisions,
        old_fields: *mut Self::Fields<'db>,
        new_fields: Self::Fields<'db>,
    ) -> bool;
}
// ANCHOR_END: Configuration

pub struct JarImpl<C>
where
    C: Configuration,
{
    phantom: PhantomData<C>,
}

impl<C: Configuration> Default for JarImpl<C> {
    fn default() -> Self {
        Self {
            phantom: Default::default(),
        }
    }
}

impl<C: Configuration> Jar for JarImpl<C> {
    fn create_ingredients(
        _zalsa: &Zalsa,
        struct_index: crate::zalsa::IngredientIndex,
        _dependencies: crate::memo_ingredient_indices::IngredientIndices,
    ) -> Vec<Box<dyn Ingredient>> {
        let struct_ingredient = <IngredientImpl<C>>::new(struct_index);

        let tracked_field_ingredients = C::TRACKED_FIELD_INDICES
            .iter()
            .copied()
            .map(|tracked_index| {
                Box::new(<FieldIngredientImpl<C>>::new(
                    tracked_index,
                    struct_index.successor(tracked_index),
                )) as _
            });

        std::iter::once(Box::new(struct_ingredient) as _)
            .chain(tracked_field_ingredients)
            .collect()
    }

    fn id_struct_type_id() -> TypeId {
        TypeId::of::<C::Struct<'static>>()
    }
}

pub trait TrackedStructInDb: SalsaStructInDb {
    /// Converts the identifier for this tracked struct into a `DatabaseKeyIndex`.
    fn database_key_index(zalsa: &Zalsa, id: Id) -> DatabaseKeyIndex;
}

/// Created for each tracked struct.
///
/// This ingredient only stores the "id" fields. It is a kind of "dressed up" interner;
/// the active query + values of id fields are hashed to create the tracked
/// struct id. The value fields are stored in [`crate::function::IngredientImpl`]
/// instances keyed by the tracked struct id.
///
/// Unlike normal interned values, tracked struct indices can be deleted and reused aggressively
/// without dependency edges on the creating query. When a tracked function is collected,
/// any tracked structs it created can be deleted. Additionally, when a tracked function
/// re-executes but does not create a tracked struct that was previously created, it can
/// be deleted. No dependency edge is required as the lifetime of a tracked struct is tied
/// directly to the query that created it.
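// The ingredient layout produced by `create_ingredients` above is positional:
// if the struct ingredient is registered at index `N`, the field ingredient for
// relative tracked field `i` lives at `N + 1 + i` (via `IngredientIndex::successor`).
// For example, a struct with two tracked fields occupies indices `N`, `N + 1`,
// and `N + 2`; `tracked_field` below relies on this when it computes
// `self.ingredient_index.successor(relative_tracked_index)`.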
pub struct IngredientImpl where C: Configuration, { /// Our index in the database. ingredient_index: IngredientIndex, /// Phantom data: we fetch `Value` out from `Table` phantom: PhantomData Value>, /// Store freed ids free_list: SegQueue, memo_table_types: Arc, } /// Defines the identity of a tracked struct. /// This is the key to a hashmap that is (initially) /// stored in the [`ActiveQuery`](`crate::active_query::ActiveQuery`) /// struct and later moved to the [`Memo`](`crate::function::memo::Memo`). #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] pub(crate) struct Identity { // Conceptually, this contains an `IdentityHash`, but using `IdentityHash` directly will grow the size // of this struct struct by a `std::mem::size_of::()` due to unusable padding. To avoid this increase // in size, we inline the fields of `IdentityHash`. /// Index of the tracked struct ingredient. ingredient_index: IngredientIndex, /// Hash of the id fields. hash: u64, /// The unique disambiguator assigned within the active query /// to distinguish distinct tracked structs with the same identity_hash. disambiguator: Disambiguator, } impl Identity { pub(crate) fn ingredient_index(&self) -> IngredientIndex { self.ingredient_index } } /// Stores the data that (almost) uniquely identifies a tracked struct. /// This includes the ingredient index of that struct type plus the hash of its untracked fields. /// This is mapped to a disambiguator -- a value that starts as 0 but increments each round, /// allowing for multiple tracked structs with the same hash and ingredient_index /// created within the query to each have a unique id. #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] pub struct IdentityHash { /// Index of the tracked struct ingredient. ingredient_index: IngredientIndex, /// Hash of the id fields. hash: u64, } /// A map from tracked struct keys (which include the hash + [Disambiguator]) to their /// final [Id]. #[derive(Default, Debug)] pub(crate) struct IdentityMap { // we use a hashtable here as our key contains its own hash (`Identity::hash`) // so we do the hash wrangling ourselves table: hashbrown::HashTable<(Identity, Id)>, } impl Clone for IdentityMap { fn clone(&self) -> Self { Self { table: self.table.clone(), } } } impl IdentityMap { pub(crate) fn clone_from_slice(&mut self, source: &[(Identity, Id)]) { self.table.clear(); self.table.reserve(source.len(), |(k, _)| k.hash); for (key, id) in source { self.insert(*key, *id); } } pub(crate) fn insert(&mut self, key: Identity, id: Id) -> Option { let entry = self.table.find_mut(key.hash, |&(k, _)| k == key); match entry { Some(occupied) => Some(mem::replace(&mut occupied.1, id)), None => { self.table .insert_unique(key.hash, (key, id), |(k, _)| k.hash); None } } } pub(crate) fn get(&self, key: &Identity) -> Option { self.table .find(key.hash, |&(k, _)| k == *key) .map(|&(_, v)| v) } pub(crate) fn is_empty(&self) -> bool { self.table.is_empty() } pub(crate) fn clear(&mut self) { self.table.clear() } pub(crate) fn into_thin_vec(self) -> ThinVec<(Identity, Id)> { self.table.into_iter().collect() } } // ANCHOR: ValueStruct #[derive(Debug)] pub struct Value where C: Configuration, { /// The minimum durability of all inputs consumed by the creator /// query prior to creating this tracked struct. If any of those /// inputs changes, then the creator query may create this struct /// with different values. durability: Durability, /// The revision when this tracked struct was last updated. /// This field also acts as a kind of "lock". 
Once it is equal
    /// to `Some(current_revision)`, the fields are locked and
    /// cannot change further. This makes it safe to give out `&`-references
    /// so long as they do not live longer than the current revision
    /// (which is assured by tying their lifetime to the lifetime of an `&`-ref
    /// to the database).
    ///
    /// The struct is updated from an older revision `R0` to the current revision `R1`
    /// when the struct is first accessed in `R1`, whether that be because the original
    /// query re-created the struct (i.e., by the user calling `Struct::new`) or because
    /// the struct was read from. (Structs may not be re-created in the new revision if
    /// the inputs to the query have not changed.)
    ///
    /// When re-creating the struct, the field is temporarily set to `None`.
    /// This is a signal that there is an active `&mut` modifying the other fields:
    /// even reading from those fields in that situation would create UB.
    /// This `None` value should never be observable by users unless they have
    /// leaked a reference across threads somehow.
    updated_at: OptionalAtomicRevision,

    /// Fields of this tracked struct. They can change across revisions,
    /// but they do not change within a particular revision.
    fields: C::Fields<'static>,

    /// The revision information for each field: when did this field last change.
    /// When tracked structs are re-created, this revision may be updated to the
    /// current revision if the value is different.
    revisions: C::Revisions,

    /// Memo table storing the results of query functions etc.
    /*unsafe */ memos: MemoTable,
}
// ANCHOR_END: ValueStruct

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)]
pub struct Disambiguator(u32);

#[derive(Default, Debug)]
pub(crate) struct DisambiguatorMap {
    // we use a non-hasher hashmap here as our key contains its own hash (in a sense)
    // so we use the raw entry api instead to avoid the overhead of hashing unnecessarily
    map: hashbrown::HashMap<IdentityHash, Disambiguator, ()>,
}

impl DisambiguatorMap {
    pub(crate) fn disambiguate(&mut self, key: IdentityHash) -> Disambiguator {
        use hashbrown::hash_map::RawEntryMut;

        let entry = self.map.raw_entry_mut().from_hash(key.hash, |k| *k == key);
        let disambiguator = match entry {
            RawEntryMut::Occupied(occupied) => occupied.into_mut(),
            RawEntryMut::Vacant(vacant) => {
                vacant
                    .insert_with_hasher(key.hash, key, Disambiguator(0), |k| k.hash)
                    .1
            }
        };
        let result = *disambiguator;
        disambiguator.0 += 1;
        result
    }

    pub fn clear(&mut self) {
        self.map.clear()
    }

    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
}

impl<C> IngredientImpl<C>
where
    C: Configuration,
{
    /// Create a tracked struct ingredient. Generated by the `#[tracked]` macro,
    /// not meant to be called directly by end-users.
    fn new(index: IngredientIndex) -> Self {
        Self {
            ingredient_index: index,
            phantom: PhantomData,
            free_list: Default::default(),
            memo_table_types: Arc::new(MemoTableTypes::default()),
        }
    }

    /// Returns the database key index for a tracked struct with the given id.
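    // A sketch of the `updated_at` protocol described on `Value` above
    // (states are illustrative):
    //
    //     None             => write-locked: an update is in flight, and even
    //                         reading the other fields would be UB
    //     Some(r), r < now => valid but not yet verified for the current
    //                         revision; the first access moves it forward via
    //                         `compare_exchange(Some(r), Some(now))`
    //     Some(now)        => verified for the current revision; the fields
    //                         are frozen and `&`-references may be handed out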
pub fn database_key_index(&self, id: Id) -> DatabaseKeyIndex { DatabaseKeyIndex::new(self.ingredient_index, id) } pub fn new_struct<'db>( &'db self, db: &'db dyn Database, mut fields: C::Fields<'db>, ) -> C::Struct<'db> { let (zalsa, zalsa_local) = db.zalsas(); let identity_hash = IdentityHash { ingredient_index: self.ingredient_index, hash: crate::hash::hash(&C::untracked_fields(&fields)), }; let (current_deps, disambiguator) = zalsa_local.disambiguate(identity_hash); let identity = Identity { hash: identity_hash.hash, ingredient_index: identity_hash.ingredient_index, disambiguator, }; let current_revision = zalsa.current_revision(); if let Some(id) = zalsa_local.tracked_struct_id(&identity) { // The struct already exists in the intern map. let index = self.database_key_index(id); tracing::trace!("Reuse tracked struct {id:?}", id = index); zalsa_local.add_output(index); // SAFETY: The `id` was present in the interned map, so the value must be initialized. let update_result = unsafe { self.update(zalsa, current_revision, id, ¤t_deps, fields) }; fields = match update_result { // Overwrite the previous ID if we are reusing the old slot with new fields. Ok(updated_id) if updated_id != id => { zalsa_local.store_tracked_struct_id(identity, updated_id); return FromId::from_id(updated_id); } // The id has not changed. Ok(id) => return FromId::from_id(id), // Failed to perform the update, we are forced to allocate a new slot. Err(fields) => fields, }; } // We failed to perform the update, or this is a new tracked struct, so allocate a new entry // in the struct map. let id = self.allocate(zalsa, zalsa_local, current_revision, ¤t_deps, fields); let key = self.database_key_index(id); tracing::trace!("Allocated new tracked struct {key:?}"); zalsa_local.add_output(key); zalsa_local.store_tracked_struct_id(identity, id); FromId::from_id(id) } fn allocate<'db>( &'db self, zalsa: &'db Zalsa, zalsa_local: &'db ZalsaLocal, current_revision: Revision, current_deps: &Stamp, fields: C::Fields<'db>, ) -> Id { let value = |_| Value { updated_at: OptionalAtomicRevision::new(Some(current_revision)), durability: current_deps.durability, // lifetime erase for storage fields: unsafe { mem::transmute::, C::Fields<'static>>(fields) }, revisions: C::new_revisions(current_deps.changed_at), memos: Default::default(), }; while let Some(id) = self.free_list.pop() { // Increment the ID generation before reusing it, as if we have allocated a new // slot in the table. // // If the generation would overflow, we are forced to leak the slot. Note that this // shouldn't be a problem in general as sufficient bits are reserved for the generation. let Some(id) = id.next_generation() else { tracing::info!( "leaking tracked struct {:?} due to generation overflow", self.database_key_index(id) ); continue; }; // SAFETY: We just removed `id` from the free-list, so we have exclusive access. let data = unsafe { &mut *Self::data_raw(zalsa.table(), id) }; assert!( data.updated_at.load().is_none(), "free list entry for `{id:?}` does not have `None` for `updated_at`" ); // Overwrite the free-list entry. Use `*foo = ` because the entry // has been previously initialized and we want to free the old contents. *data = value(id); return id; } zalsa_local.allocate::>(zalsa, self.ingredient_index, value) } /// Get mutable access to the data for `id` -- this holds a write lock for the duration /// of the returned value. /// /// # Panics /// /// * If the value is not present in the map. /// * If the value is already updated in this revision. 
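    // Slot reuse in `allocate` above relies on ID generations: an `id` popped
    // from the free list is re-issued via `id.next_generation()`, which keeps
    // the same slot but bumps the generation counter. A dependency recorded
    // against the old generation can therefore never match the recycled slot.
    // For example (illustrative numbers), slot 7 at generation 0 is freed and
    // comes back as slot 7 at generation 1.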
    ///
    /// # Safety
    ///
    /// The value at the given `id` must be initialized.
    unsafe fn update<'db>(
        &'db self,
        zalsa: &'db Zalsa,
        current_revision: Revision,
        mut id: Id,
        current_deps: &Stamp,
        fields: C::Fields<'db>,
    ) -> Result<Id, C::Fields<'db>> {
        let data_raw = Self::data_raw(zalsa.table(), id);

        // The protocol is:
        //
        // * When we begin updating, we store `None` in the `updated_at` field
        // * When completed, we store `Some(current_revision)` in `updated_at`
        //
        // No matter what mischief users get up to, it should be impossible for us to
        // observe `None` in `updated_at`. The `id` should only be associated with one
        // query and that query can only be running in one thread at a time.
        //
        // We *can* observe `Some(current_revision)` however, which means that this
        // tracked struct is already updated for this revision.
        // In that case we should not modify or touch it because there may be
        // `&`-references to its contents floating around.
        //
        // Observing `Some(current_revision)` can happen in two scenarios: leaks (tsk tsk)
        // but also the scenario embodied by the test `test_run_5_then_20` in `specify_tracked_fn_in_rev_1_but_not_2.rs`:
        //
        // * Revision 1:
        //   * Tracked function F creates tracked struct S
        //   * F reads input I
        // * Revision 2: I is changed, F is re-executed
        //
        // When F is re-executed in rev 2, we first try to validate F's inputs/outputs,
        // which is the list [output: S, input: I]. As no inputs have changed by the time
        // we reach S, we mark it as verified. But then input I is seen to have changed,
        // and so we re-execute F. Note that we *know* that S will have the same value
        // (barring program bugs).
        //
        // Further complicating things: it is possible that F calls F2
        // and gives it (e.g.) S as one of its arguments. Validating F2 may cause F2 to
        // re-execute which means that it may indeed have read from S's fields
        // during the current revision and thus obtained an `&` reference to those fields
        // that is still live.
        {
            // SAFETY: Guaranteed by caller.
            let data = unsafe { &*data_raw };

            let last_updated_at = data.updated_at.load();
            assert!(
                last_updated_at.is_some(),
                "two concurrent writers to {id:?}, should not be possible"
            );

            // The value is already read-locked, but we can reuse it safely as per above.
            if last_updated_at == Some(current_revision) {
                return Ok(id);
            }

            // Updating the fields may make it necessary to increment the generation of the ID. In
            // the unlikely case that the ID is already at its maximum generation, we are forced to leak
            // the previous slot and allocate a new value.
            if id.generation() == u32::MAX {
                tracing::info!(
                    "leaking tracked struct {:?} due to generation overflow",
                    self.database_key_index(id)
                );

                return Err(fields);
            }

            // Acquire the write-lock. This can only fail if there is a parallel thread
            // reading from this same `id`, which can only happen if the user has leaked it.
            // Tsk tsk.
            let swapped_out = data.updated_at.swap(None);
            if swapped_out != last_updated_at {
                panic!(
                    "failed to acquire write lock, id `{id:?}` must have been leaked across threads"
                );
            }
        }

        // UNSAFE: Marking as mut requires exclusive access for the duration of
        // the `mut`. We have now *claimed* this data by swapping in `None`,
        // any attempt to read concurrently will panic.
        let data = unsafe { &mut *data_raw };

        // SAFETY: We assert that the pointer to `data.revisions`
        // is a pointer into the database referencing a value
        // from a previous revision. As such, it continues to meet
        // its validity invariant and any owned content also continues
        // to meet its safety invariant.
let untracked_update = unsafe { C::update_fields( current_deps.changed_at, &mut data.revisions, mem::transmute::<*mut C::Fields<'static>, *mut C::Fields<'db>>( std::ptr::addr_of_mut!(data.fields), ), fields, ) }; if untracked_update { // Consider this a new tracked-struct when any non-tracked field got updated. // This should be rare and only ever happen if there's a hash collision. // // Note that we hold the lock and have exclusive access to the tracked struct data, // so there should be no live instances of IDs from the previous generation. We clear // the memos and return a new ID here as if we have allocated a new slot. let mut table = data.take_memo_table(); // SAFETY: The memo table belongs to a value that we allocated, so it has the // correct type. unsafe { self.clear_memos(zalsa, &mut table, id) }; id = id .next_generation() .expect("already verified that generation is not maximum"); } if current_deps.durability < data.durability { data.revisions = C::new_revisions(current_deps.changed_at); } data.durability = current_deps.durability; let swapped_out = data.updated_at.swap(Some(current_revision)); assert!(swapped_out.is_none()); Ok(id) } /// Fetch the data for a given id created by this ingredient from the table, /// -giving it the appropriate type. fn data(table: &Table, id: Id) -> &Value { table.get(id) } fn data_raw(table: &Table, id: Id) -> *mut Value { table.get_raw(id) } /// Deletes the given entities. This is used after a query `Q` executes and we can compare /// the entities `E_now` that it produced in this revision vs the entities /// `E_prev` it produced in the last revision. Any missing entities `E_prev - E_new` can be /// deleted. /// /// # Warning /// /// Using this method on an entity id that MAY be used in the current revision will lead to /// unspecified results (but not UB). See [`InternedIngredient::delete_index`] for more /// discussion and important considerations. pub(crate) fn delete_entity(&self, zalsa: &Zalsa, id: Id) { zalsa.event(&|| { Event::new(crate::EventKind::DidDiscard { key: self.database_key_index(id), }) }); let current_revision = zalsa.current_revision(); let data_raw = Self::data_raw(zalsa.table(), id); { let data = unsafe { &*data_raw }; // We want to set `updated_at` to `None`, signalling that other field values // cannot be read. The current value should be `Some(R0)` for some older revision. match data.updated_at.load() { None => { panic!("cannot delete write-locked id `{id:?}`; value leaked across threads"); } Some(r) if r == current_revision => panic!( "cannot delete read-locked id `{id:?}`; value leaked across threads or user functions not deterministic" ), Some(r) => { if data.updated_at.compare_exchange(Some(r), None).is_err() { panic!("race occurred when deleting value `{id:?}`") } } } } // SAFETY: We have acquired the write lock let data = unsafe { &mut *data_raw }; let mut memo_table = data.take_memo_table(); // SAFETY: The memo table belongs to a value that we allocated, so it // has the correct type. unsafe { self.clear_memos(zalsa, &mut memo_table, id) }; // now that all cleanup has occurred, make available for re-use self.free_list.push(id); } /// Clears the given memo table. /// /// # Safety /// /// The `MemoTable` must belong to a `Value` of the correct type. pub(crate) unsafe fn clear_memos(&self, zalsa: &Zalsa, memo_table: &mut MemoTable, id: Id) { // SAFETY: The caller guarantees this is the correct types table. 
let table = unsafe { self.memo_table_types.attach_memos_mut(memo_table) }; // `Database::salsa_event` is a user supplied callback which may panic // in that case we need a drop guard to free the memo table struct TableDropGuard<'a>(MemoTableWithTypesMut<'a>); impl Drop for TableDropGuard<'_> { fn drop(&mut self) { // SAFETY: We have `&mut MemoTable`, so no more references to these memos exist and we are good // to drop them. unsafe { self.0.drop() }; } } let mut table_guard = TableDropGuard(table); // SAFETY: We have `&mut MemoTable`, so no more references to these memos exist and we are good // to drop them. unsafe { table_guard.0.take_memos(|memo_ingredient_index, memo| { let ingredient_index = zalsa.ingredient_index_for_memo(self.ingredient_index, memo_ingredient_index); let executor = DatabaseKeyIndex::new(ingredient_index, id); zalsa.event(&|| Event::new(EventKind::DidDiscard { key: executor })); for stale_output in memo.origin().outputs() { stale_output.remove_stale_output(zalsa, executor); } }) }; mem::forget(table_guard); } /// Return reference to the field data ignoring dependency tracking. /// Used for debugging. pub fn leak_fields<'db>( &'db self, db: &'db dyn Database, s: C::Struct<'db>, ) -> &'db C::Fields<'db> { let id = AsId::as_id(&s); let data = Self::data(db.zalsa().table(), id); data.fields() } /// Access to this tracked field. /// /// Note that this function returns the entire tuple of value fields. /// The caller is responsible for selecting the appropriate element. pub fn tracked_field<'db>( &'db self, db: &'db dyn crate::Database, s: C::Struct<'db>, relative_tracked_index: usize, ) -> &'db C::Fields<'db> { let (zalsa, zalsa_local) = db.zalsas(); let id = AsId::as_id(&s); let field_ingredient_index = self.ingredient_index.successor(relative_tracked_index); let data = Self::data(zalsa.table(), id); data.read_lock(zalsa.current_revision()); let field_changed_at = data.revisions[relative_tracked_index]; zalsa_local.report_tracked_read_simple( DatabaseKeyIndex::new(field_ingredient_index, id), data.durability, field_changed_at, ); data.fields() } /// Access to this untracked field. /// /// Note that this function returns the entire tuple of value fields. /// The caller is responsible for selecting the appropriate element. pub fn untracked_field<'db>( &'db self, db: &'db dyn crate::Database, s: C::Struct<'db>, ) -> &'db C::Fields<'db> { let zalsa = db.zalsa(); let id = AsId::as_id(&s); let data = Self::data(zalsa.table(), id); data.read_lock(zalsa.current_revision()); // Note that we do not need to add a dependency on the tracked struct // as IDs that are reused increment their generation, invalidating any // dependent queries directly. data.fields() } #[cfg(feature = "salsa_unstable")] /// Returns all data corresponding to the tracked struct. pub fn entries<'db>( &'db self, db: &'db dyn crate::Database, ) -> impl Iterator> { db.zalsa().table().slots_of::>() } } impl Ingredient for IngredientImpl where C: Configuration, { fn location(&self) -> &'static crate::ingredient::Location { &C::LOCATION } fn ingredient_index(&self) -> IngredientIndex { self.ingredient_index } unsafe fn maybe_changed_after( &self, _db: &dyn Database, _input: Id, _revision: Revision, _cycle_heads: &mut CycleHeads, ) -> VerifyResult { // Any change to a tracked struct results in a new ID generation. 
        VerifyResult::unchanged()
    }

    fn mark_validated_output(
        &self,
        _zalsa: &Zalsa,
        _executor: DatabaseKeyIndex,
        _output_key: crate::Id,
    ) {
        // we used to update the `updated_at` field but now we do it lazily when data is accessed
        //
        // FIXME: delete this method
    }

    fn remove_stale_output(
        &self,
        zalsa: &Zalsa,
        _executor: DatabaseKeyIndex,
        stale_output_key: crate::Id,
    ) {
        // This method is called when, in prior revisions,
        // `executor` creates a tracked struct `salsa_output_key`,
        // but it did not in the current revision.
        // In that case, we can delete `stale_output_key` and any data associated with it.
        self.delete_entity(zalsa, stale_output_key)
    }

    fn debug_name(&self) -> &'static str {
        C::DEBUG_NAME
    }

    fn memo_table_types(&self) -> Arc<MemoTableTypes> {
        self.memo_table_types.clone()
    }

    /// Returns memory usage information about any tracked structs.
    #[cfg(feature = "salsa_unstable")]
    fn memory_usage(&self, db: &dyn Database) -> Option<Vec<crate::database::SlotInfo>> {
        let memory_usage = self
            .entries(db)
            // SAFETY: The memo table belongs to a value that we allocated, so it
            // has the correct type.
            .map(|value| unsafe { value.memory_usage(&self.memo_table_types) })
            .collect();
        Some(memory_usage)
    }
}

impl<C> std::fmt::Debug for IngredientImpl<C>
where
    C: Configuration,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(std::any::type_name::<Self>())
            .field("ingredient_index", &self.ingredient_index)
            .finish()
    }
}

impl<C> Value<C>
where
    C: Configuration,
{
    /// Fields of this tracked struct.
    ///
    /// They can change across revisions, but they do not change within
    /// a particular revision.
    #[cfg_attr(not(feature = "salsa_unstable"), doc(hidden))]
    pub fn fields(&self) -> &C::Fields<'_> {
        // SAFETY: We are shrinking the lifetime from storage back to the db lifetime.
        unsafe { mem::transmute::<&C::Fields<'static>, &C::Fields<'_>>(&self.fields) }
    }

    fn take_memo_table(&mut self) -> MemoTable {
        // This fn is only called after `updated_at` has been set to `None`;
        // this ensures that there is no concurrent access
        // (and that the `&mut self` is accurate...).
        assert!(self.updated_at.load().is_none());

        mem::take(&mut self.memos)
    }

    fn read_lock(&self, current_revision: Revision) {
        loop {
            match self.updated_at.load() {
                None => {
                    panic!("access to field whilst the value is being initialized");
                }
                Some(r) => {
                    if r == current_revision {
                        return;
                    }

                    if self
                        .updated_at
                        .compare_exchange(Some(r), Some(current_revision))
                        .is_ok()
                    {
                        break;
                    }
                }
            }
        }
    }

    /// Returns memory usage information about the tracked struct.
    ///
    /// # Safety
    ///
    /// The `MemoTable` must belong to a `Value` of the correct type.
    #[cfg(feature = "salsa_unstable")]
    unsafe fn memory_usage(&self, memo_table_types: &MemoTableTypes) -> crate::database::SlotInfo {
        // SAFETY: The caller guarantees this is the correct types table.
        let memos = unsafe { memo_table_types.attach_memos(&self.memos) };

        crate::database::SlotInfo {
            debug_name: C::DEBUG_NAME,
            size_of_metadata: mem::size_of::<Self>() - mem::size_of::<C::Fields<'static>>(),
            size_of_fields: mem::size_of::<C::Fields<'static>>(),
            memos: memos.memory_usage(),
        }
    }
}

// SAFETY: `Value<C>` is our private type branded over the unique configuration `C`.
unsafe impl<C> Slot for Value<C>
where
    C: Configuration,
{
    #[inline(always)]
    unsafe fn memos(&self, current_revision: Revision) -> &crate::table::memo::MemoTable {
        // Acquiring the read lock here with the current revision
        // ensures that there is no danger of a race
        // when deleting a tracked struct.
self.read_lock(current_revision); &self.memos } #[inline(always)] fn memos_mut(&mut self) -> &mut crate::table::memo::MemoTable { &mut self.memos } } #[cfg(test)] mod tests { use super::*; #[test] fn disambiguate_map_works() { let mut d = DisambiguatorMap::default(); // set up all 4 permutations of differing field values let h1 = IdentityHash { ingredient_index: IngredientIndex::from(0), hash: 0, }; let h2 = IdentityHash { ingredient_index: IngredientIndex::from(1), hash: 0, }; let h3 = IdentityHash { ingredient_index: IngredientIndex::from(0), hash: 1, }; let h4 = IdentityHash { ingredient_index: IngredientIndex::from(1), hash: 1, }; assert_eq!(d.disambiguate(h1), Disambiguator(0)); assert_eq!(d.disambiguate(h1), Disambiguator(1)); assert_eq!(d.disambiguate(h2), Disambiguator(0)); assert_eq!(d.disambiguate(h2), Disambiguator(1)); assert_eq!(d.disambiguate(h3), Disambiguator(0)); assert_eq!(d.disambiguate(h3), Disambiguator(1)); assert_eq!(d.disambiguate(h4), Disambiguator(0)); assert_eq!(d.disambiguate(h4), Disambiguator(1)); } #[test] fn identity_map_works() { let mut d = IdentityMap::default(); // set up all 8 permutations of differing field values let i1 = Identity { ingredient_index: IngredientIndex::from(0), hash: 0, disambiguator: Disambiguator(0), }; let i2 = Identity { ingredient_index: IngredientIndex::from(1), hash: 0, disambiguator: Disambiguator(0), }; let i3 = Identity { ingredient_index: IngredientIndex::from(0), hash: 1, disambiguator: Disambiguator(0), }; let i4 = Identity { ingredient_index: IngredientIndex::from(1), hash: 1, disambiguator: Disambiguator(0), }; let i5 = Identity { ingredient_index: IngredientIndex::from(0), hash: 0, disambiguator: Disambiguator(1), }; let i6 = Identity { ingredient_index: IngredientIndex::from(1), hash: 0, disambiguator: Disambiguator(1), }; let i7 = Identity { ingredient_index: IngredientIndex::from(0), hash: 1, disambiguator: Disambiguator(1), }; let i8 = Identity { ingredient_index: IngredientIndex::from(1), hash: 1, disambiguator: Disambiguator(1), }; // SAFETY: We don't use the IDs within salsa internals so this is fine unsafe { assert_eq!(d.insert(i1, Id::from_index(0)), None); assert_eq!(d.insert(i2, Id::from_index(1)), None); assert_eq!(d.insert(i3, Id::from_index(2)), None); assert_eq!(d.insert(i4, Id::from_index(3)), None); assert_eq!(d.insert(i5, Id::from_index(4)), None); assert_eq!(d.insert(i6, Id::from_index(5)), None); assert_eq!(d.insert(i7, Id::from_index(6)), None); assert_eq!(d.insert(i8, Id::from_index(7)), None); assert_eq!(d.get(&i1), Some(Id::from_index(0))); assert_eq!(d.get(&i2), Some(Id::from_index(1))); assert_eq!(d.get(&i3), Some(Id::from_index(2))); assert_eq!(d.get(&i4), Some(Id::from_index(3))); assert_eq!(d.get(&i5), Some(Id::from_index(4))); assert_eq!(d.get(&i6), Some(Id::from_index(5))); assert_eq!(d.get(&i7), Some(Id::from_index(6))); assert_eq!(d.get(&i8), Some(Id::from_index(7))); }; } } salsa-0.23.0/src/update.rs000064400000000000000000000403021046102023000134270ustar 00000000000000#![allow(clippy::undocumented_unsafe_blocks)] // TODO(#697) document safety use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::{BuildHasher, Hash}; use std::marker::PhantomData; use std::path::PathBuf; #[cfg(feature = "rayon")] use rayon::iter::Either; use crate::sync::Arc; use crate::Revision; /// This is used by the macro generated code. /// If possible, uses `Update` trait, but else requires `'static`. 
/// /// To use: /// /// ```rust,ignore /// use crate::update::helper::Fallback; /// update::helper::Dispatch::<$ty>::maybe_update(pointer, new_value); /// ``` /// /// It is important that you specify the `$ty` explicitly. /// /// This uses the ["method dispatch hack"](https://github.com/nvzqz/impls#how-it-works) /// to use the `Update` trait if it is available and else fallback to `'static`. pub mod helper { use std::marker::PhantomData; use super::{update_fallback, Update}; pub struct Dispatch(PhantomData); #[allow(clippy::new_without_default)] impl Dispatch { pub fn new() -> Self { Dispatch(PhantomData) } } impl Dispatch where D: Update, { /// # Safety /// /// See the `maybe_update` method in the [`Update`][] trait. pub unsafe fn maybe_update(old_pointer: *mut D, new_value: D) -> bool { // SAFETY: Same safety conditions as `Update::maybe_update` unsafe { D::maybe_update(old_pointer, new_value) } } } /// # Safety /// /// Impl will fulfill the postconditions of `maybe_update` pub unsafe trait Fallback { /// # Safety /// /// Same safety conditions as `Update::maybe_update` unsafe fn maybe_update(old_pointer: *mut T, new_value: T) -> bool; } // SAFETY: Same safety conditions as `Update::maybe_update` unsafe impl Fallback for Dispatch { unsafe fn maybe_update(old_pointer: *mut T, new_value: T) -> bool { // SAFETY: Same safety conditions as `Update::maybe_update` unsafe { update_fallback(old_pointer, new_value) } } } } /// "Fallback" for maybe-update that is suitable for fully owned T /// that implement `Eq`. In this version, we update only if the new value /// is not `Eq` to the old one. Note that given `Eq` impls that are not just /// structurally comparing fields, this may cause us not to update even if /// the value has changed (presumably because this change is not semantically /// significant). /// /// # Safety /// /// See `Update::maybe_update` pub unsafe fn update_fallback(old_pointer: *mut T, new_value: T) -> bool where T: 'static + PartialEq, { // SAFETY: Because everything is owned, this ref is simply a valid `&mut` let old_ref: &mut T = unsafe { &mut *old_pointer }; if *old_ref != new_value { *old_ref = new_value; true } else { // Subtle but important: Eq impls can be buggy or define equality // in surprising ways. If it says that the value has not changed, // we do not modify the existing value, and thus do not have to // update the revision, as downstream code will not see the new value. false } } /// Helper for generated code. Updates `*old_pointer` with `new_value` /// and updates `*old_revision` with `new_revision.` Used for fields /// tagged with `#[no_eq]` pub fn always_update( old_revision: &mut Revision, new_revision: Revision, old_pointer: &mut T, new_value: T, ) { *old_revision = new_revision; *old_pointer = new_value; } /// # Safety /// /// Implementing this trait requires the implementor to verify: /// /// * `maybe_update` ensures the properties it is intended to ensure. /// * If the value implements `Eq`, it is safe to compare an instance /// of the value from an older revision with one from the newer /// revision. If the value compares as equal, no update is needed to /// bring it into the newer revision. /// /// NB: The second point implies that `Update` cannot be implemented for any /// `&'db T` -- (i.e., any Rust reference tied to the database). /// Such a value could refer to memory that was freed in some /// earlier revision. 
Even if the memory is still valid, it could also
/// have been part of a tracked struct whose values were mutated,
/// thus invalidating the `'db` lifetime (from a stacked borrows perspective).
/// Either way, the `Eq` implementation would be invalid.
pub unsafe trait Update {
    /// # Returns
    ///
    /// True if the value should be considered to have changed in the new revision.
    ///
    /// # Safety
    ///
    /// ## Requires
    ///
    /// Informally, requires that `old_value` points to a value in the
    /// database that is potentially from a previous revision and `new_value`
    /// points to a value produced in this revision.
    ///
    /// More formally, requires that
    ///
    /// * all parameters meet the [validity and safety invariants][i] for their type
    /// * `old_value` further points to allocated memory that meets the [validity invariant][i] for `Self`
    /// * all data *owned* by `old_value` further meets its safety invariant
    /// * note that borrowed data in `old_value` only meets its validity invariant
    ///   and hence cannot be dereferenced; essentially, a `&T` may point to memory
    ///   in the database which has been modified or even freed in the newer revision.
    ///
    /// [i]: https://www.ralfj.de/blog/2018/08/22/two-kinds-of-invariants.html
    ///
    /// ## Ensures
    ///
    /// That `old_value` is updated with the new value.
    unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool;
}

unsafe impl Update for std::convert::Infallible {
    unsafe fn maybe_update(_old_pointer: *mut Self, new_value: Self) -> bool {
        match new_value {}
    }
}

macro_rules! maybe_update_vec {
    ($old_pointer: expr, $new_vec: expr, $elem_ty: ty) => {{
        let old_pointer = $old_pointer;
        let new_vec = $new_vec;
        let old_vec: &mut Self = unsafe { &mut *old_pointer };

        if old_vec.len() != new_vec.len() {
            old_vec.clear();
            old_vec.extend(new_vec);
            return true;
        }

        let mut changed = false;
        for (old_element, new_element) in old_vec.iter_mut().zip(new_vec) {
            changed |= unsafe { <$elem_ty>::maybe_update(old_element, new_element) };
        }
        changed
    }};
}

unsafe impl<T> Update for Vec<T>
where
    T: Update,
{
    unsafe fn maybe_update(old_pointer: *mut Self, new_vec: Self) -> bool {
        maybe_update_vec!(old_pointer, new_vec, T)
    }
}

unsafe impl<T> Update for thin_vec::ThinVec<T>
where
    T: Update,
{
    unsafe fn maybe_update(old_pointer: *mut Self, new_vec: Self) -> bool {
        maybe_update_vec!(old_pointer, new_vec, T)
    }
}

unsafe impl<A> Update for smallvec::SmallVec<A>
where
    A: smallvec::Array,
    A::Item: Update,
{
    unsafe fn maybe_update(old_pointer: *mut Self, new_vec: Self) -> bool {
        maybe_update_vec!(old_pointer, new_vec, A::Item)
    }
}

macro_rules! maybe_update_set {
    ($old_pointer: expr, $new_set: expr) => {{
        let old_pointer = $old_pointer;
        let new_set = $new_set;
        let old_set: &mut Self = unsafe { &mut *old_pointer };

        if *old_set == new_set {
            false
        } else {
            old_set.clear();
            old_set.extend(new_set);
            return true;
        }
    }};
}

unsafe impl<K, S> Update for HashSet<K, S>
where
    K: Update + Eq + Hash,
    S: BuildHasher,
{
    unsafe fn maybe_update(old_pointer: *mut Self, new_set: Self) -> bool {
        maybe_update_set!(old_pointer, new_set)
    }
}

unsafe impl<K> Update for BTreeSet<K>
where
    K: Update + Eq + Ord,
{
    unsafe fn maybe_update(old_pointer: *mut Self, new_set: Self) -> bool {
        maybe_update_set!(old_pointer, new_set)
    }
}

// Duck typing FTW, it was too annoying to make a proper function out of this.
macro_rules!
maybe_update_map { ($old_pointer: expr, $new_map: expr) => { 'function: { let old_pointer = $old_pointer; let new_map = $new_map; let old_map: &mut Self = unsafe { &mut *old_pointer }; // To be considered "equal", the set of keys // must be the same between the two maps. let same_keys = old_map.len() == new_map.len() && old_map.keys().all(|k| new_map.contains_key(k)); // If the set of keys has changed, then just pull in the new values // from new_map and discard the old ones. if !same_keys { old_map.clear(); old_map.extend(new_map); break 'function true; } // Otherwise, recursively descend to the values. // We do not invoke `K::update` because we assume // that if the values are `Eq` they must not need // updating (see the trait criteria). let mut changed = false; for (key, new_value) in new_map.into_iter() { let old_value = old_map.get_mut(&key).unwrap(); changed |= unsafe { V::maybe_update(old_value, new_value) }; } changed } }; } unsafe impl Update for HashMap where K: Update + Eq + Hash, V: Update, S: BuildHasher, { unsafe fn maybe_update(old_pointer: *mut Self, new_map: Self) -> bool { maybe_update_map!(old_pointer, new_map) } } unsafe impl Update for hashbrown::HashMap where K: Update + Eq + Hash, V: Update, S: BuildHasher, { unsafe fn maybe_update(old_pointer: *mut Self, new_map: Self) -> bool { maybe_update_map!(old_pointer, new_map) } } unsafe impl Update for hashbrown::HashSet where K: Update + Eq + Hash, S: BuildHasher, { unsafe fn maybe_update(old_pointer: *mut Self, new_set: Self) -> bool { maybe_update_set!(old_pointer, new_set) } } unsafe impl Update for indexmap::IndexMap where K: Update + Eq + Hash, V: Update, S: BuildHasher, { unsafe fn maybe_update(old_pointer: *mut Self, new_map: Self) -> bool { maybe_update_map!(old_pointer, new_map) } } unsafe impl Update for indexmap::IndexSet where K: Update + Eq + Hash, S: BuildHasher, { unsafe fn maybe_update(old_pointer: *mut Self, new_set: Self) -> bool { maybe_update_set!(old_pointer, new_set) } } unsafe impl Update for BTreeMap where K: Update + Eq + Ord, V: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_map: Self) -> bool { maybe_update_map!(old_pointer, new_map) } } unsafe impl Update for Box where T: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_box: Self) -> bool { let old_box: &mut Box = unsafe { &mut *old_pointer }; unsafe { T::maybe_update(&mut **old_box, *new_box) } } } unsafe impl Update for Box<[T]> where T: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_box: Self) -> bool { let old_box: &mut Box<[T]> = unsafe { &mut *old_pointer }; if old_box.len() == new_box.len() { let mut changed = false; for (old_element, new_element) in old_box.iter_mut().zip(new_box) { changed |= unsafe { T::maybe_update(old_element, new_element) }; } changed } else { *old_box = new_box; true } } } unsafe impl Update for Arc where T: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_arc: Self) -> bool { let old_arc: &mut Arc = unsafe { &mut *old_pointer }; if Arc::ptr_eq(old_arc, &new_arc) { return false; } if let Some(inner) = Arc::get_mut(old_arc) { match Arc::try_unwrap(new_arc) { Ok(new_inner) => unsafe { T::maybe_update(inner, new_inner) }, Err(new_arc) => { // We can't unwrap the new arc, so we have to update the old one in place. 
*old_arc = new_arc; true } } } else { unsafe { *old_pointer = new_arc }; true } } } unsafe impl Update for [T; N] where T: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_vec: Self) -> bool { let old_pointer: *mut T = unsafe { std::ptr::addr_of_mut!((*old_pointer)[0]) }; let mut changed = false; for (new_element, i) in new_vec.into_iter().zip(0..) { changed |= unsafe { T::maybe_update(old_pointer.add(i), new_element) }; } changed } } unsafe impl Update for Result where T: Update, E: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool { let old_value = unsafe { &mut *old_pointer }; match (old_value, new_value) { (Ok(old), Ok(new)) => unsafe { T::maybe_update(old, new) }, (Err(old), Err(new)) => unsafe { E::maybe_update(old, new) }, (old_value, new_value) => { *old_value = new_value; true } } } } #[cfg(feature = "rayon")] unsafe impl Update for Either where L: Update, R: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool { let old_value = unsafe { &mut *old_pointer }; match (old_value, new_value) { (Either::Left(old), Either::Left(new)) => unsafe { L::maybe_update(old, new) }, (Either::Right(old), Either::Right(new)) => unsafe { R::maybe_update(old, new) }, (old_value, new_value) => { *old_value = new_value; true } } } } macro_rules! fallback_impl { ($($t:ty,)*) => { $( unsafe impl Update for $t { unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool { unsafe { update_fallback(old_pointer, new_value) } } } )* } } fallback_impl! { String, i64, u64, i32, u32, i16, u16, i8, u8, bool, f32, f64, usize, isize, PathBuf, } #[cfg(feature = "compact_str")] fallback_impl! { compact_str::CompactString, } macro_rules! tuple_impl { ($($t:ident),*; $($u:ident),*) => { unsafe impl<$($t),*> Update for ($($t,)*) where $($t: Update,)* { #[allow(non_snake_case)] unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool { let ($($t,)*) = new_value; let ($($u,)*) = unsafe { &mut *old_pointer }; #[allow(unused_mut)] let mut changed = false; $( unsafe { changed |= Update::maybe_update($u, $t); } )* changed } } } } // Create implementations for tuples up to arity 12 tuple_impl!(;); tuple_impl!(A; a); tuple_impl!(A, B; a, b); tuple_impl!(A, B, C; a, b, c); tuple_impl!(A, B, C, D; a, b, c, d); tuple_impl!(A, B, C, D, E; a, b, c, d, e); tuple_impl!(A, B, C, D, E, F; a, b, c, d, e, f); tuple_impl!(A, B, C, D, E, F, G; a, b, c, d, e, f, g); tuple_impl!(A, B, C, D, E, F, G, H; a, b, c, d, e, f, g, h); tuple_impl!(A, B, C, D, E, F, G, H, I; a, b, c, d, e, f, g, h, i); tuple_impl!(A, B, C, D, E, F, G, H, I, J; a, b, c, d, e, f, g, h, i, j); tuple_impl!(A, B, C, D, E, F, G, H, I, J, K; a, b, c, d, e, f, g, h, i, j, k); tuple_impl!(A, B, C, D, E, F, G, H, I, J, K, L; a, b, c, d, e, f, g, h, i, j, k, l); unsafe impl Update for Option where T: Update, { unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool { let old_value = unsafe { &mut *old_pointer }; match (old_value, new_value) { (Some(old), Some(new)) => unsafe { T::maybe_update(old, new) }, (None, None) => false, (old_value, new_value) => { *old_value = new_value; true } } } } unsafe impl Update for PhantomData { unsafe fn maybe_update(_old_pointer: *mut Self, _new_value: Self) -> bool { false } } salsa-0.23.0/src/views.rs000064400000000000000000000111701046102023000133030ustar 00000000000000use std::any::{Any, TypeId}; use crate::Database; /// A `Views` struct is associated with some specific database type /// (a `DatabaseImpl` for some existential 
`U`). It contains functions /// to downcast from `dyn Database` to `dyn DbView` for various traits `DbView` via this specific /// database type. /// None of these types are known at compilation time, they are all checked /// dynamically through `TypeId` magic. pub struct Views { source_type_id: TypeId, view_casters: boxcar::Vec, } struct ViewCaster { /// The id of the target type `dyn DbView` that we can cast to. target_type_id: TypeId, /// The name of the target type `dyn DbView` that we can cast to. type_name: &'static str, /// Type-erased function pointer that downcasts from `dyn Database` to `dyn DbView`. cast: ErasedDatabaseDownCasterSig, } impl ViewCaster { fn new(func: unsafe fn(&dyn Database) -> &DbView) -> ViewCaster { ViewCaster { target_type_id: TypeId::of::(), type_name: std::any::type_name::(), // SAFETY: We are type erasing for storage, taking care of unerasing before we call // the function pointer. cast: unsafe { std::mem::transmute::, ErasedDatabaseDownCasterSig>( func, ) }, } } } type ErasedDatabaseDownCasterSig = unsafe fn(&dyn Database) -> *const (); type DatabaseDownCasterSig = unsafe fn(&dyn Database) -> &DbView; pub struct DatabaseDownCaster(TypeId, DatabaseDownCasterSig); impl DatabaseDownCaster { pub fn downcast<'db>(&self, db: &'db dyn Database) -> &'db DbView { assert_eq!( self.0, db.type_id(), "Database type does not match the expected type for this `Views` instance" ); // SAFETY: We've asserted that the database is correct. unsafe { (self.1)(db) } } /// Downcast `db` to `DbView`. /// /// # Safety /// /// The caller must ensure that `db` is of the correct type. pub unsafe fn downcast_unchecked<'db>(&self, db: &'db dyn Database) -> &'db DbView { // SAFETY: The caller must ensure that `db` is of the correct type. unsafe { (self.1)(db) } } } impl Views { pub(crate) fn new() -> Self { let source_type_id = TypeId::of::(); let view_casters = boxcar::Vec::new(); // special case the no-op transformation, that way we skip out on reconstructing the wide pointer view_casters.push(ViewCaster::new::(|db| db)); Self { source_type_id, view_casters, } } /// Add a new downcaster from `dyn Database` to `dyn DbView`. pub fn add(&self, func: DatabaseDownCasterSig) { let target_type_id = TypeId::of::(); if self .view_casters .iter() .any(|(_, u)| u.target_type_id == target_type_id) { return; } self.view_casters.push(ViewCaster::new::(func)); } /// Retrieve an downcaster function from `dyn Database` to `dyn DbView`. /// /// # Panics /// /// If the underlying type of `db` is not the same as the database type this upcasts was created for. pub fn downcaster_for(&self) -> DatabaseDownCaster { let view_type_id = TypeId::of::(); for (_idx, view) in self.view_casters.iter() { if view.target_type_id == view_type_id { // SAFETY: We are unerasing the type erased function pointer having made sure the // TypeId matches. 
return DatabaseDownCaster(self.source_type_id, unsafe { std::mem::transmute::>( view.cast, ) }); } } panic!( "No downcaster registered for type `{}` in `Views`", std::any::type_name::(), ); } } impl std::fmt::Debug for Views { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Views") .field("view_casters", &self.view_casters) .finish() } } impl std::fmt::Debug for ViewCaster { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("DynViewCaster") .field(&self.type_name) .finish() } } salsa-0.23.0/src/zalsa.rs000064400000000000000000000462021046102023000132640ustar 00000000000000use std::any::{Any, TypeId}; use std::hash::BuildHasherDefault; use std::marker::PhantomData; use std::mem; use std::num::NonZeroU32; use std::panic::RefUnwindSafe; use rustc_hash::FxHashMap; use crate::hash::TypeIdHasher; use crate::ingredient::{Ingredient, Jar}; use crate::nonce::{Nonce, NonceGenerator}; use crate::runtime::Runtime; use crate::sync::atomic::{AtomicU64, Ordering}; use crate::sync::{papaya, Mutex, RwLock}; use crate::table::memo::MemoTableWithTypes; use crate::table::Table; use crate::views::Views; use crate::zalsa_local::ZalsaLocal; use crate::{Database, Durability, Id, Revision}; /// Internal plumbing trait. /// /// [`ZalsaDatabase`] is created automatically when [`#[salsa::db]`](`crate::db`) /// is attached to a database struct. it Contains methods that give access /// to the internal data from the `storage` field. /// /// # Safety /// /// The system assumes this is implemented by a salsa procedural macro /// which makes use of private data from the [`Storage`](`crate::storage::Storage`) struct. /// Do not implement this yourself, instead, apply the [`#[salsa::db]`](`crate::db`) macro /// to your database. pub unsafe trait ZalsaDatabase: Any { /// Plumbing method: access both zalsa and zalsa-local at once. /// More efficient if you need both as it does only a single vtable dispatch. #[doc(hidden)] fn zalsas(&self) -> (&Zalsa, &ZalsaLocal) { (self.zalsa(), self.zalsa_local()) } /// Plumbing method: Access the internal salsa methods. #[doc(hidden)] fn zalsa(&self) -> &Zalsa; /// Plumbing method: Access the internal salsa methods for mutating the database. /// /// **WARNING:** Triggers cancellation to other database handles. /// This can lead to deadlock! #[doc(hidden)] fn zalsa_mut(&mut self) -> &mut Zalsa; /// Access the thread-local state associated with this database #[doc(hidden)] fn zalsa_local(&self) -> &ZalsaLocal; /// Clone the database. #[doc(hidden)] fn fork_db(&self) -> Box; } pub fn views(db: &Db) -> &Views { db.zalsa().views() } /// Nonce type representing the underlying database storage. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct StorageNonce; // Generator for storage nonces. static NONCE: NonceGenerator = NonceGenerator::new(); /// An ingredient index identifies a particular [`Ingredient`] in the database. /// /// The database contains a number of jars, and each jar contains a number of ingredients. /// Each ingredient is given a unique index as the database is being created. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub struct IngredientIndex(u32); impl IngredientIndex { /// The maximum supported ingredient index. /// /// This reserves one bit for an optional tag. const MAX_INDEX: u32 = 0x7FFF_FFFF; /// Create an ingredient index from a `usize`. 
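    // A worked example of the tag-bit packing implemented by `with_tag`/`tag`
    // below: `MAX_INDEX` is `0x7FFF_FFFF`, so bit 31 is free for the tag.
    // `IngredientIndex(5).with_tag(true)` stores `0x8000_0005`; `tag()` reads
    // bit 31; and masking with `MAX_INDEX` recovers the raw index `5`.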
pub(crate) fn from(v: usize) -> Self { assert!(v <= Self::MAX_INDEX as usize); Self(v as u32) } /// Convert the ingredient index back into a `u32`. pub(crate) fn as_u32(self) -> u32 { self.0 } pub fn successor(self, index: usize) -> Self { IngredientIndex(self.0 + 1 + index as u32) } /// Returns a new `IngredientIndex` with the tag bit set to the provided value. pub(crate) fn with_tag(mut self, tag: bool) -> IngredientIndex { self.0 &= Self::MAX_INDEX; self.0 |= (tag as u32) << 31; self } /// Returns the value of the tag bit. pub(crate) fn tag(self) -> bool { self.0 & !Self::MAX_INDEX != 0 } } /// A special secondary index *just* for ingredients that attach /// "memos" to salsa structs (currently: just tracked functions). #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub struct MemoIngredientIndex(u32); impl MemoIngredientIndex { pub(crate) fn from_usize(u: usize) -> Self { assert!(u <= u32::MAX as usize); MemoIngredientIndex(u as u32) } #[inline] pub(crate) fn as_usize(self) -> usize { self.0 as usize } } /// The "plumbing interface" to the Salsa database. Stores all the ingredients and other data. /// /// **NOT SEMVER STABLE.** pub struct Zalsa { views_of: Views, nonce: Nonce, /// Map from the [`IngredientIndex::as_usize`][] of a salsa struct to a list of /// [ingredient-indices](`IngredientIndex`) for tracked functions that have this salsa struct /// as input. memo_ingredient_indices: RwLock>>, /// Map from the type-id of an `impl Jar` to the index of its first ingredient. jar_map: papaya::HashMap>, /// The write-lock for `jar_map`. jar_map_lock: Mutex<()>, /// A map from the `IngredientIndex` to the `TypeId` of its ID struct. /// /// Notably this is not the reverse mapping of `jar_map`. ingredient_to_id_struct_type_id_map: RwLock>, /// Vector of ingredients. /// /// Immutable unless the mutex on `ingredients_map` is held. ingredients_vec: boxcar::Vec>, /// Indices of ingredients that require reset when a new revision starts. ingredients_requiring_reset: boxcar::Vec, /// The runtime for this particular salsa database handle. /// Each handle gets its own runtime, but the runtimes have shared state between them. runtime: Runtime, event_callback: Option>, } /// All fields on Zalsa are locked behind [`Mutex`]es and [`RwLock`]s and cannot enter /// inconsistent states. The contents of said fields are largely ID mappings, with the exception /// of [`Runtime::dependency_graph`]. However, [`Runtime::dependency_graph`] does not /// invoke any queries and as such there will be no panic from code downstream of Salsa. It can only /// panic if an assertion inside of Salsa fails. 
impl RefUnwindSafe for Zalsa {} impl Zalsa { pub(crate) fn new( event_callback: Option>, ) -> Self { Self { views_of: Views::new::(), nonce: NONCE.nonce(), jar_map: papaya::HashMap::default(), jar_map_lock: Mutex::default(), ingredient_to_id_struct_type_id_map: Default::default(), ingredients_vec: boxcar::Vec::new(), ingredients_requiring_reset: boxcar::Vec::new(), runtime: Runtime::default(), memo_ingredient_indices: Default::default(), event_callback, } } pub(crate) fn nonce(&self) -> Nonce { self.nonce } pub(crate) fn runtime(&self) -> &Runtime { &self.runtime } pub(crate) fn runtime_mut(&mut self) -> &mut Runtime { &mut self.runtime } /// Returns the [`Table`] used to store the value of salsa structs #[inline] pub(crate) fn table(&self) -> &Table { self.runtime.table() } /// Returns the [`MemoTable`][] for the salsa struct with the given id pub(crate) fn memo_table_for(&self, id: Id) -> MemoTableWithTypes<'_> { let table = self.table(); // SAFETY: We are supplying the correct current revision unsafe { table.memos(id, self.current_revision()) } } #[inline] pub(crate) fn lookup_ingredient(&self, index: IngredientIndex) -> &dyn Ingredient { let index = index.as_u32() as usize; self.ingredients_vec .get(index) .unwrap_or_else(|| panic!("index `{index}` is uninitialized")) .as_ref() } pub(crate) fn ingredient_index_for_memo( &self, struct_ingredient_index: IngredientIndex, memo_ingredient_index: MemoIngredientIndex, ) -> IngredientIndex { self.memo_ingredient_indices.read()[struct_ingredient_index.as_u32() as usize] [memo_ingredient_index.as_usize()] } #[cfg(feature = "salsa_unstable")] pub(crate) fn ingredients(&self) -> impl Iterator { self.ingredients_vec .iter() .map(|(_, ingredient)| ingredient.as_ref()) } /// Starts unwinding the stack if the current revision is cancelled. /// /// This method can be called by query implementations that perform /// potentially expensive computations, in order to speed up propagation of /// cancellation. /// /// Cancellation will automatically be triggered by salsa on any query /// invocation. 
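    // A worked example of the memo-ingredient indirection used by
    // `ingredient_index_for_memo` above and `next_memo_ingredient_index` below
    // (all indices illustrative): if tracked functions `f` and `g` both take
    // the same tracked struct (struct ingredient 3) as input, they are handed
    // `MemoIngredientIndex` 0 and 1, and `memo_ingredient_indices[3]` stores
    // their full `IngredientIndex` values, so `ingredient_index_for_memo(3, 1)`
    // maps back to `g`'s ingredient index.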
#[inline] pub(crate) fn unwind_if_revision_cancelled(&self, zalsa_local: &ZalsaLocal) { self.event(&|| crate::Event::new(crate::EventKind::WillCheckCancellation)); if self.runtime().load_cancellation_flag() { zalsa_local.unwind_cancelled(self.current_revision()); } } pub(crate) fn next_memo_ingredient_index( &self, struct_ingredient_index: IngredientIndex, ingredient_index: IngredientIndex, ) -> MemoIngredientIndex { let mut memo_ingredients = self.memo_ingredient_indices.write(); let idx = struct_ingredient_index.as_u32() as usize; let memo_ingredients = if let Some(memo_ingredients) = memo_ingredients.get_mut(idx) { memo_ingredients } else { memo_ingredients.resize_with(idx + 1, Vec::new); memo_ingredients.get_mut(idx).unwrap() }; let mi = MemoIngredientIndex::from_usize(memo_ingredients.len()); memo_ingredients.push(ingredient_index); mi } } /// Semver unstable APIs used by the macro expansions impl Zalsa { /// **NOT SEMVER STABLE** pub fn views(&self) -> &Views { &self.views_of } /// **NOT SEMVER STABLE** #[inline] pub fn lookup_page_type_id(&self, id: Id) -> TypeId { let ingredient_index = self.ingredient_index(id); *self .ingredient_to_id_struct_type_id_map .read() .get(&ingredient_index) .expect("should have the ingredient index available") } /// **NOT SEMVER STABLE** #[doc(hidden)] #[inline] pub fn lookup_jar_by_type(&self) -> JarEntry<'_, J> { let jar_type_id = TypeId::of::(); let guard = self.jar_map.guard(); match self.jar_map.get(&jar_type_id, &guard) { Some(index) => JarEntry::Occupied(index), None => JarEntry::Vacant { guard, zalsa: self, _jar: PhantomData, }, } } #[cold] #[inline(never)] fn add_or_lookup_jar_by_type(&self, guard: &papaya::LocalGuard<'_>) -> IngredientIndex { let jar_type_id = TypeId::of::(); let dependencies = J::create_dependencies(self); let jar_map_lock = self.jar_map_lock.lock(); let index = IngredientIndex::from(self.ingredients_vec.count()); // Someone made it earlier than us. if let Some(index) = self.jar_map.get(&jar_type_id, guard) { return index; }; let ingredients = J::create_ingredients(self, index, dependencies); for ingredient in ingredients { let expected_index = ingredient.ingredient_index(); if ingredient.requires_reset_for_new_revision() { self.ingredients_requiring_reset.push(expected_index); } let actual_index = self.ingredients_vec.push(ingredient); assert_eq!( expected_index.as_u32() as usize, actual_index, "ingredient `{:?}` was predicted to have index `{:?}` but actually has index `{:?}`", self.ingredients_vec[actual_index], expected_index.as_u32(), actual_index, ); } // Insert the index after all ingredients are inserted to avoid exposing // partially initialized jars to readers. 
self.jar_map.insert(jar_type_id, index, guard); drop(jar_map_lock); self.ingredient_to_id_struct_type_id_map .write() .insert(index, J::id_struct_type_id()); index } /// **NOT SEMVER STABLE** #[doc(hidden)] pub fn lookup_ingredient_mut( &mut self, index: IngredientIndex, ) -> (&mut dyn Ingredient, &mut Runtime) { let index = index.as_u32() as usize; let ingredient = self .ingredients_vec .get_mut(index) .unwrap_or_else(|| panic!("index `{index}` is uninitialized")); (ingredient.as_mut(), &mut self.runtime) } /// **NOT SEMVER STABLE** #[doc(hidden)] #[inline] pub fn current_revision(&self) -> Revision { self.runtime.current_revision() } /// **NOT SEMVER STABLE** #[doc(hidden)] #[inline] pub fn last_changed_revision(&self, durability: Durability) -> Revision { self.runtime.last_changed_revision(durability) } /// **NOT SEMVER STABLE** /// Triggers a new revision. #[doc(hidden)] pub fn new_revision(&mut self) -> Revision { let new_revision = self.runtime.new_revision(); let _span = tracing::debug_span!("new_revision", ?new_revision).entered(); for (_, index) in self.ingredients_requiring_reset.iter() { let index = index.as_u32() as usize; let ingredient = self .ingredients_vec .get_mut(index) .unwrap_or_else(|| panic!("index `{index}` is uninitialized")); ingredient.reset_for_new_revision(self.runtime.table_mut()); } new_revision } /// **NOT SEMVER STABLE** #[doc(hidden)] pub fn evict_lru(&mut self) { let _span = tracing::debug_span!("evict_lru").entered(); for (_, index) in self.ingredients_requiring_reset.iter() { let index = index.as_u32() as usize; self.ingredients_vec .get_mut(index) .unwrap_or_else(|| panic!("index `{index}` is uninitialized")) .reset_for_new_revision(self.runtime.table_mut()); } } #[inline] pub fn ingredient_index(&self, id: Id) -> IngredientIndex { self.table().ingredient_index(id) } #[inline(always)] pub fn event(&self, event: &dyn Fn() -> crate::Event) { if let Some(event_callback) = &self.event_callback { event_callback(event()); } } } pub enum JarEntry<'a, J> { Occupied(IngredientIndex), Vacant { zalsa: &'a Zalsa, guard: papaya::LocalGuard<'a>, _jar: PhantomData, }, } impl JarEntry<'_, J> where J: Jar, { #[inline] pub fn get(&self) -> Option { match *self { JarEntry::Occupied(index) => Some(index), JarEntry::Vacant { .. } => None, } } #[inline] pub fn get_or_create(&self) -> IngredientIndex { match self { JarEntry::Occupied(index) => *index, JarEntry::Vacant { zalsa, guard, _jar } => zalsa.add_or_lookup_jar_by_type::(guard), } } } /// Caches a pointer to an ingredient in a database. /// Optimized for the case of a single database. pub struct IngredientCache where I: Ingredient, { // A packed representation of `Option<(Nonce, IngredientIndex)>`. // // This allows us to replace a lock in favor of an atomic load. This works thanks to `Nonce` // having a niche, which means the entire type can fit into an `AtomicU64`. cached_data: AtomicU64, phantom: PhantomData I>, } impl Default for IngredientCache where I: Ingredient, { fn default() -> Self { Self::new() } } impl IngredientCache where I: Ingredient, { const UNINITIALIZED: u64 = 0; /// Create a new cache pub const fn new() -> Self { Self { cached_data: AtomicU64::new(Self::UNINITIALIZED), phantom: PhantomData, } } /// Get a reference to the ingredient in the database. /// If the ingredient is not already in the cache, it will be created. 
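    // Editor's sketch (not part of salsa): the `Option<(Nonce, IngredientIndex)>`
    // packing described above, with plain `u32`s standing in for the real types.
    // Because the nonce is non-zero, the packed value can never be 0, so 0 is
    // free to serve as `UNINITIALIZED`.
    #[allow(dead_code)]
    fn _packing_sketch() {
        let nonce: u32 = 7; // non-zero by construction in the real `Nonce`
        let index: u32 = 42;
        let packed = ((nonce as u64) << u32::BITS) | index as u64;
        assert_ne!(packed, 0);
        // Unpacking recovers both halves losslessly.
        assert_eq!((packed >> u32::BITS) as u32, nonce);
        assert_eq!(packed as u32, index);
    }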
#[inline(always)] pub fn get_or_create<'db>( &self, zalsa: &'db Zalsa, create_index: impl Fn() -> IngredientIndex, ) -> &'db I { let index = self.get_or_create_index(zalsa, create_index); zalsa.lookup_ingredient(index).assert_type::() } /// Get a reference to the ingredient in the database. /// If the ingredient is not already in the cache, it will be created. #[inline(always)] pub fn get_or_create_index( &self, zalsa: &Zalsa, create_index: impl Fn() -> IngredientIndex, ) -> IngredientIndex { const _: () = assert!( mem::size_of::<(Nonce, IngredientIndex)>() == mem::size_of::() ); let cached_data = self.cached_data.load(Ordering::Acquire); if cached_data == Self::UNINITIALIZED { #[cold] #[inline(never)] fn get_or_create_index_slow( this: &IngredientCache, zalsa: &Zalsa, create_index: impl Fn() -> IngredientIndex, ) -> IngredientIndex { let index = create_index(); let nonce = zalsa.nonce().into_u32().get() as u64; let packed = (nonce << u32::BITS) | (index.as_u32() as u64); debug_assert_ne!(packed, IngredientCache::::UNINITIALIZED); // Discard the result, whether we won over the cache or not does not matter // we know that something has been cached now _ = this.cached_data.compare_exchange( IngredientCache::::UNINITIALIZED, packed, Ordering::Release, Ordering::Acquire, ); // and we already have our index computed so we can just use that index } return get_or_create_index_slow(self, zalsa, create_index); }; // unpack our u64 // SAFETY: We've checked against `UNINITIALIZED` (0) above and so the upper bits must be non-zero let nonce = Nonce::::from_u32(unsafe { NonZeroU32::new_unchecked((cached_data >> u32::BITS) as u32) }); let mut index = IngredientIndex(cached_data as u32); if zalsa.nonce() != nonce { index = create_index(); } index } } /// Given a wide pointer `T`, extracts the data pointer (typed as `U`). /// /// # Safety /// /// `U` must be correct type for the data pointer. pub unsafe fn transmute_data_ptr(t: &T) -> &U { let t: *const T = t; let u: *const U = t as *const U; // SAFETY: the caller must guarantee that `T` is a wide pointer for `U` unsafe { &*u } } /// Given a wide pointer `T`, extracts the data pointer (typed as `U`). /// /// # Safety /// /// `U` must be correct type for the data pointer. pub(crate) unsafe fn transmute_data_mut_ptr(t: &mut T) -> &mut U { let t: *mut T = t; let u: *mut U = t as *mut U; // SAFETY: the caller must guarantee that `T` is a wide pointer for `U` unsafe { &mut *u } } salsa-0.23.0/src/zalsa_local.rs000064400000000000000000001056521046102023000144430ustar 00000000000000use std::cell::RefCell; use std::panic::UnwindSafe; use std::ptr::{self, NonNull}; use rustc_hash::FxHashMap; use thin_vec::ThinVec; use tracing::debug; use crate::accumulator::accumulated_map::{AccumulatedMap, AtomicInputAccumulatedValues}; use crate::active_query::QueryStack; use crate::cycle::{empty_cycle_heads, CycleHeads, IterationCount}; use crate::durability::Durability; use crate::key::DatabaseKeyIndex; use crate::runtime::Stamp; use crate::sync::atomic::AtomicBool; use crate::table::{PageIndex, Slot, Table}; use crate::tracked_struct::{Disambiguator, Identity, IdentityHash, IdentityMap}; use crate::zalsa::{IngredientIndex, Zalsa}; use crate::{Accumulator, Cancelled, Id, Revision}; /// State that is specific to a single execution thread. /// /// Internally, this type uses ref-cells. /// /// **Note also that all mutations to the database handle (and hence /// to the local-state) must be undone during unwinding.** pub struct ZalsaLocal { /// Vector of active queries. 
/// /// Unwinding note: pushes onto this vector must be popped -- even /// during unwinding. query_stack: RefCell, /// Stores the most recent page for a given ingredient. /// This is thread-local to avoid contention. most_recent_pages: RefCell>, } impl ZalsaLocal { pub(crate) fn new() -> Self { ZalsaLocal { query_stack: RefCell::new(QueryStack::default()), most_recent_pages: RefCell::new(FxHashMap::default()), } } pub(crate) fn record_unfilled_pages(&mut self, table: &Table) { let most_recent_pages = self.most_recent_pages.get_mut(); most_recent_pages .drain() .for_each(|(ingredient, page)| table.record_unfilled_page(ingredient, page)); } /// Allocate a new id in `table` for the given ingredient /// storing `value`. Remembers the most recent page from this /// thread and attempts to reuse it. pub(crate) fn allocate( &self, zalsa: &Zalsa, ingredient: IngredientIndex, mut value: impl FnOnce(Id) -> T, ) -> Id { let memo_types = || { zalsa .lookup_ingredient(ingredient) .memo_table_types() .clone() }; // Find the most recent page, pushing a page if needed let mut page = *self .most_recent_pages .borrow_mut() .entry(ingredient) .or_insert_with(|| { zalsa .table() .fetch_or_push_page::(ingredient, memo_types) }); loop { // Try to allocate an entry on that page let page_ref = zalsa.table().page::(page); match page_ref.allocate(page, value) { // If successful, return Ok(id) => return id, // Otherwise, create a new page and try again // Note that we could try fetching a page again, but as we just filled one up // it is unlikely that there is a non-full one available. Err(v) => { value = v; page = zalsa.table().push_page::(ingredient, memo_types()); self.most_recent_pages.borrow_mut().insert(ingredient, page); } } } } #[inline] pub(crate) fn push_query( &self, database_key_index: DatabaseKeyIndex, iteration_count: IterationCount, ) -> ActiveQueryGuard<'_> { let mut query_stack = self.query_stack.borrow_mut(); query_stack.push_new_query(database_key_index, iteration_count); ActiveQueryGuard { local_state: self, database_key_index, #[cfg(debug_assertions)] push_len: query_stack.len(), } } /// Executes a closure within the context of the current active query stacks (mutable). #[inline(always)] pub(crate) fn with_query_stack_mut( &self, c: impl UnwindSafe + FnOnce(&mut QueryStack) -> R, ) -> R { c(&mut self.query_stack.borrow_mut()) } #[inline(always)] pub(crate) fn with_query_stack(&self, c: impl UnwindSafe + FnOnce(&QueryStack) -> R) -> R { c(&mut self.query_stack.borrow()) } #[inline(always)] pub(crate) fn try_with_query_stack( &self, c: impl UnwindSafe + FnOnce(&QueryStack) -> R, ) -> Option { self.query_stack .try_borrow() .ok() .as_ref() .map(|stack| c(stack)) } /// Returns the index of the active query along with its *current* durability/changed-at /// information. As the query continues to execute, naturally, that information may change. pub(crate) fn active_query(&self) -> Option<(DatabaseKeyIndex, Stamp)> { self.with_query_stack(|stack| { stack .last() .map(|active_query| (active_query.database_key_index, active_query.stamp())) }) } /// Add an output to the current query's list of dependencies /// /// Returns `Err` if not in a query. 
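    // Editor's sketch (not part of salsa): the page-reuse loop from `allocate`
    // above, with fixed-capacity `Vec`s standing in for salsa's table pages.
    #[allow(dead_code)]
    fn _page_reuse_sketch() {
        const PAGE_CAPACITY: usize = 4;
        let mut pages: Vec<Vec<u32>> = vec![Vec::with_capacity(PAGE_CAPACITY)];
        let mut most_recent_page = 0;

        let mut allocate = |value: u32| -> (usize, usize) {
            loop {
                let page = &mut pages[most_recent_page];
                if page.len() < PAGE_CAPACITY {
                    // Fast path: the remembered page still has room.
                    page.push(value);
                    return (most_recent_page, page.len() - 1);
                }
                // Slow path: we just filled this page, so a fresh one is more
                // likely to have room than any existing page; remember it.
                pages.push(Vec::with_capacity(PAGE_CAPACITY));
                most_recent_page = pages.len() - 1;
            }
        };

        for i in 0..10 {
            let (_page, _slot) = allocate(i);
        }
    }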
    pub(crate) fn accumulate<A: Accumulator>(
        &self,
        index: IngredientIndex,
        value: A,
    ) -> Result<(), ()> {
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.accumulate(index, value);
                Ok(())
            } else {
                Err(())
            }
        })
    }

    /// Add an output to the current query's list of dependencies
    pub(crate) fn add_output(&self, entity: DatabaseKeyIndex) {
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_output(entity)
            }
        })
    }

    /// Check whether `entity` is an output of the currently active query (if any)
    pub(crate) fn is_output_of_active_query(&self, entity: DatabaseKeyIndex) -> bool {
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.is_output(entity)
            } else {
                false
            }
        })
    }

    /// Register that the currently active query reads the given input
    #[inline(always)]
    pub(crate) fn report_tracked_read(
        &self,
        input: DatabaseKeyIndex,
        durability: Durability,
        changed_at: Revision,
        has_accumulated: bool,
        accumulated_inputs: &AtomicInputAccumulatedValues,
        cycle_heads: &CycleHeads,
    ) {
        debug!(
            "report_tracked_read(input={:?}, durability={:?}, changed_at={:?})",
            input, durability, changed_at
        );
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_read(
                    input,
                    durability,
                    changed_at,
                    has_accumulated,
                    accumulated_inputs,
                    cycle_heads,
                );
            }
        })
    }

    /// Register that the currently active query reads the given input
    #[inline(always)]
    pub(crate) fn report_tracked_read_simple(
        &self,
        input: DatabaseKeyIndex,
        durability: Durability,
        changed_at: Revision,
    ) {
        debug!(
            "report_tracked_read(input={:?}, durability={:?}, changed_at={:?})",
            input, durability, changed_at
        );
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_read_simple(input, durability, changed_at);
            }
        })
    }

    /// Register that the current query read an untracked value
    ///
    /// # Parameters
    ///
    /// * `current_revision`, the current revision
    #[inline(always)]
    pub(crate) fn report_untracked_read(&self, current_revision: Revision) {
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_untracked_read(current_revision);
            }
        })
    }

    /// Update the top query on the stack to act as though it read a value
    /// of durability `durability` which changed in `revision`.
    // FIXME: Use or remove this.
    #[allow(dead_code)]
    pub(crate) fn report_synthetic_read(&self, durability: Durability, revision: Revision) {
        self.with_query_stack_mut(|stack| {
            if let Some(top_query) = stack.last_mut() {
                top_query.add_synthetic_read(durability, revision);
            }
        })
    }

    /// Called when the active query creates an index from the
    /// entity table with the index `entity_index`. Has the following effects:
    ///
    /// * Add a query read on `DatabaseKeyIndex::for_table(entity_index)`
    /// * Identify a unique disambiguator for the hash within the current query,
    ///   adding the hash to the current query's disambiguator table.
    /// * Returns a tuple of:
    ///   * the id of the current query
    ///   * the current dependencies (durability, changed_at) of current query
    ///   * the disambiguator index
    #[track_caller]
    pub(crate) fn disambiguate(&self, key: IdentityHash) -> (Stamp, Disambiguator) {
        self.with_query_stack_mut(|stack| {
            let top_query = stack.last_mut().expect(
                "cannot create a tracked struct disambiguator outside of a tracked function",
            );
            let disambiguator = top_query.disambiguate(key);
            (top_query.stamp(), disambiguator)
        })
    }

    #[track_caller]
    pub(crate) fn tracked_struct_id(&self, identity: &Identity) -> Option<Id> {
        self.with_query_stack(|stack| {
            let top_query = stack
                .last()
                .expect("cannot create a tracked struct ID outside of a tracked function");
            top_query.tracked_struct_ids().get(identity)
        })
    }

    #[track_caller]
    pub(crate) fn store_tracked_struct_id(&self, identity: Identity, id: Id) {
        self.with_query_stack_mut(|stack| {
            let top_query = stack
                .last_mut()
                .expect("cannot store a tracked struct ID outside of a tracked function");
            top_query.tracked_struct_ids_mut().insert(identity, id);
        })
    }

    #[cold]
    pub(crate) fn unwind_cancelled(&self, current_revision: Revision) {
        // Why is this reporting an untracked read? We do not store the query revisions on unwind, do we?
        self.report_untracked_read(current_revision);
        Cancelled::PendingWrite.throw();
    }
}

// Okay to implement as `ZalsaLocal` is `!Sync`
// - `most_recent_pages` can't observe broken states as we cannot panic such that we enter an
//   inconsistent state
// - neither can `query_stack` as we require the closures accessing it to be `UnwindSafe`
impl std::panic::RefUnwindSafe for ZalsaLocal {}

/// Summarizes "all the inputs that a query used"
/// and "all the outputs it has written to"
#[derive(Debug)]
// #[derive(Clone)] cloning this is expensive, so we don't derive
pub(crate) struct QueryRevisions {
    /// The most recent revision in which some input changed.
    pub(crate) changed_at: Revision,

    /// Minimum durability of the inputs to this query.
    pub(crate) durability: Durability,

    /// How was this query computed?
    pub(crate) origin: QueryOrigin,

    /// [`InputAccumulatedValues::Empty`] if any input read during the query's execution
    /// has any direct or indirect accumulated values.
    ///
    /// Note that this field could be in `QueryRevisionsExtra` as it is only relevant
    /// for accumulators, but we get it for free anyways due to padding.
    pub(super) accumulated_inputs: AtomicInputAccumulatedValues,

    /// Are the `cycle_heads` verified to not be provisional anymore?
    ///
    /// Note that this field could be in `QueryRevisionsExtra` as it is only
    /// relevant for queries that participate in a cycle, but we get it for
    /// free anyways due to padding.
    pub(super) verified_final: AtomicBool,

    /// Lazily allocated state.
    pub(super) extra: QueryRevisionsExtra,
}

impl QueryRevisions {
    #[cfg(feature = "salsa_unstable")]
    pub(crate) fn allocation_size(&self) -> usize {
        let QueryRevisions {
            changed_at: _,
            durability: _,
            accumulated_inputs: _,
            verified_final: _,
            origin,
            extra,
        } = self;

        let mut memory = 0;

        if let QueryOriginRef::Derived(query_edges)
        | QueryOriginRef::DerivedUntracked(query_edges) = origin.as_ref()
        {
            memory += std::mem::size_of_val(query_edges);
        }

        if let Some(extra) = extra.0.as_ref() {
            memory += std::mem::size_of::<QueryRevisionsExtraInner>();
            memory += extra.allocation_size();
        }

        memory
    }
}

/// Data on `QueryRevisions` that is lazily allocated to save memory
/// in the common case.
///
/// In particular, not all queries create tracked structs, participate
/// in cycles, or create accumulators.
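// Editor's sketch (not part of salsa): the lazily-allocated pattern below
// costs only one pointer in the common (empty) case, because `Option<Box<T>>`
// uses the null niche and needs no separate discriminant. Illustrated with
// an arbitrary payload type:
#[allow(dead_code)]
fn _lazy_extra_size_sketch() {
    struct BigExtra {
        tracked_struct_ids: Vec<(u64, u32)>,
        iteration: u32,
    }
    assert_eq!(
        std::mem::size_of::<Option<Box<BigExtra>>>(),
        std::mem::size_of::<usize>(),
    );
}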
#[derive(Debug, Default)] pub(crate) struct QueryRevisionsExtra(Option>); impl QueryRevisionsExtra { pub fn new( accumulated: AccumulatedMap, tracked_struct_ids: IdentityMap, cycle_heads: CycleHeads, iteration: IterationCount, ) -> Self { let inner = if tracked_struct_ids.is_empty() && cycle_heads.is_empty() && accumulated.is_empty() && iteration.is_initial() { None } else { Some(Box::new(QueryRevisionsExtraInner { accumulated, cycle_heads, tracked_struct_ids: tracked_struct_ids.into_thin_vec(), iteration, })) }; Self(inner) } } #[derive(Debug)] struct QueryRevisionsExtraInner { accumulated: AccumulatedMap, /// The ids of tracked structs created by this query. /// /// This table plays an important role when queries are /// re-executed: /// * A clone of this field is used as the initial set of /// `TrackedStructId`s for the query on the next execution. /// * The query will thus re-use the same ids if it creates /// tracked structs with the same `KeyStruct` as before. /// It may also create new tracked structs. /// * One tricky case involves deleted structs. If /// the old revision created a struct S but the new /// revision did not, there will still be a map entry /// for S. This is because queries only ever grow the map /// and they start with the same entries as from the /// previous revision. To handle this, `diff_outputs` compares /// the structs from the old/new revision and retains /// only entries that appeared in the new revision. tracked_struct_ids: ThinVec<(Identity, Id)>, /// This result was computed based on provisional values from /// these cycle heads. The "cycle head" is the query responsible /// for managing a fixpoint iteration. In a cycle like /// `--> A --> B --> C --> A`, the cycle head is query `A`: it is /// the query whose value is requested while it is executing, /// which must provide the initial provisional value and decide, /// after each iteration, whether the cycle has converged or must /// iterate again. cycle_heads: CycleHeads, iteration: IterationCount, } impl QueryRevisionsExtraInner { #[cfg(feature = "salsa_unstable")] fn allocation_size(&self) -> usize { let QueryRevisionsExtraInner { accumulated, tracked_struct_ids, cycle_heads, iteration: _, } = self; accumulated.allocation_size() + cycle_heads.allocation_size() + std::mem::size_of_val(tracked_struct_ids.as_slice()) } } #[cfg(not(feature = "shuttle"))] #[cfg(target_pointer_width = "64")] const _: [(); std::mem::size_of::()] = [(); std::mem::size_of::<[usize; 4]>()]; #[cfg(not(feature = "shuttle"))] #[cfg(target_pointer_width = "64")] const _: [(); std::mem::size_of::()] = [(); std::mem::size_of::<[usize; 7]>()]; impl QueryRevisions { pub(crate) fn fixpoint_initial(query: DatabaseKeyIndex) -> Self { Self { changed_at: Revision::start(), durability: Durability::MAX, origin: QueryOrigin::fixpoint_initial(), accumulated_inputs: Default::default(), verified_final: AtomicBool::new(false), extra: QueryRevisionsExtra::new( AccumulatedMap::default(), IdentityMap::default(), CycleHeads::initial(query), IterationCount::initial(), ), } } /// Returns a reference to the `AccumulatedMap` for this query, or `None` if the map is empty. pub(crate) fn accumulated(&self) -> Option<&AccumulatedMap> { self.extra .0 .as_ref() .map(|extra| &extra.accumulated) .filter(|map| !map.is_empty()) } /// Returns a reference to the `CycleHeads` for this query. 
pub(crate) fn cycle_heads(&self) -> &CycleHeads { match &self.extra.0 { Some(extra) => &extra.cycle_heads, None => empty_cycle_heads(), } } /// Returns a mutable reference to the `CycleHeads` for this query, or `None` if the list is empty. pub(crate) fn cycle_heads_mut(&mut self) -> Option<&mut CycleHeads> { self.extra .0 .as_mut() .map(|extra| &mut extra.cycle_heads) .filter(|cycle_heads| !cycle_heads.is_empty()) } /// Sets the `CycleHeads` for this query. pub(crate) fn set_cycle_heads(&mut self, cycle_heads: CycleHeads) { match &mut self.extra.0 { Some(extra) => extra.cycle_heads = cycle_heads, None => { self.extra = QueryRevisionsExtra::new( AccumulatedMap::default(), IdentityMap::default(), cycle_heads, IterationCount::default(), ); } }; } pub(crate) const fn iteration(&self) -> IterationCount { match &self.extra.0 { Some(extra) => extra.iteration, None => IterationCount::initial(), } } /// Updates the iteration count if this query has any cycle heads. Otherwise it's a no-op. pub(crate) fn update_iteration_count(&mut self, iteration_count: IterationCount) { if let Some(extra) = &mut self.extra.0 { extra.iteration = iteration_count } } /// Returns a reference to the `IdentityMap` for this query, or `None` if the map is empty. pub fn tracked_struct_ids(&self) -> Option<&[(Identity, Id)]> { self.extra .0 .as_ref() .map(|extra| &*extra.tracked_struct_ids) .filter(|tracked_struct_ids| !tracked_struct_ids.is_empty()) } /// Returns a mutable reference to the `IdentityMap` for this query, or `None` if the map is empty. pub fn tracked_struct_ids_mut(&mut self) -> Option<&mut ThinVec<(Identity, Id)>> { self.extra .0 .as_mut() .map(|extra| &mut extra.tracked_struct_ids) .filter(|tracked_struct_ids| !tracked_struct_ids.is_empty()) } } /// Tracks the way that a memoized value for a query was created. /// /// This is a read-only reference to a `PackedQueryOrigin`. #[derive(Debug, Clone, Copy)] #[repr(u8)] pub enum QueryOriginRef<'a> { /// The value was assigned as the output of another query (e.g., using `specify`). /// The `DatabaseKeyIndex` is the identity of the assigning query. Assigned(DatabaseKeyIndex) = QueryOriginKind::Assigned as u8, /// The value was derived by executing a function /// and we were able to track ALL of that function's inputs. /// Those inputs are described in [`QueryEdges`]. Derived(&'a [QueryEdge]) = QueryOriginKind::Derived as u8, /// The value was derived by executing a function /// but that function also reported that it read untracked inputs. /// The [`QueryEdges`] argument contains a listing of all the inputs we saw /// (but we know there were more). DerivedUntracked(&'a [QueryEdge]) = QueryOriginKind::DerivedUntracked as u8, /// The value is an initial provisional value for a query that supports fixpoint iteration. 
    FixpointInitial = QueryOriginKind::FixpointInitial as u8,
}

impl<'a> QueryOriginRef<'a> {
    /// Indices for queries *read* by this query
    #[inline]
    pub(crate) fn inputs(self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + use<'a> {
        let opt_edges = match self {
            QueryOriginRef::Derived(edges) | QueryOriginRef::DerivedUntracked(edges) => Some(edges),
            QueryOriginRef::Assigned(_) | QueryOriginRef::FixpointInitial => None,
        };
        opt_edges.into_iter().flat_map(input_edges)
    }

    /// Indices for queries *written* by this query (if any)
    pub(crate) fn outputs(self) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + use<'a> {
        let opt_edges = match self {
            QueryOriginRef::Derived(edges) | QueryOriginRef::DerivedUntracked(edges) => Some(edges),
            QueryOriginRef::Assigned(_) | QueryOriginRef::FixpointInitial => None,
        };
        opt_edges.into_iter().flat_map(output_edges)
    }

    #[inline]
    pub(crate) fn edges(self) -> &'a [QueryEdge] {
        let opt_edges = match self {
            QueryOriginRef::Derived(edges) | QueryOriginRef::DerivedUntracked(edges) => Some(edges),
            QueryOriginRef::Assigned(_) | QueryOriginRef::FixpointInitial => None,
        };
        opt_edges.unwrap_or_default()
    }
}

// Note: The discriminant assignment is intentional,
// we want to group `Derived` and `DerivedUntracked` together on the same bit (the second LSB)
// as we tend to match against both of them in the same branch.
#[derive(Clone, Copy)]
#[repr(u8)]
enum QueryOriginKind {
    /// An initial provisional value.
    ///
    /// This will occur in queries that support fixpoint iteration.
    FixpointInitial = 0b00,

    /// The value was assigned as the output of another query.
    ///
    /// This can occur, for example, when `specify` is used.
    Assigned = 0b01,

    /// The value was derived by executing a function
    /// _and_ Salsa was able to track all of said function's inputs.
    Derived = 0b11,

    /// The value was derived by executing a function
    /// but that function also reported that it read untracked inputs.
    DerivedUntracked = 0b10,
}

/// Tracks how a memoized value for a given query was created.
///
/// This type is a manual enum packed to 13 bytes to reduce the size of `QueryRevisions`.
#[repr(Rust, packed)]
pub struct QueryOrigin {
    /// The tag of this enum.
    ///
    /// Note that this tag only requires two bits and could likely be packed into
    /// some other field. However, we get this byte for free thanks to alignment.
    kind: QueryOriginKind,

    /// The data portion of this enum.
    data: QueryOriginData,

    /// The metadata of this enum.
    ///
    /// For `QueryOriginKind::Derived` and `QueryOriginKind::DerivedUntracked`, this
    /// is the length of the `input_outputs` allocation.
    ///
    /// For `QueryOriginKind::Assigned`, this is the `IngredientIndex` of the assigning query.
    /// Combined with the `Id` data, this forms a complete `DatabaseKeyIndex`.
    ///
    /// For `QueryOriginKind::FixpointInitial`, this field is zero.
    metadata: u32,
}

/// The data portion of `PackedQueryOrigin`.
union QueryOriginData {
    /// Query edges for `QueryOriginKind::Derived` or `QueryOriginKind::DerivedUntracked`.
    ///
    /// The query edges are between a memoized value and other queries in the dependency graph,
    /// including both dependency edges (e.g., when creating the memoized value for Q0
    /// executed another function Q1) and output edges (e.g., when Q0 specified the value
    /// for another query Q2).
    ///
    /// Note that we always track input dependencies even when there are untracked reads.
    /// Untracked reads mean that Salsa can't verify values, so the list of inputs is unused.
    /// However, Salsa still uses these edges to find the transitive inputs to an accumulator.
    ///
    /// You can access the input/output list via the methods [`inputs`] and [`outputs`] respectively.
    ///
    /// Important:
    ///
    /// * The inputs must be in **execution order** for the red-green algorithm to work.
    input_outputs: NonNull<QueryEdge>,

    /// The identity of the assigning query for `QueryOriginKind::Assigned`.
    index: Id,

    /// `QueryOriginKind::FixpointInitial` holds no data.
    empty: (),
}

/// SAFETY: The `input_outputs` pointer is owned and not accessed or shared concurrently.
unsafe impl Send for QueryOriginData {}

/// SAFETY: Same as above.
unsafe impl Sync for QueryOriginData {}

impl QueryOrigin {
    /// Create a query origin of type `QueryOriginKind::FixpointInitial`.
    pub fn fixpoint_initial() -> QueryOrigin {
        QueryOrigin {
            kind: QueryOriginKind::FixpointInitial,
            metadata: 0,
            data: QueryOriginData { empty: () },
        }
    }

    /// Create a query origin of type `QueryOriginKind::Derived`, with the given edges.
    pub fn derived(input_outputs: impl IntoIterator<Item = QueryEdge>) -> QueryOrigin {
        let input_outputs = input_outputs.into_iter().collect::<Box<[QueryEdge]>>();

        // Exceeding `u32::MAX` query edges should never happen in real-world usage.
        let length = u32::try_from(input_outputs.len())
            .expect("exceeded more than `u32::MAX` query edges; this should never happen.");

        // SAFETY: `Box::into_raw` returns a non-null pointer.
        let input_outputs =
            unsafe { NonNull::new_unchecked(Box::into_raw(input_outputs).cast::<QueryEdge>()) };

        QueryOrigin {
            kind: QueryOriginKind::Derived,
            metadata: length,
            data: QueryOriginData { input_outputs },
        }
    }

    /// Create a query origin of type `QueryOriginKind::DerivedUntracked`, with the given edges.
    pub fn derived_untracked(input_outputs: impl IntoIterator<Item = QueryEdge>) -> QueryOrigin {
        let mut origin = QueryOrigin::derived(input_outputs);
        origin.kind = QueryOriginKind::DerivedUntracked;
        origin
    }

    /// Create a query origin of type `QueryOriginKind::Assigned`, with the given key.
    pub fn assigned(key: DatabaseKeyIndex) -> QueryOrigin {
        QueryOrigin {
            kind: QueryOriginKind::Assigned,
            metadata: key.ingredient_index().as_u32(),
            data: QueryOriginData {
                index: key.key_index(),
            },
        }
    }

    /// Return a read-only reference to this query origin.
    pub fn as_ref(&self) -> QueryOriginRef<'_> {
        match self.kind {
            QueryOriginKind::Assigned => {
                // SAFETY: `data.index` is initialized when the tag is `QueryOriginKind::Assigned`.
                let index = unsafe { self.data.index };
                let ingredient_index = IngredientIndex::from(self.metadata as usize);
                QueryOriginRef::Assigned(DatabaseKeyIndex::new(ingredient_index, index))
            }
            QueryOriginKind::Derived => {
                // SAFETY: `data.input_outputs` is initialized when the tag is `QueryOriginKind::Derived`.
                let input_outputs = unsafe { self.data.input_outputs };
                let length = self.metadata as usize;
                // SAFETY: `input_outputs` and `self.metadata` form a valid slice when the
                // tag is `QueryOriginKind::Derived`.
                let input_outputs =
                    unsafe { std::slice::from_raw_parts(input_outputs.as_ptr(), length) };
                QueryOriginRef::Derived(input_outputs)
            }
            QueryOriginKind::DerivedUntracked => {
                // SAFETY: `data.input_outputs` is initialized when the tag is `QueryOriginKind::DerivedUntracked`.
                let input_outputs = unsafe { self.data.input_outputs };
                let length = self.metadata as usize;
                // SAFETY: `input_outputs` and `self.metadata` form a valid slice when the
                // tag is `QueryOriginKind::DerivedUntracked`.
                let input_outputs =
                    unsafe { std::slice::from_raw_parts(input_outputs.as_ptr(), length) };
                QueryOriginRef::DerivedUntracked(input_outputs)
            }
            QueryOriginKind::FixpointInitial => QueryOriginRef::FixpointInitial,
        }
    }
}

impl Drop for QueryOrigin {
    fn drop(&mut self) {
        match self.kind {
            QueryOriginKind::Derived | QueryOriginKind::DerivedUntracked => {
                // SAFETY: `data.input_outputs` is initialized when the tag is `QueryOriginKind::Derived`
                // or `QueryOriginKind::DerivedUntracked`.
                let input_outputs = unsafe { self.data.input_outputs };
                let length = self.metadata as usize;

                // SAFETY: `input_outputs` and `self.metadata` form a valid slice when the
                // tag is `QueryOriginKind::Derived` or `QueryOriginKind::DerivedUntracked`,
                // and we have `&mut self`.
                let _input_outputs: Box<[QueryEdge]> = unsafe {
                    Box::from_raw(ptr::slice_from_raw_parts_mut(
                        input_outputs.as_ptr(),
                        length,
                    ))
                };
            }
            // The data stored for these variants is `Copy`.
            QueryOriginKind::FixpointInitial | QueryOriginKind::Assigned => {}
        }
    }
}

impl std::fmt::Debug for QueryOrigin {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.as_ref().fmt(f)
    }
}

/// An input or output query edge.
///
/// This type is a packed version of `QueryEdgeKind`, tagging the `IngredientIndex`
/// in `key` with a discriminator for the input and output variants without increasing
/// the size of the type. Notably, this type is 12 bytes as opposed to the 16 byte
/// `QueryEdgeKind`, which is meaningful as inputs and outputs are stored contiguously.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct QueryEdge {
    key: DatabaseKeyIndex,
}

impl QueryEdge {
    /// Create an input query edge with the given index.
    pub fn input(key: DatabaseKeyIndex) -> QueryEdge {
        Self { key }
    }

    /// Create an output query edge with the given index.
    pub fn output(key: DatabaseKeyIndex) -> QueryEdge {
        let ingredient_index = key.ingredient_index().with_tag(true);
        Self {
            key: DatabaseKeyIndex::new(ingredient_index, key.key_index()),
        }
    }

    /// Returns the kind of this query edge.
    pub fn kind(self) -> QueryEdgeKind {
        // Clear the tag to restore the original index.
        let untagged = DatabaseKeyIndex::new(
            self.key.ingredient_index().with_tag(false),
            self.key.key_index(),
        );

        if self.key.ingredient_index().tag() {
            QueryEdgeKind::Output(untagged)
        } else {
            QueryEdgeKind::Input(untagged)
        }
    }
}

impl std::fmt::Debug for QueryEdge {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.kind().fmt(f)
    }
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum QueryEdgeKind {
    Input(DatabaseKeyIndex),
    Output(DatabaseKeyIndex),
}

/// Returns the (tracked) inputs that were executed in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn input_edges(
    input_outputs: &[QueryEdge],
) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + use<'_> {
    input_outputs.iter().filter_map(|&edge| match edge.kind() {
        QueryEdgeKind::Input(dependency_index) => Some(dependency_index),
        QueryEdgeKind::Output(_) => None,
    })
}

/// Returns the (tracked) outputs that were written in computing this memoized value.
///
/// These will always be in execution order.
pub(crate) fn output_edges(
    input_outputs: &[QueryEdge],
) -> impl DoubleEndedIterator<Item = DatabaseKeyIndex> + use<'_> {
    input_outputs.iter().filter_map(|&edge| match edge.kind() {
        QueryEdgeKind::Output(dependency_index) => Some(dependency_index),
        QueryEdgeKind::Input(_) => None,
    })
}

/// When a query is pushed onto the `active_query` stack, this guard
/// is returned to represent its slot.
The guard can be used to pop /// the query from the stack -- in the case of unwinding, the guard's /// destructor will also remove the query. pub(crate) struct ActiveQueryGuard<'me> { local_state: &'me ZalsaLocal, #[cfg(debug_assertions)] push_len: usize, pub(crate) database_key_index: DatabaseKeyIndex, } impl ActiveQueryGuard<'_> { /// Initialize the tracked struct ids with the values from the prior execution. pub(crate) fn seed_tracked_struct_ids(&self, tracked_struct_ids: &[(Identity, Id)]) { self.local_state.with_query_stack_mut(|stack| { #[cfg(debug_assertions)] assert_eq!(stack.len(), self.push_len); let frame = stack.last_mut().unwrap(); assert!(frame.tracked_struct_ids().is_empty()); frame .tracked_struct_ids_mut() .clone_from_slice(tracked_struct_ids); }) } /// Append the given `outputs` to the query's output list. pub(crate) fn seed_iteration(&self, previous: &QueryRevisions) { let durability = previous.durability; let changed_at = previous.changed_at; let edges = previous.origin.as_ref().edges(); let untracked_read = matches!( previous.origin.as_ref(), QueryOriginRef::DerivedUntracked(_) ); self.local_state.with_query_stack_mut(|stack| { #[cfg(debug_assertions)] assert_eq!(stack.len(), self.push_len); let frame = stack.last_mut().unwrap(); frame.seed_iteration(durability, changed_at, edges, untracked_read); }) } /// Invoked when the query has successfully completed execution. fn complete(self) -> QueryRevisions { let query = self.local_state.with_query_stack_mut(|stack| { stack.pop_into_revisions( self.database_key_index, #[cfg(debug_assertions)] self.push_len, ) }); std::mem::forget(self); query } /// Pops an active query from the stack. Returns the [`QueryRevisions`] /// which summarizes the other queries that were accessed during this /// query's execution. #[inline] pub(crate) fn pop(self) -> QueryRevisions { self.complete() } } impl Drop for ActiveQueryGuard<'_> { fn drop(&mut self) { self.local_state.with_query_stack_mut(|stack| { stack.pop( self.database_key_index, #[cfg(debug_assertions)] self.push_len, ); }); } } salsa-0.23.0/tests/accumulate-chain.rs000064400000000000000000000022421046102023000157240ustar 00000000000000//! Test that when having nested tracked functions //! we don't drop any values when accumulating. mod common; use expect_test::expect; use salsa::{Accumulator, Database, DatabaseImpl}; use test_log::test; #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::tracked] fn push_logs(db: &dyn Database) { push_a_logs(db); } #[salsa::tracked] fn push_a_logs(db: &dyn Database) { Log("log a".to_string()).accumulate(db); push_b_logs(db); } #[salsa::tracked] fn push_b_logs(db: &dyn Database) { // No logs push_c_logs(db); } #[salsa::tracked] fn push_c_logs(db: &dyn Database) { // No logs push_d_logs(db); } #[salsa::tracked] fn push_d_logs(db: &dyn Database) { Log("log d".to_string()).accumulate(db); } #[test] fn accumulate_chain() { DatabaseImpl::new().attach(|db| { let logs = push_logs::accumulated::(db); // Check that we get all the logs. 
expect![[r#" [ Log( "log a", ), Log( "log d", ), ]"#]] .assert_eq(&format!("{logs:#?}")); }) } salsa-0.23.0/tests/accumulate-custom-debug.rs000064400000000000000000000017101046102023000172370ustar 00000000000000mod common; use expect_test::expect; use salsa::{Accumulator, Database}; use test_log::test; #[salsa::input(debug)] struct MyInput { count: u32, } #[salsa::accumulator] struct Log(String); impl std::fmt::Debug for Log { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("CustomLog").field(&self.0).finish() } } #[salsa::tracked] fn push_logs(db: &dyn salsa::Database, input: MyInput) { for i in 0..input.count(db) { Log(format!("#{i}")).accumulate(db); } } #[test] fn accumulate_custom_debug() { salsa::DatabaseImpl::new().attach(|db| { let input = MyInput::new(db, 2); let logs = push_logs::accumulated::(db, input); expect![[r##" [ CustomLog( "#0", ), CustomLog( "#1", ), ] "##]] .assert_debug_eq(&logs); }) } salsa-0.23.0/tests/accumulate-dag.rs000064400000000000000000000030741046102023000154010ustar 00000000000000mod common; use expect_test::expect; use salsa::{Accumulator, Database}; use test_log::test; #[salsa::input(debug)] struct MyInput { field_a: u32, field_b: u32, } #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::tracked] fn push_logs(db: &dyn Database, input: MyInput) { push_a_logs(db, input); push_b_logs(db, input); } #[salsa::tracked] fn push_a_logs(db: &dyn Database, input: MyInput) { let count = input.field_a(db); for i in 0..count { Log(format!("log_a({i} of {count})")).accumulate(db); } } #[salsa::tracked] fn push_b_logs(db: &dyn Database, input: MyInput) { // Note that b calls a push_a_logs(db, input); let count = input.field_b(db); for i in 0..count { Log(format!("log_b({i} of {count})")).accumulate(db); } } #[test] fn accumulate_a_called_twice() { salsa::DatabaseImpl::new().attach(|db| { let input = MyInput::new(db, 2, 3); let logs = push_logs::accumulated::(db, input); // Check that we don't see logs from `a` appearing twice in the input. expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); }) } salsa-0.23.0/tests/accumulate-execution-order.rs000064400000000000000000000026151046102023000177620ustar 00000000000000//! Demonstrates that accumulation is done in the order //! in which things were originally executed. 
mod common; use expect_test::expect; use salsa::{Accumulator, Database}; use test_log::test; #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::tracked] fn push_logs(db: &dyn Database) { push_a_logs(db); } #[salsa::tracked] fn push_a_logs(db: &dyn Database) { Log("log a".to_string()).accumulate(db); push_b_logs(db); push_c_logs(db); push_d_logs(db); } #[salsa::tracked] fn push_b_logs(db: &dyn Database) { Log("log b".to_string()).accumulate(db); push_d_logs(db); } #[salsa::tracked] fn push_c_logs(db: &dyn Database) { Log("log c".to_string()).accumulate(db); } #[salsa::tracked] fn push_d_logs(db: &dyn Database) { Log("log d".to_string()).accumulate(db); } #[test] fn accumulate_execution_order() { salsa::DatabaseImpl::new().attach(|db| { let logs = push_logs::accumulated::(db); // Check that we get logs in execution order expect![[r#" [ Log( "log a", ), Log( "log b", ), Log( "log d", ), Log( "log c", ), ]"#]] .assert_eq(&format!("{logs:#?}")); }) } salsa-0.23.0/tests/accumulate-from-tracked-fn.rs000064400000000000000000000033071046102023000176240ustar 00000000000000//! Accumulate values from within a tracked function. //! Then mutate the values so that the tracked function re-executes. //! Check that we accumulate the appropriate, new values. use expect_test::expect; use salsa::{Accumulator, Setter}; use test_log::test; #[salsa::input(debug)] struct List { value: u32, next: Option, } #[salsa::accumulator] #[derive(Copy, Clone, Debug)] struct Integers(u32); #[salsa::tracked] fn compute(db: &dyn salsa::Database, input: List) { eprintln!( "{:?}(value={:?}, next={:?})", input, input.value(db), input.next(db) ); let result = if let Some(next) = input.next(db) { let next_integers = compute::accumulated::(db, next); eprintln!("{next_integers:?}"); let v = input.value(db) + next_integers.iter().map(|a| a.0).sum::(); eprintln!("input={:?} v={:?}", input.value(db), v); v } else { input.value(db) }; Integers(result).accumulate(db); eprintln!("pushed result {result:?}"); } #[test] fn test1() { let mut db = salsa::DatabaseImpl::new(); let l0 = List::new(&db, 1, None); let l1 = List::new(&db, 10, Some(l0)); compute(&db, l1); expect![[r#" [ Integers( 11, ), Integers( 1, ), ] "#]] .assert_debug_eq(&compute::accumulated::(&db, l1)); l0.set_value(&mut db).to(2); compute(&db, l1); expect![[r#" [ Integers( 12, ), Integers( 2, ), ] "#]] .assert_debug_eq(&compute::accumulated::(&db, l1)); } salsa-0.23.0/tests/accumulate-no-duplicates.rs000064400000000000000000000041301046102023000174070ustar 00000000000000//! 
Test that we don't get duplicate accumulated values mod common; use expect_test::expect; use salsa::{Accumulator, Database}; use test_log::test; // A(1) { // B // B // C { // D { // A(2) { // B // } // B // } // E // } // B // } #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::input(debug)] struct MyInput { n: u32, } #[salsa::tracked] fn push_logs(db: &dyn Database) { push_a_logs(db, MyInput::new(db, 1)); } #[salsa::tracked] fn push_a_logs(db: &dyn Database, input: MyInput) { Log("log a".to_string()).accumulate(db); if input.n(db) == 1 { push_b_logs(db); push_b_logs(db); push_c_logs(db); push_b_logs(db); } else { push_b_logs(db); } } #[salsa::tracked] fn push_b_logs(db: &dyn Database) { Log("log b".to_string()).accumulate(db); } #[salsa::tracked] fn push_c_logs(db: &dyn Database) { Log("log c".to_string()).accumulate(db); push_d_logs(db); push_e_logs(db); } // Note this isn't tracked fn push_d_logs(db: &dyn Database) { Log("log d".to_string()).accumulate(db); push_a_logs(db, MyInput::new(db, 2)); push_b_logs(db); } #[salsa::tracked] fn push_e_logs(db: &dyn Database) { Log("log e".to_string()).accumulate(db); } #[test] fn accumulate_no_duplicates() { salsa::DatabaseImpl::new().attach(|db| { let logs = push_logs::accumulated::(db); // Test that there aren't duplicate B logs. // Note that log A appears twice, because they both come // from different inputs. expect![[r#" [ Log( "log a", ), Log( "log b", ), Log( "log c", ), Log( "log d", ), Log( "log a", ), Log( "log e", ), ]"#]] .assert_eq(&format!("{logs:#?}")); }) } salsa-0.23.0/tests/accumulate-reuse-workaround.rs000064400000000000000000000040441046102023000201600ustar 00000000000000//! Demonstrates the workaround of wrapping calls to //! `accumulated` in a tracked function to get better //! reuse. mod common; use common::{LogDatabase, LoggerDatabase}; use expect_test::expect; use salsa::{Accumulator, Setter}; use test_log::test; #[salsa::input(debug)] struct List { value: u32, next: Option, } #[salsa::accumulator] #[derive(Copy, Clone, Debug)] struct Integers(u32); #[salsa::tracked] fn compute(db: &dyn LogDatabase, input: List) -> u32 { db.push_log(format!("compute({input:?})",)); // always pushes 0 Integers(0).accumulate(db); let result = if let Some(next) = input.next(db) { let next_integers = accumulated(db, next); let v = input.value(db) + next_integers.iter().sum::(); v } else { input.value(db) }; // return value changes result } #[salsa::tracked(returns(ref))] fn accumulated(db: &dyn LogDatabase, input: List) -> Vec { db.push_log(format!("accumulated({input:?})")); compute::accumulated::(db, input) .into_iter() .map(|a| a.0) .collect() } #[test] fn test1() { let mut db = LoggerDatabase::default(); let l1 = List::new(&db, 1, None); let l2 = List::new(&db, 2, Some(l1)); assert_eq!(compute(&db, l2), 2); db.assert_logs(expect![[r#" [ "compute(List { [salsa id]: Id(1), value: 2, next: Some(List { [salsa id]: Id(0), value: 1, next: None }) })", "accumulated(List { [salsa id]: Id(0), value: 1, next: None })", "compute(List { [salsa id]: Id(0), value: 1, next: None })", ]"#]]); // When we mutate `l1`, we should re-execute `compute` for `l1`, // and we re-execute accumulated for `l1`, but we do NOT re-execute // `compute` for `l2`. 
l1.set_value(&mut db).to(2); assert_eq!(compute(&db, l2), 2); db.assert_logs(expect![[r#" [ "accumulated(List { [salsa id]: Id(0), value: 2, next: None })", "compute(List { [salsa id]: Id(0), value: 2, next: None })", ]"#]]); } salsa-0.23.0/tests/accumulate-reuse.rs000064400000000000000000000035311046102023000157670ustar 00000000000000//! Accumulator re-use test. //! //! Tests behavior when a query's only inputs //! are the accumulated values from another query. mod common; use common::{LogDatabase, LoggerDatabase}; use expect_test::expect; use salsa::{Accumulator, Setter}; use test_log::test; #[salsa::input(debug)] struct List { value: u32, next: Option, } #[salsa::accumulator] struct Integers(u32); #[salsa::tracked] fn compute(db: &dyn LogDatabase, input: List) -> u32 { db.push_log(format!("compute({input:?})",)); // always pushes 0 Integers(0).accumulate(db); let result = if let Some(next) = input.next(db) { let next_integers = compute::accumulated::(db, next); let v = input.value(db) + next_integers.iter().map(|i| i.0).sum::(); v } else { input.value(db) }; // return value changes result } #[test] fn test1() { let mut db = LoggerDatabase::default(); let l1 = List::new(&db, 1, None); let l2 = List::new(&db, 2, Some(l1)); assert_eq!(compute(&db, l2), 2); db.assert_logs(expect![[r#" [ "compute(List { [salsa id]: Id(1), value: 2, next: Some(List { [salsa id]: Id(0), value: 1, next: None }) })", "compute(List { [salsa id]: Id(0), value: 1, next: None })", ]"#]]); // When we mutate `l1`, we should re-execute `compute` for `l1`, // but we should not have to re-execute `compute` for `l2`. // The only input for `compute(l1)` is the accumulated values from `l1`, // which have not changed. l1.set_value(&mut db).to(2); assert_eq!(compute(&db, l2), 2); db.assert_logs(expect![[r#" [ "compute(List { [salsa id]: Id(1), value: 2, next: Some(List { [salsa id]: Id(0), value: 2, next: None }) })", "compute(List { [salsa id]: Id(0), value: 2, next: None })", ]"#]]); } salsa-0.23.0/tests/accumulate.rs000064400000000000000000000136421046102023000146520ustar 00000000000000mod common; use common::{LogDatabase, LoggerDatabase}; use expect_test::expect; use salsa::{Accumulator, Setter}; use test_log::test; #[salsa::input(debug)] struct MyInput { field_a: u32, field_b: u32, } #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::tracked] fn push_logs(db: &dyn LogDatabase, input: MyInput) { db.push_log(format!( "push_logs(a = {}, b = {})", input.field_a(db), input.field_b(db) )); // We don't invoke `push_a_logs` (or `push_b_logs`) with a value of 0. // This allows us to test what happens a change in inputs causes a function not to be called at all. if input.field_a(db) > 0 { push_a_logs(db, input); } if input.field_b(db) > 0 { push_b_logs(db, input); } } #[salsa::tracked] fn push_a_logs(db: &dyn LogDatabase, input: MyInput) { let field_a = input.field_a(db); db.push_log(format!("push_a_logs({field_a})")); for i in 0..field_a { Log(format!("log_a({i} of {field_a})")).accumulate(db); } } #[salsa::tracked] fn push_b_logs(db: &dyn LogDatabase, input: MyInput) { let field_a = input.field_b(db); db.push_log(format!("push_b_logs({field_a})")); for i in 0..field_a { Log(format!("log_b({i} of {field_a})")).accumulate(db); } } #[test] fn accumulate_once() { let db = common::LoggerDatabase::default(); // Just call accumulate on a base input to see what happens. 
let input = MyInput::new(&db, 2, 3); let logs = push_logs::accumulated::(&db, input); db.assert_logs(expect![[r#" [ "push_logs(a = 2, b = 3)", "push_a_logs(2)", "push_b_logs(3)", ]"#]]); // Check that we see logs from `a` first and then logs from `b` // (execution order). expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); } #[test] fn change_a_from_2_to_0() { let mut db = common::LoggerDatabase::default(); // Accumulate logs for `a = 2` and `b = 3` let input = MyInput::new(&db, 2, 3); let logs = push_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); db.assert_logs(expect![[r#" [ "push_logs(a = 2, b = 3)", "push_a_logs(2)", "push_b_logs(3)", ]"#]]); // Change to `a = 0`, which means `push_logs` does not call `push_a_logs` at all input.set_field_a(&mut db).to(0); let logs = push_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); db.assert_logs(expect![[r#" [ "push_logs(a = 0, b = 3)", ]"#]]); } #[test] fn change_a_from_2_to_1() { let mut db = LoggerDatabase::default(); // Accumulate logs for `a = 2` and `b = 3` let input = MyInput::new(&db, 2, 3); let logs = push_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); db.assert_logs(expect![[r#" [ "push_logs(a = 2, b = 3)", "push_a_logs(2)", "push_b_logs(3)", ]"#]]); // Change to `a = 1`, which means `push_logs` does not call `push_a_logs` at all input.set_field_a(&mut db).to(1); let logs = push_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_a(0 of 1)", ), Log( "log_b(0 of 3)", ), Log( "log_b(1 of 3)", ), Log( "log_b(2 of 3)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); db.assert_logs(expect![[r#" [ "push_logs(a = 1, b = 3)", "push_a_logs(1)", ]"#]]); } #[test] fn get_a_logs_after_changing_b() { let mut db = common::LoggerDatabase::default(); // Invoke `push_a_logs` with `a = 2` and `b = 3` (but `b` doesn't matter) let input = MyInput::new(&db, 2, 3); let logs = push_a_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), ]"#]] .assert_eq(&format!("{logs:#?}")); db.assert_logs(expect![[r#" [ "push_a_logs(2)", ]"#]]); // Changing `b` does not cause `push_a_logs` to re-execute // and we still get the same result input.set_field_b(&mut db).to(5); let logs = push_a_logs::accumulated::(&db, input); expect![[r#" [ Log( "log_a(0 of 2)", ), Log( "log_a(1 of 2)", ), ] "#]] .assert_debug_eq(&logs); db.assert_logs(expect!["[]"]); } salsa-0.23.0/tests/accumulated_backdate.rs000064400000000000000000000033561046102023000166350ustar 00000000000000//! Tests that accumulated values are correctly accounted for //! when backdating a value. 
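// Editor's sketch (not part of salsa): the backdating rule these tests
// exercise. When a query re-executes and produces a value equal to the old
// one, its `changed_at` revision is kept ("backdated") so that dependent
// queries can still treat their inputs as unchanged. Plain data stands in
// for salsa's memo machinery.
#[allow(dead_code)]
fn _backdating_rule_sketch() {
    struct Memo {
        value: u32,
        changed_at: u32, // revision in which `value` last truly changed
    }
    fn store(memo: &mut Memo, new_value: u32, current_revision: u32) {
        if memo.value != new_value {
            memo.changed_at = current_revision; // real change: bump
        }
        // else: backdate, i.e. keep the old `changed_at`
        memo.value = new_value;
    }
    let mut memo = Memo { value: 0, changed_at: 1 };
    store(&mut memo, 0, 2); // equal value => backdated
    assert_eq!(memo.changed_at, 1);
    store(&mut memo, 5, 3); // changed value => bumped
    assert_eq!(memo.changed_at, 3);
}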
mod common; use common::LogDatabase; use expect_test::expect; use salsa::{Accumulator, Setter}; use test_log::test; #[salsa::input(debug)] struct File { content: String, } #[salsa::accumulator] #[derive(Debug)] struct Log(#[allow(dead_code)] String); #[salsa::tracked] fn compile(db: &dyn LogDatabase, input: File) -> u32 { parse(db, input) } #[salsa::tracked] fn parse(db: &dyn LogDatabase, input: File) -> u32 { let value: Result = input.content(db).parse(); match value { Ok(value) => value, Err(error) => { Log(error.to_string()).accumulate(db); 0 } } } #[test] fn backdate() { let mut db = common::LoggerDatabase::default(); let input = File::new(&db, "0".to_string()); let logs = compile::accumulated::(&db, input); expect![[r#"[]"#]].assert_eq(&format!("{logs:#?}")); input.set_content(&mut db).to("a".to_string()); let logs = compile::accumulated::(&db, input); expect![[r#" [ Log( "invalid digit found in string", ), ]"#]] .assert_eq(&format!("{logs:#?}")); } #[test] fn backdate_no_diagnostics() { let mut db = common::LoggerDatabase::default(); let input = File::new(&db, "a".to_string()); let logs = compile::accumulated::(&db, input); expect![[r#" [ Log( "invalid digit found in string", ), ]"#]] .assert_eq(&format!("{logs:#?}")); input.set_content(&mut db).to("0".to_string()); let logs = compile::accumulated::(&db, input); expect![[r#"[]"#]].assert_eq(&format!("{logs:#?}")); } salsa-0.23.0/tests/backtrace.rs000064400000000000000000000072161046102023000144460ustar 00000000000000use expect_test::expect; use salsa::{Backtrace, Database, DatabaseImpl}; use test_log::test; #[salsa::input(debug)] struct Thing { detailed: bool, } #[salsa::tracked] fn query_a(db: &dyn Database, thing: Thing) -> String { query_b(db, thing) } #[salsa::tracked] fn query_b(db: &dyn Database, thing: Thing) -> String { query_c(db, thing) } #[salsa::tracked] fn query_c(db: &dyn Database, thing: Thing) -> String { query_d(db, thing) } #[salsa::tracked] fn query_d(db: &dyn Database, thing: Thing) -> String { query_e(db, thing) } #[salsa::tracked] fn query_e(db: &dyn Database, thing: Thing) -> String { if thing.detailed(db) { format!("{:#}", Backtrace::capture().unwrap()) } else { format!("{}", Backtrace::capture().unwrap()) } } #[salsa::tracked] fn query_f(db: &dyn Database, thing: Thing) -> String { query_cycle(db, thing) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_cycle(db: &dyn Database, thing: Thing) -> String { let backtrace = query_cycle(db, thing); if backtrace.is_empty() { query_e(db, thing) } else { backtrace } } fn cycle_initial(_db: &dyn salsa::Database, _thing: Thing) -> String { String::new() } fn cycle_fn( _db: &dyn salsa::Database, _value: &str, _count: u32, _thing: Thing, ) -> salsa::CycleRecoveryAction { salsa::CycleRecoveryAction::Iterate } #[test] fn backtrace_works() { let db = DatabaseImpl::default(); let backtrace = query_a(&db, Thing::new(&db, false)).replace("\\", "/"); expect![[r#" query stacktrace: 0: query_e(Id(0)) at tests/backtrace.rs:30 1: query_d(Id(0)) at tests/backtrace.rs:25 2: query_c(Id(0)) at tests/backtrace.rs:20 3: query_b(Id(0)) at tests/backtrace.rs:15 4: query_a(Id(0)) at tests/backtrace.rs:10 "#]] .assert_eq(&backtrace); let backtrace = query_a(&db, Thing::new(&db, true)).replace("\\", "/"); expect![[r#" query stacktrace: 0: query_e(Id(1)) -> (R1, Durability::LOW) at tests/backtrace.rs:30 1: query_d(Id(1)) -> (R1, Durability::HIGH) at tests/backtrace.rs:25 2: query_c(Id(1)) -> (R1, Durability::HIGH) at tests/backtrace.rs:20 3: query_b(Id(1)) -> (R1, 
Durability::HIGH) at tests/backtrace.rs:15 4: query_a(Id(1)) -> (R1, Durability::HIGH) at tests/backtrace.rs:10 "#]] .assert_eq(&backtrace); let backtrace = query_f(&db, Thing::new(&db, false)).replace("\\", "/"); expect![[r#" query stacktrace: 0: query_e(Id(2)) at tests/backtrace.rs:30 1: query_cycle(Id(2)) at tests/backtrace.rs:43 cycle heads: query_cycle(Id(2)) -> IterationCount(0) 2: query_f(Id(2)) at tests/backtrace.rs:38 "#]] .assert_eq(&backtrace); let backtrace = query_f(&db, Thing::new(&db, true)).replace("\\", "/"); expect![[r#" query stacktrace: 0: query_e(Id(3)) -> (R1, Durability::LOW) at tests/backtrace.rs:30 1: query_cycle(Id(3)) -> (R1, Durability::HIGH, iteration = IterationCount(0)) at tests/backtrace.rs:43 cycle heads: query_cycle(Id(3)) -> IterationCount(0) 2: query_f(Id(3)) -> (R1, Durability::HIGH) at tests/backtrace.rs:38 "#]] .assert_eq(&backtrace); } salsa-0.23.0/tests/check_auto_traits.rs000064400000000000000000000017251046102023000162210ustar 00000000000000//! Test that auto trait impls exist as expected. use std::panic::UnwindSafe; use salsa::Database; use test_log::test; #[salsa::input] struct MyInput { field: String, } #[salsa::tracked] struct MyTracked<'db> { field: MyInterned<'db>, } #[salsa::interned] struct MyInterned<'db> { field: String, } #[salsa::tracked] fn test(db: &dyn Database, input: MyInput) { let input = is_send(is_sync(input)); let interned = is_send(is_sync(MyInterned::new(db, input.field(db).clone()))); let _tracked_struct = is_send(is_sync(MyTracked::new(db, interned))); } fn is_send(t: T) -> T { t } fn is_sync(t: T) -> T { t } fn is_unwind_safe(t: T) -> T { t } #[test] fn execute() { let db = is_send(salsa::DatabaseImpl::new()); let _handle = is_send(is_sync(is_unwind_safe( db.storage().clone().into_zalsa_handle(), ))); let input = MyInput::new(&db, "Hello".to_string()); test(&db, input); } salsa-0.23.0/tests/common/mod.rs000064400000000000000000000123521046102023000145730ustar 00000000000000//! Utility for tests that lets us log when notable events happen. #![allow(dead_code, unused_imports)] use std::sync::{Arc, Mutex}; use salsa::{Database, Storage}; /// Logging userdata: provides [`LogDatabase`][] trait. /// /// If you wish to use it along with other userdata, /// you can also embed it in another struct and implement [`HasLogger`][] for that struct. #[derive(Clone, Default)] pub struct Logger { logs: Arc>>, } impl Logger { pub fn push_log(&self, string: String) { self.logs.lock().unwrap().push(string); } } /// Trait implemented by databases that lets them log events. pub trait HasLogger { /// Return a reference to the logger from the database. fn logger(&self) -> &Logger; } #[salsa::db] pub trait LogDatabase: HasLogger + Database { /// Log an event from inside a tracked function. fn push_log(&self, string: String) { self.logger().logs.lock().unwrap().push(string); } /// Asserts what the (formatted) logs should look like, /// clearing the logged events. This takes `&mut self` because /// it is meant to be run from outside any tracked functions. fn assert_logs(&self, expected: expect_test::Expect) { let logs = std::mem::take(&mut *self.logger().logs.lock().unwrap()); expected.assert_eq(&format!("{logs:#?}")); } /// Asserts the length of the logs, /// clearing the logged events. This takes `&mut self` because /// it is meant to be run from outside any tracked functions. 
salsa-0.23.0/tests/common/mod.rs000064400000000000000000000123521046102023000145730ustar 00000000000000//! Utility for tests that lets us log when notable events happen. #![allow(dead_code, unused_imports)] use std::sync::{Arc, Mutex}; use salsa::{Database, Storage}; /// Logging userdata: provides [`LogDatabase`][] trait. /// /// If you wish to use it along with other userdata, /// you can also embed it in another struct and implement [`HasLogger`][] for that struct. #[derive(Clone, Default)] pub struct Logger { logs: Arc<Mutex<Vec<String>>>, } impl Logger { pub fn push_log(&self, string: String) { self.logs.lock().unwrap().push(string); } } /// Trait implemented by databases that lets them log events. pub trait HasLogger { /// Return a reference to the logger from the database. fn logger(&self) -> &Logger; } #[salsa::db] pub trait LogDatabase: HasLogger + Database { /// Log an event from inside a tracked function. fn push_log(&self, string: String) { self.logger().logs.lock().unwrap().push(string); } /// Asserts what the (formatted) logs should look like, /// clearing the logged events. This is meant to be run /// from outside any tracked functions. fn assert_logs(&self, expected: expect_test::Expect) { let logs = std::mem::take(&mut *self.logger().logs.lock().unwrap()); expected.assert_eq(&format!("{logs:#?}")); } /// Asserts the length of the logs, /// clearing the logged events. This is meant to be run /// from outside any tracked functions. fn assert_logs_len(&self, expected: usize) { let logs = std::mem::take(&mut *self.logger().logs.lock().unwrap()); assert_eq!(logs.len(), expected, "Actual logs: {logs:#?}"); } } #[salsa::db] impl<Db: HasLogger + Database> LogDatabase for Db {} /// Database that provides logging but does not log salsa events. #[salsa::db] #[derive(Clone, Default)] pub struct LoggerDatabase { storage: Storage<Self>, logger: Logger, } impl HasLogger for LoggerDatabase { fn logger(&self) -> &Logger { &self.logger } } #[salsa::db] impl Database for LoggerDatabase {} /// Database that provides logging and logs salsa events. #[salsa::db] #[derive(Clone)] pub struct EventLoggerDatabase { storage: Storage<Self>, logger: Logger, } impl Default for EventLoggerDatabase { fn default() -> Self { let logger = Logger::default(); Self { storage: Storage::new(Some(Box::new({ let logger = logger.clone(); move |event| logger.push_log(format!("{:?}", event.kind)) }))), logger, } } } #[salsa::db] impl Database for EventLoggerDatabase {} impl HasLogger for EventLoggerDatabase { fn logger(&self) -> &Logger { &self.logger } } #[salsa::db] #[derive(Clone)] pub struct DiscardLoggerDatabase { storage: Storage<Self>, logger: Logger, } impl Default for DiscardLoggerDatabase { fn default() -> Self { let logger = Logger::default(); Self { storage: Storage::new(Some(Box::new({ let logger = logger.clone(); move |event| match event.kind { salsa::EventKind::WillDiscardStaleOutput { .. } | salsa::EventKind::DidDiscard { .. } => { logger.push_log(format!("salsa_event({:?})", event.kind)); } _ => {} } }))), logger, } } } #[salsa::db] impl Database for DiscardLoggerDatabase {} impl HasLogger for DiscardLoggerDatabase { fn logger(&self) -> &Logger { &self.logger } } #[salsa::db] #[derive(Clone)] pub struct ExecuteValidateLoggerDatabase { storage: Storage<Self>, logger: Logger, } impl Default for ExecuteValidateLoggerDatabase { fn default() -> Self { let logger = Logger::default(); Self { storage: Storage::new(Some(Box::new({ let logger = logger.clone(); move |event| match event.kind { salsa::EventKind::WillExecute { .. } | salsa::EventKind::WillIterateCycle { .. } | salsa::EventKind::DidValidateMemoizedValue { .. } => { logger.push_log(format!("salsa_event({:?})", event.kind)); } _ => {} } }))), logger, } } } #[salsa::db] impl Database for ExecuteValidateLoggerDatabase {} impl HasLogger for ExecuteValidateLoggerDatabase { fn logger(&self) -> &Logger { &self.logger } } /// Trait implemented by databases that lets them provide a fixed u32 value. pub trait HasValue { fn get_value(&self) -> u32; } #[salsa::db] pub trait ValueDatabase: HasValue + Database {} #[salsa::db] impl<Db: HasValue + Database> ValueDatabase for Db {} #[salsa::db] #[derive(Clone, Default)] pub struct DatabaseWithValue { storage: Storage<Self>, value: u32, } impl HasValue for DatabaseWithValue { fn get_value(&self) -> u32 { self.value } } #[salsa::db] impl Database for DatabaseWithValue {} impl DatabaseWithValue { pub fn new(value: u32) -> Self { Self { storage: Default::default(), value, } } }
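All of the database flavors in this module share one shape: a struct embedding `salsa::Storage<Self>` plus any userdata, with an optional event callback handed to `Storage::new`. A trimmed sketch of that pattern, mirroring `EventLoggerDatabase` above (the struct name is illustrative):

#[salsa::db]
#[derive(Clone)]
struct PrintingDb {
    storage: salsa::Storage<Self>,
}

impl Default for PrintingDb {
    fn default() -> Self {
        Self {
            // The callback observes every salsa event (executions,
            // validations, discards, ...) as it is emitted.
            storage: salsa::Storage::new(Some(Box::new(|event| {
                eprintln!("salsa event: {:?}", event.kind);
            }))),
        }
    }
}

#[salsa::db]
impl salsa::Database for PrintingDb {}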
salsa-0.23.0/tests/compile-fail/accumulator_incompatibles.rs000064400000000000000000000012241046102023000223110ustar 00000000000000#[salsa::accumulator(returns(ref))] struct AccWithRetRef(u32); #[salsa::accumulator(specify)] struct AccWithSpecify(u32); #[salsa::accumulator(no_eq)] struct AccWithNoEq(u32); #[salsa::accumulator(data = MyAcc)] struct AccWithData(u32); #[salsa::accumulator(db = Db)] struct AcWithcDb(u32); #[salsa::accumulator(recover_fn = recover)] struct AccWithRecover(u32); #[salsa::accumulator(lru = 12)] struct AccWithLru(u32); #[salsa::accumulator(revisions = 12)] struct AccWithRevisions(u32); #[salsa::accumulator(constructor = Constructor)] struct AccWithConstructor(u32); #[salsa::accumulator(heap_size = size)] struct AccWithHeapSize(u32); fn main() {}
salsa-0.23.0/tests/compile-fail/accumulator_incompatibles.stderr000064400000000000000000000034101046102023000231700ustar 00000000000000error: `returns` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:1:22 | 1 | #[salsa::accumulator(returns(ref))] | ^^^^^^^ error: `specify` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:4:22 | 4 | #[salsa::accumulator(specify)] | ^^^^^^^ error: `no_eq` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:7:22 | 7 | #[salsa::accumulator(no_eq)] | ^^^^^ error: `data` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:10:22 | 10 | #[salsa::accumulator(data = MyAcc)] | ^^^^ error: `db` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:13:22 | 13 | #[salsa::accumulator(db = Db)] | ^^ error: unrecognized option `recover_fn` --> tests/compile-fail/accumulator_incompatibles.rs:16:22 | 16 | #[salsa::accumulator(recover_fn = recover)] | ^^^^^^^^^^ error: `lru` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:19:22 | 19 | #[salsa::accumulator(lru = 12)] | ^^^ error: `revisions` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:22:22 | 22 | #[salsa::accumulator(revisions = 12)] | ^^^^^^^^^ error: `constructor` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:25:22 | 25 | #[salsa::accumulator(constructor = Constructor)] | ^^^^^^^^^^^ error: `heap_size` option not allowed here --> tests/compile-fail/accumulator_incompatibles.rs:28:22 | 28 | #[salsa::accumulator(heap_size = size)] | ^^^^^^^^^
salsa-0.23.0/tests/compile-fail/derive_update_expansion_failure.rs000064400000000000000000000003311046102023000234720ustar 00000000000000#[derive(salsa::Update)] union U { field: i32, } #[derive(salsa::Update)] struct S { #[update(with(missing_unsafe))] bad: i32, } fn missing_unsafe(_: *mut i32, _: i32) -> bool { true } fn main() {}
salsa-0.23.0/tests/compile-fail/derive_update_expansion_failure.stderr000064400000000000000000000004501046102023000243530ustar 00000000000000error: `derive(Update)` does not support `union` --> tests/compile-fail/derive_update_expansion_failure.rs:2:1 | 2 | union U { | ^^^^^ error: expected
`unsafe` --> tests/compile-fail/derive_update_expansion_failure.rs:8:14 | 8 | #[update(with(missing_unsafe))] | ^^^^ salsa-0.23.0/tests/compile-fail/get-on-private-interned-field.rs000064400000000000000000000003201046102023000226050ustar 00000000000000mod a { #[salsa::interned] pub struct MyInterned<'db> { field: u32, } } fn test<'db>(db: &'db dyn salsa::Database, interned: a::MyInterned<'db>) { interned.field(db); } fn main() {} salsa-0.23.0/tests/compile-fail/get-on-private-interned-field.stderr000064400000000000000000000004021046102023000234650ustar 00000000000000error[E0624]: method `field` is private --> tests/compile-fail/get-on-private-interned-field.rs:9:14 | 2 | #[salsa::interned] | ------------------ private method defined here ... 9 | interned.field(db); | ^^^^^ private method salsa-0.23.0/tests/compile-fail/get-on-private-tracked-field.rs000064400000000000000000000003131046102023000224140ustar 00000000000000mod a { #[salsa::tracked] pub struct MyTracked<'db> { field: u32, } } fn test<'db>(db: &'db dyn salsa::Database, tracked: a::MyTracked<'db>) { tracked.field(db); } fn main() {} salsa-0.23.0/tests/compile-fail/get-on-private-tracked-field.stderr000064400000000000000000000003751046102023000233030ustar 00000000000000error[E0624]: method `field` is private --> tests/compile-fail/get-on-private-tracked-field.rs:9:13 | 2 | #[salsa::tracked] | ----------------- private method defined here ... 9 | tracked.field(db); | ^^^^^ private method salsa-0.23.0/tests/compile-fail/get-set-on-private-input-field.rs000064400000000000000000000003671046102023000227400ustar 00000000000000mod a { #[salsa::input] pub struct MyInput { field: u32, } } fn main() { let mut db = salsa::DatabaseImpl::new(); let input = a::MyInput::new(&mut db, 22); input.field(&db); input.set_field(&mut db).to(23); } salsa-0.23.0/tests/compile-fail/get-set-on-private-input-field.stderr000064400000000000000000000010261046102023000236100ustar 00000000000000error[E0624]: method `field` is private --> tests/compile-fail/get-set-on-private-input-field.rs:12:11 | 2 | #[salsa::input] | --------------- private method defined here ... 12 | input.field(&db); | ^^^^^ private method error[E0624]: method `set_field` is private --> tests/compile-fail/get-set-on-private-input-field.rs:13:11 | 2 | #[salsa::input] | --------------- private method defined here ... 
13 | input.set_field(&mut db).to(23); | ^^^^^^^^^ private method salsa-0.23.0/tests/compile-fail/input_struct_incompatibles.rs000064400000000000000000000010641046102023000225370ustar 00000000000000#[salsa::input(returns(ref))] struct InputWithRetRef(u32); #[salsa::input(specify)] struct InputWithSpecify(u32); #[salsa::input(no_eq)] struct InputNoWithEq(u32); #[salsa::input(db = Db)] struct InputWithDb(u32); #[salsa::input(recover_fn = recover)] struct InputWithRecover(u32); #[salsa::input(lru =12)] struct InputWithLru(u32); #[salsa::input(revisions = 12)] struct InputWithRevisions(u32); #[salsa::input] struct InputWithTrackedField { #[tracked] field: u32, } #[salsa::input(heap_size = size)] struct InputWithHeapSize(u32); fn main() {} salsa-0.23.0/tests/compile-fail/input_struct_incompatibles.stderr000064400000000000000000000034251046102023000234210ustar 00000000000000error: `returns` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:1:16 | 1 | #[salsa::input(returns(ref))] | ^^^^^^^ error: `specify` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:4:16 | 4 | #[salsa::input(specify)] | ^^^^^^^ error: `no_eq` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:7:16 | 7 | #[salsa::input(no_eq)] | ^^^^^ error: `db` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:10:16 | 10 | #[salsa::input(db = Db)] | ^^ error: unrecognized option `recover_fn` --> tests/compile-fail/input_struct_incompatibles.rs:13:16 | 13 | #[salsa::input(recover_fn = recover)] | ^^^^^^^^^^ error: `lru` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:16:16 | 16 | #[salsa::input(lru =12)] | ^^^ error: `revisions` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:19:16 | 19 | #[salsa::input(revisions = 12)] | ^^^^^^^^^ error: `#[tracked]` cannot be used with `#[salsa::input]` --> tests/compile-fail/input_struct_incompatibles.rs:24:5 | 24 | / #[tracked] 25 | | field: u32, | |______________^ error: `heap_size` option not allowed here --> tests/compile-fail/input_struct_incompatibles.rs:28:16 | 28 | #[salsa::input(heap_size = size)] | ^^^^^^^^^ error: cannot find attribute `tracked` in this scope --> tests/compile-fail/input_struct_incompatibles.rs:24:7 | 24 | #[tracked] | ^^^^^^^ | help: consider importing one of these attribute macros | 1 + use salsa::tracked; | 1 + use salsa_macros::tracked; | salsa-0.23.0/tests/compile-fail/input_struct_unknown_attributes.rs000064400000000000000000000002041046102023000236460ustar 00000000000000#[salsa::input] struct InputWithUnknownAttrs { /// Doc comment field: u32, #[anything] field2: u32, } fn main() {} salsa-0.23.0/tests/compile-fail/input_struct_unknown_attributes.stderr000064400000000000000000000002401046102023000245250ustar 00000000000000error: cannot find attribute `anything` in this scope --> tests/compile-fail/input_struct_unknown_attributes.rs:5:7 | 5 | #[anything] | ^^^^^^^^ salsa-0.23.0/tests/compile-fail/interned_struct_incompatibles.rs000064400000000000000000000013311046102023000232050ustar 00000000000000#[salsa::interned(returns(ref))] struct InternedWithRetRef { field: u32, } #[salsa::interned(specify)] struct InternedWithSpecify { field: u32, } #[salsa::interned(no_eq)] struct InternedWithNoEq { field: u32, } #[salsa::interned(db = Db)] struct InternedWithDb { field: u32, } #[salsa::interned(recover_fn = recover)] struct InternedWithRecover { field: u32, } #[salsa::interned(lru = 12)] struct InternedWithLru { 
field: u32, } #[salsa::interned] struct InternedWithTrackedField { #[tracked] field: u32, } #[salsa::interned(revisions = 0)] struct InternedWithZeroRevisions { field: u32, } #[salsa::interned(heap_size = size)] struct AccWithHeapSize { field: u32, } fn main() {} salsa-0.23.0/tests/compile-fail/interned_struct_incompatibles.stderr000064400000000000000000000032621046102023000240710ustar 00000000000000error: `returns` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:1:19 | 1 | #[salsa::interned(returns(ref))] | ^^^^^^^ error: `specify` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:6:19 | 6 | #[salsa::interned(specify)] | ^^^^^^^ error: `no_eq` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:11:19 | 11 | #[salsa::interned(no_eq)] | ^^^^^ error: `db` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:16:19 | 16 | #[salsa::interned(db = Db)] | ^^ error: unrecognized option `recover_fn` --> tests/compile-fail/interned_struct_incompatibles.rs:21:19 | 21 | #[salsa::interned(recover_fn = recover)] | ^^^^^^^^^^ error: `lru` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:26:19 | 26 | #[salsa::interned(lru = 12)] | ^^^ error: `#[tracked]` cannot be used with `#[salsa::interned]` --> tests/compile-fail/interned_struct_incompatibles.rs:33:5 | 33 | / #[tracked] 34 | | field: u32, | |______________^ error: `heap_size` option not allowed here --> tests/compile-fail/interned_struct_incompatibles.rs:42:19 | 42 | #[salsa::interned(heap_size = size)] | ^^^^^^^^^ error: cannot find attribute `tracked` in this scope --> tests/compile-fail/interned_struct_incompatibles.rs:33:7 | 33 | #[tracked] | ^^^^^^^ | help: consider importing one of these attribute macros | 1 + use salsa::tracked; | 1 + use salsa_macros::tracked; | salsa-0.23.0/tests/compile-fail/interned_struct_unknown_attribute.rs000064400000000000000000000003041046102023000241350ustar 00000000000000#[salsa::interned] struct UnknownAttributeInterned { /// Test doc comment field: bool, #[unknown_attr] field2: bool, #[salsa::tracked] wrong_tracked: bool, } fn main() {} salsa-0.23.0/tests/compile-fail/interned_struct_unknown_attribute.stderr000064400000000000000000000007421046102023000250220ustar 00000000000000error: only a single lifetime parameter is accepted --> tests/compile-fail/interned_struct_unknown_attribute.rs:1:1 | 1 | #[salsa::interned] | ^^^^^^^^^^^^^^^^^^ | = note: this error originates in the attribute macro `salsa::interned` (in Nightly builds, run with -Z macro-backtrace for more info) error: cannot find attribute `unknown_attr` in this scope --> tests/compile-fail/interned_struct_unknown_attribute.rs:5:7 | 5 | #[unknown_attr] | ^^^^^^^^^^^^ salsa-0.23.0/tests/compile-fail/invalid_return_mode.rs000064400000000000000000000005431046102023000211150ustar 00000000000000use salsa::Database as Db; #[salsa::input] struct MyInput { #[returns(clone)] text: String, } #[salsa::tracked(returns(not_a_return_mode))] fn tracked_fn_invalid_return_mode(db: &dyn Db, input: MyInput) -> String { input.text(db) } #[salsa::input] struct MyInvalidInput { #[returns(not_a_return_mode)] text: String, } fn main() { }salsa-0.23.0/tests/compile-fail/invalid_return_mode.stderr000064400000000000000000000012311046102023000217670ustar 00000000000000error: Invalid return mode. 
Allowed modes are: ["copy", "clone", "ref", "deref", "as_ref", "as_deref"] --> tests/compile-fail/invalid_return_mode.rs:9:26 | 9 | #[salsa::tracked(returns(not_a_return_mode))] | ^^^^^^^^^^^^^^^^^ error: Invalid return mode. Allowed modes are: ["copy", "clone", "ref", "deref", "as_ref", "as_deref"] --> tests/compile-fail/invalid_return_mode.rs:16:15 | 16 | #[returns(not_a_return_mode)] | ^^^^^^^^^^^^^^^^^ error: cannot find attribute `returns` in this scope --> tests/compile-fail/invalid_return_mode.rs:16:7 | 16 | #[returns(not_a_return_mode)] | ^^^^^^^ salsa-0.23.0/tests/compile-fail/invalid_update_field.rs000064400000000000000000000001141046102023000212110ustar 00000000000000#[derive(salsa::Update)] struct S2<'a> { bad2: &'a str, } fn main() {} salsa-0.23.0/tests/compile-fail/invalid_update_field.stderr000064400000000000000000000006131046102023000220740ustar 00000000000000error: lifetime may not live long enough --> tests/compile-fail/invalid_update_field.rs:1:10 | 1 | #[derive(salsa::Update)] | ^^^^^^^^^^^^^ requires that `'a` must outlive `'static` 2 | struct S2<'a> { | -- lifetime `'a` defined here | = note: this error originates in the derive macro `salsa::Update` (in Nightly builds, run with -Z macro-backtrace for more info) salsa-0.23.0/tests/compile-fail/invalid_update_with.rs000064400000000000000000000006461046102023000211130ustar 00000000000000#[derive(salsa::Update)] struct S2 { #[update(unsafe(with(my_wrong_update)))] bad: i32, #[update(unsafe(with(my_wrong_update2)))] bad2: i32, #[update(unsafe(with(my_wrong_update3)))] bad3: i32, #[update(unsafe(with(true)))] bad4: &'static str, } fn my_wrong_update() {} fn my_wrong_update2(_: (), _: ()) -> bool { true } fn my_wrong_update3(_: *mut i32, _: i32) -> () {} fn main() {} salsa-0.23.0/tests/compile-fail/invalid_update_with.stderr000064400000000000000000000032121046102023000217620ustar 00000000000000error[E0308]: mismatched types --> tests/compile-fail/invalid_update_with.rs:3:26 | 3 | #[update(unsafe(with(my_wrong_update)))] | ---- ^^^^^^^^^^^^^^^ incorrect number of function parameters | | | expected due to this | = note: expected fn pointer `unsafe fn(*mut i32, i32) -> bool` found fn item `fn() -> () {my_wrong_update}` error[E0308]: mismatched types --> tests/compile-fail/invalid_update_with.rs:5:26 | 5 | #[update(unsafe(with(my_wrong_update2)))] | ---- ^^^^^^^^^^^^^^^^ expected fn pointer, found fn item | | | expected due to this | = note: expected fn pointer `unsafe fn(*mut i32, i32) -> bool` found fn item `fn((), ()) -> bool {my_wrong_update2}` error[E0308]: mismatched types --> tests/compile-fail/invalid_update_with.rs:7:26 | 7 | #[update(unsafe(with(my_wrong_update3)))] | ---- ^^^^^^^^^^^^^^^^ expected fn pointer, found fn item | | | expected due to this | = note: expected fn pointer `unsafe fn(*mut i32, i32) -> bool` found fn item `fn(*mut i32, i32) -> () {my_wrong_update3}` error[E0308]: mismatched types --> tests/compile-fail/invalid_update_with.rs:9:26 | 9 | #[update(unsafe(with(true)))] | ---- ^^^^ expected fn pointer, found `bool` | | | expected due to this | = note: expected fn pointer `unsafe fn(*mut &'static str, &'static str) -> bool` found type `bool` salsa-0.23.0/tests/compile-fail/lru_can_not_be_used_with_specify.rs000064400000000000000000000003231046102023000236260ustar 00000000000000#[salsa::input] struct MyInput { field: u32, } #[salsa::tracked(lru = 3, specify)] fn lru_can_not_be_used_with_specify(db: &dyn salsa::Database, input: MyInput) -> u32 { input.field(db) } fn main() {} 
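For contrast with the fixture above: `lru` by itself is accepted on tracked functions and caps how many memoized values salsa retains for that query; only the combination with `specify` is rejected. A minimal valid sketch using the same shapes as the fixture:

#[salsa::input]
struct MyInput {
    field: u32,
}

// Keeps at most 3 memoized results for this query; older entries
// may be evicted and recomputed on demand.
#[salsa::tracked(lru = 3)]
fn double(db: &dyn salsa::Database, input: MyInput) -> u32 {
    input.field(db) * 2
}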
salsa-0.23.0/tests/compile-fail/lru_can_not_be_used_with_specify.stderr000064400000000000000000000003221046102023000245040ustar 00000000000000error: the `specify` and `lru` options cannot be used together --> tests/compile-fail/lru_can_not_be_used_with_specify.rs:6:27 | 6 | #[salsa::tracked(lru = 3, specify)] | ^^^^^^^ salsa-0.23.0/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs000064400000000000000000000010151046102023000315700ustar 00000000000000use salsa::prelude::*; #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyTracked<'db> { MyTracked::new(db, input.field(db) / 2) } fn main() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, 22); let tracked = tracked_fn(&db, input); input.set_field(&mut db).to(24); tracked.field(&db); // tracked comes from prior revision } ././@LongLink00006440000000000000000000000151000000000000007770Lustar salsa-0.23.0/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.stderrsalsa-0.23.0/tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.st000064400000000000000000000010121046102023000315670ustar 00000000000000error[E0502]: cannot borrow `db` as mutable because it is also borrowed as immutable --> tests/compile-fail/panic-when-reading-fields-of-tracked-structs-from-older-revisions.rs:22:21 | 21 | let tracked = tracked_fn(&db, input); | --- immutable borrow occurs here 22 | input.set_field(&mut db).to(24); | ^^^^^^^ mutable borrow occurs here 23 | tracked.field(&db); // tracked comes from prior revision | ------- immutable borrow later used here salsa-0.23.0/tests/compile-fail/salsa_fields_incompatibles.rs000064400000000000000000000003051046102023000224220ustar 00000000000000// Banned field name: `from` #[salsa::input] struct InputWithBannedName1 { from: u32, } // Banned field name: `new` #[salsa::input] struct InputWithBannedName2 { new: u32, } fn main() {} salsa-0.23.0/tests/compile-fail/salsa_fields_incompatibles.stderr000064400000000000000000000004671046102023000233120ustar 00000000000000error: the field name `from` is disallowed in salsa structs --> tests/compile-fail/salsa_fields_incompatibles.rs:4:5 | 4 | from: u32, | ^^^^ error: the field name `new` is disallowed in salsa structs --> tests/compile-fail/salsa_fields_incompatibles.rs:10:5 | 10 | new: u32, | ^^^ salsa-0.23.0/tests/compile-fail/singleton_only_for_input.rs000064400000000000000000000010631046102023000222120ustar 00000000000000//! Compile Singleton struct test: //! //! Singleton flags are only allowed for input structs. 
If applied on any other Salsa struct compilation must fail #[salsa::input(singleton)] struct MyInput { field: u32, } #[salsa::tracked(singleton)] struct MyTracked<'db> { field: u32, } #[salsa::tracked(singleton)] fn create_tracked_structs(db: &dyn salsa::Database, input: MyInput) -> Vec { (0..input.field(db)) .map(|i| MyTracked::new(db, i)) .collect() } #[salsa::accumulator(singleton)] struct Integers(u32); fn main() {} salsa-0.23.0/tests/compile-fail/singleton_only_for_input.stderr000064400000000000000000000005431046102023000230730ustar 00000000000000error: `singleton` option not allowed here --> tests/compile-fail/singleton_only_for_input.rs:15:18 | 15 | #[salsa::tracked(singleton)] | ^^^^^^^^^ error: `singleton` option not allowed here --> tests/compile-fail/singleton_only_for_input.rs:22:22 | 22 | #[salsa::accumulator(singleton)] | ^^^^^^^^^ salsa-0.23.0/tests/compile-fail/span-input-setter.rs000064400000000000000000000003151046102023000204630ustar 00000000000000#[salsa::input] pub struct MyInput { field: u32, } fn main() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&mut db, 22); input.field(&db); input.set_field(22); } salsa-0.23.0/tests/compile-fail/span-input-setter.stderr000064400000000000000000000012051046102023000213410ustar 00000000000000error[E0308]: mismatched types --> tests/compile-fail/span-input-setter.rs:10:21 | 10 | input.set_field(22); | --------- ^^ expected `&mut _`, found integer | | | arguments to this method are incorrect | = note: expected mutable reference `&mut _` found type `{integer}` note: method defined here --> tests/compile-fail/span-input-setter.rs:3:5 | 1 | #[salsa::input] | --------------- 2 | pub struct MyInput { 3 | field: u32, | ^^^^^ help: consider mutably borrowing here | 10 | input.set_field(&mut 22); | ++++ salsa-0.23.0/tests/compile-fail/span-tracked-getter.rs000064400000000000000000000003731046102023000207310ustar 00000000000000#[salsa::tracked] pub struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn my_fn(db: &dyn salsa::Database) { let x = MyTracked::new(db, 22); x.field(22); } fn main() { let mut db = salsa::DatabaseImpl::new(); my_fn(&db); } salsa-0.23.0/tests/compile-fail/span-tracked-getter.stderr000064400000000000000000000015101046102023000216020ustar 00000000000000error[E0308]: mismatched types --> tests/compile-fail/span-tracked-getter.rs:9:13 | 9 | x.field(22); | ----- ^^ expected `&_`, found integer | | | arguments to this method are incorrect | = note: expected reference `&_` found type `{integer}` note: method defined here --> tests/compile-fail/span-tracked-getter.rs:3:5 | 1 | #[salsa::tracked] | ----------------- 2 | pub struct MyTracked<'db> { 3 | field: u32, | ^^^^^ help: consider borrowing here | 9 | x.field(&22); | + warning: variable does not need to be mutable --> tests/compile-fail/span-tracked-getter.rs:13:9 | 13 | let mut db = salsa::DatabaseImpl::new(); | ----^^ | | | help: remove this `mut` | = note: `#[warn(unused_mut)]` on by default salsa-0.23.0/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs000064400000000000000000000006151046102023000262610ustar 00000000000000//! Test that `specify` does not work if the key is a `salsa::input` //! 
compilation fails #![allow(warnings)] #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked(specify)] fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyTracked<'db> { MyTracked::new(db, input.field(db) * 2) } fn main() {} salsa-0.23.0/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.stderr000064400000000000000000000021571046102023000271430ustar 00000000000000error[E0277]: the trait bound `MyInput: TrackedStructInDb` is not satisfied --> tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-input.rs:15:1 | 15 | #[salsa::tracked(specify)] | ^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `TrackedStructInDb` is not implemented for `MyInput` | = help: the trait `TrackedStructInDb` is implemented for `MyTracked<'_>` note: required by a bound in `salsa::function::specify::>::specify_and_record` --> src/function/specify.rs | | pub fn specify_and_record<'db>(&'db self, db: &'db C::DbView, key: Id, value: C::Output<'db>) | ------------------ required by a bound in this associated function | where | C::Input<'db>: TrackedStructInDb, | ^^^^^^^^^^^^^^^^^ required by this bound in `salsa::function::specify::>::specify_and_record` = note: this error originates in the macro `salsa::plumbing::setup_tracked_fn` which comes from the expansion of the attribute macro `salsa::tracked` (in Nightly builds, run with -Z macro-backtrace for more info) salsa-0.23.0/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs000064400000000000000000000006431046102023000267330ustar 00000000000000//! Test that `specify` does not work if the key is a `salsa::interned` //! compilation fails #![allow(warnings)] #[salsa::interned] struct MyInterned<'db> { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked(specify)] fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInterned<'db>) -> MyTracked<'db> { MyTracked::new(db, input.field(db) * 2) } fn main() {} salsa-0.23.0/tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.stderr000064400000000000000000000022001046102023000276010ustar 00000000000000error[E0277]: the trait bound `MyInterned<'_>: TrackedStructInDb` is not satisfied --> tests/compile-fail/specify-does-not-work-if-the-key-is-a-salsa-interned.rs:15:1 | 15 | #[salsa::tracked(specify)] | ^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `TrackedStructInDb` is not implemented for `MyInterned<'_>` | = help: the trait `TrackedStructInDb` is implemented for `MyTracked<'_>` note: required by a bound in `salsa::function::specify::>::specify_and_record` --> src/function/specify.rs | | pub fn specify_and_record<'db>(&'db self, db: &'db C::DbView, key: Id, value: C::Output<'db>) | ------------------ required by a bound in this associated function | where | C::Input<'db>: TrackedStructInDb, | ^^^^^^^^^^^^^^^^^ required by this bound in `salsa::function::specify::>::specify_and_record` = note: this error originates in the macro `salsa::plumbing::setup_tracked_fn` which comes from the expansion of the attribute macro `salsa::tracked` (in Nightly builds, run with -Z macro-backtrace for more info) salsa-0.23.0/tests/compile-fail/tracked_fn_incompatibles.rs000064400000000000000000000031511046102023000220730ustar 00000000000000use salsa::Database as Db; #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked(data = Data)] fn tracked_fn_with_data(db: &dyn Db, input: MyInput) -> u32 { input.field(db) * 2 } #[salsa::tracked(db = Db)] fn tracked_fn_with_db(db: &dyn 
Db, input: MyInput) -> u32 { input.field(db) * 2 } #[salsa::tracked(revisions = 12)] fn tracked_fn_with_revisions(db: &dyn Db, input: MyInput) -> u32 { input.field(db) * 2 } #[salsa::tracked(constructor = TrackedFn3)] fn tracked_fn_with_constructor(db: &dyn Db, input: MyInput) -> u32 { input.field(db) * 2 } #[salsa::tracked] fn tracked_fn_with_one_input(db: &dyn Db) -> u32 {} #[salsa::tracked] fn tracked_fn_with_receiver_not_applied_to_impl_block(&self, db: &dyn Db) -> u32 {} #[salsa::tracked(specify)] fn tracked_fn_with_too_many_arguments_for_specify( db: &dyn Db, input: MyInput, input: MyInput, ) -> u32 { } #[salsa::interned] struct MyInterned<'db> { field: u32, } #[salsa::tracked] fn tracked_fn_with_lt_param_and_elided_lt_on_db_arg1<'db>( db: &dyn Db, interned: MyInterned<'db>, ) -> u32 { interned.field(db) * 2 } #[salsa::tracked] fn tracked_fn_with_lt_param_and_elided_lt_on_db_arg2<'db_lifetime>( db: &dyn Db, interned: MyInterned<'db_lifetime>, ) -> u32 { interned.field(db) * 2 } #[salsa::tracked] fn tracked_fn_with_lt_param_and_elided_lt_on_input<'db>( db: &'db dyn Db, interned: MyInterned, ) -> u32 { interned.field(db) * 2 } #[salsa::tracked] fn tracked_fn_with_multiple_lts<'db1, 'db2>(db: &'db1 dyn Db, interned: MyInterned<'db2>) -> u32 { interned.field(db) * 2 } fn main() {} salsa-0.23.0/tests/compile-fail/tracked_fn_incompatibles.stderr000064400000000000000000000072741046102023000227640ustar 00000000000000error: `data` option not allowed here --> tests/compile-fail/tracked_fn_incompatibles.rs:8:18 | 8 | #[salsa::tracked(data = Data)] | ^^^^ error: `db` option not allowed here --> tests/compile-fail/tracked_fn_incompatibles.rs:13:18 | 13 | #[salsa::tracked(db = Db)] | ^^ error: `revisions` option not allowed here --> tests/compile-fail/tracked_fn_incompatibles.rs:18:18 | 18 | #[salsa::tracked(revisions = 12)] | ^^^^^^^^^ error: `constructor` option not allowed here --> tests/compile-fail/tracked_fn_incompatibles.rs:23:18 | 23 | #[salsa::tracked(constructor = TrackedFn3)] | ^^^^^^^^^^^ error: #[salsa::tracked] must also be applied to the impl block for tracked methods --> tests/compile-fail/tracked_fn_incompatibles.rs:32:55 | 32 | fn tracked_fn_with_receiver_not_applied_to_impl_block(&self, db: &dyn Db) -> u32 {} | ^^^^^ error: only functions with a single salsa struct as their input can be specified --> tests/compile-fail/tracked_fn_incompatibles.rs:34:18 | 34 | #[salsa::tracked(specify)] | ^^^^^^^ error: must have a `'db` lifetime --> tests/compile-fail/tracked_fn_incompatibles.rs:49:9 | 49 | db: &dyn Db, | ^ error: must have a `'db_lifetime` lifetime --> tests/compile-fail/tracked_fn_incompatibles.rs:57:9 | 57 | db: &dyn Db, | ^ error: only a single lifetime parameter is accepted --> tests/compile-fail/tracked_fn_incompatibles.rs:72:39 | 72 | fn tracked_fn_with_multiple_lts<'db1, 'db2>(db: &'db1 dyn Db, interned: MyInterned<'db2>) -> u32 { | ^^^^ error: `self` parameter is only allowed in associated functions --> tests/compile-fail/tracked_fn_incompatibles.rs:32:55 | 32 | fn tracked_fn_with_receiver_not_applied_to_impl_block(&self, db: &dyn Db) -> u32 {} | ^^^^^ not semantically valid as function parameter | = note: associated functions are those in `impl` or `trait` definitions error[E0415]: identifier `input` is bound more than once in this parameter list --> tests/compile-fail/tracked_fn_incompatibles.rs:38:5 | 38 | input: MyInput, | ^^^^^ used as parameter more than once error[E0106]: missing lifetime specifier --> tests/compile-fail/tracked_fn_incompatibles.rs:66:15 | 66 | 
interned: MyInterned, | ^^^^^^^^^^ expected named lifetime parameter | help: consider using the `'db` lifetime | 66 | interned: MyInterned<'db>, | +++++ error[E0308]: mismatched types --> tests/compile-fail/tracked_fn_incompatibles.rs:29:46 | 28 | #[salsa::tracked] | ----------------- implicitly returns `()` as its body has no tail or `return` expression 29 | fn tracked_fn_with_one_input(db: &dyn Db) -> u32 {} | ^^^ expected `u32`, found `()` error[E0308]: mismatched types --> tests/compile-fail/tracked_fn_incompatibles.rs:32:78 | 32 | fn tracked_fn_with_receiver_not_applied_to_impl_block(&self, db: &dyn Db) -> u32 {} | -------------------------------------------------- ^^^ expected `u32`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_fn_incompatibles.rs:39:6 | 35 | fn tracked_fn_with_too_many_arguments_for_specify( | ---------------------------------------------- implicitly returns `()` as its body has no tail or `return` expression ... 39 | ) -> u32 { | ^^^ expected `u32`, found `()` salsa-0.23.0/tests/compile-fail/tracked_fn_return_not_update.rs000064400000000000000000000003621046102023000230040ustar 00000000000000use salsa::Database as Db; #[salsa::input] struct MyInput {} #[derive(Clone, Debug)] struct NotUpdate; #[salsa::tracked] fn tracked_fn<'db>(db: &'db dyn Db, input: MyInput) -> NotUpdate { _ = (db, input); NotUpdate } fn main() {} salsa-0.23.0/tests/compile-fail/tracked_fn_return_not_update.stderr000064400000000000000000000034521046102023000236660ustar 00000000000000error[E0369]: binary operation `==` cannot be applied to type `&NotUpdate` --> tests/compile-fail/tracked_fn_return_not_update.rs:10:56 | 10 | fn tracked_fn<'db>(db: &'db dyn Db, input: MyInput) -> NotUpdate { | ^^^^^^^^^ | note: an implementation of `PartialEq` might be missing for `NotUpdate` --> tests/compile-fail/tracked_fn_return_not_update.rs:7:1 | 7 | struct NotUpdate; | ^^^^^^^^^^^^^^^^ must implement `PartialEq` help: consider annotating `NotUpdate` with `#[derive(PartialEq)]` | 7 + #[derive(PartialEq)] 8 | struct NotUpdate; | error[E0599]: the function or associated item `maybe_update` exists for struct `UpdateDispatch`, but its trait bounds were not satisfied --> tests/compile-fail/tracked_fn_return_not_update.rs:10:56 | 7 | struct NotUpdate; | ---------------- doesn't satisfy `NotUpdate: PartialEq` or `NotUpdate: Update` ... 
10 | fn tracked_fn<'db>(db: &'db dyn Db, input: MyInput) -> NotUpdate { | ^^^^^^^^^ function or associated item cannot be called on `UpdateDispatch` due to unsatisfied trait bounds | ::: src/update.rs | | pub struct Dispatch(PhantomData); | ---------------------- doesn't satisfy `_: UpdateFallback` | = note: the following trait bounds were not satisfied: `NotUpdate: Update` `NotUpdate: PartialEq` which is required by `UpdateDispatch: UpdateFallback` note: the trait `Update` must be implemented --> src/update.rs | | pub unsafe trait Update { | ^^^^^^^^^^^^^^^^^^^^^^^ help: consider annotating `NotUpdate` with `#[derive(PartialEq)]` | 7 + #[derive(PartialEq)] 8 | struct NotUpdate; | salsa-0.23.0/tests/compile-fail/tracked_fn_return_ref.rs000064400000000000000000000016211046102023000214150ustar 00000000000000use salsa::Database as Db; #[salsa::input] struct MyInput { #[returns(ref)] text: String, } #[derive(Clone, Debug, PartialEq, Eq)] struct ContainsRef<'db> { text: &'db str, } #[salsa::tracked] fn tracked_fn_return_ref<'db>(db: &'db dyn Db, input: MyInput) -> &'db str { input.text(db) } #[salsa::tracked] fn tracked_fn_return_struct_containing_ref<'db>( db: &'db dyn Db, input: MyInput, ) -> ContainsRef<'db> { ContainsRef { text: input.text(db), } } #[salsa::tracked] fn tracked_fn_return_struct_containing_ref_elided_implicit<'db>( db: &'db dyn Db, input: MyInput, ) -> ContainsRef { ContainsRef { text: input.text(db), } } #[salsa::tracked] fn tracked_fn_return_struct_containing_ref_elided_explicit<'db>( db: &'db dyn Db, input: MyInput, ) -> ContainsRef<'_> { ContainsRef { text: input.text(db), } } fn main() {} salsa-0.23.0/tests/compile-fail/tracked_fn_return_ref.stderr000064400000000000000000000040331046102023000222740ustar 00000000000000error[E0106]: missing lifetime specifier --> tests/compile-fail/tracked_fn_return_ref.rs:33:6 | 33 | ) -> ContainsRef { | ^^^^^^^^^^^ expected named lifetime parameter | help: consider using the `'db` lifetime | 33 | ) -> ContainsRef<'db> { | +++++ warning: elided lifetime has a name --> tests/compile-fail/tracked_fn_return_ref.rs:33:6 | 30 | fn tracked_fn_return_struct_containing_ref_elided_implicit<'db>( | --- lifetime `'db` declared here ... 33 | ) -> ContainsRef { | ^^^^^^^^^^^ this elided lifetime gets resolved as `'db` | = note: `#[warn(elided_named_lifetimes)]` on by default warning: elided lifetime has a name --> tests/compile-fail/tracked_fn_return_ref.rs:43:18 | 40 | fn tracked_fn_return_struct_containing_ref_elided_explicit<'db>( | --- lifetime `'db` declared here ... 43 | ) -> ContainsRef<'_> { | ^^ this elided lifetime gets resolved as `'db` error: lifetime may not live long enough --> tests/compile-fail/tracked_fn_return_ref.rs:15:67 | 15 | fn tracked_fn_return_ref<'db>(db: &'db dyn Db, input: MyInput) -> &'db str { | --- lifetime `'db` defined here ^ requires that `'db` must outlive `'static` error: lifetime may not live long enough --> tests/compile-fail/tracked_fn_return_ref.rs:23:6 | 20 | fn tracked_fn_return_struct_containing_ref<'db>( | --- lifetime `'db` defined here ... 23 | ) -> ContainsRef<'db> { | ^^^^^^^^^^^ requires that `'db` must outlive `'static` error: lifetime may not live long enough --> tests/compile-fail/tracked_fn_return_ref.rs:43:6 | 40 | fn tracked_fn_return_struct_containing_ref_elided_explicit<'db>( | --- lifetime `'db` defined here ... 
43 | ) -> ContainsRef<'_> { | ^^^^^^^^^^^ requires that `'db` must outlive `'static` salsa-0.23.0/tests/compile-fail/tracked_impl_incompatibles.rs000064400000000000000000000023131046102023000224300ustar 00000000000000#[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked(returns(ref))] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(specify)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(no_eq)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(data = Data)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(db = Db)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(recover_fn = recover)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(lru = 32)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(revisions = 32)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked(constructor = Constructor)] impl<'db> std::default::Default for MyTracked<'db> { fn default() -> Self {} } #[salsa::tracked] impl<'db> std::default::Default for [MyTracked<'db>; 12] { fn default() -> Self {} } fn main() {} salsa-0.23.0/tests/compile-fail/tracked_impl_incompatibles.stderr000064400000000000000000000211471046102023000233150ustar 00000000000000error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:6:18 | 6 | #[salsa::tracked(returns(ref))] | ^^^^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:11:18 | 11 | #[salsa::tracked(specify)] | ^^^^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:16:18 | 16 | #[salsa::tracked(no_eq)] | ^^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:21:18 | 21 | #[salsa::tracked(data = Data)] | ^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:26:18 | 26 | #[salsa::tracked(db = Db)] | ^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:31:18 | 31 | #[salsa::tracked(recover_fn = recover)] | ^^^^^^^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:36:18 | 36 | #[salsa::tracked(lru = 32)] | ^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:41:18 | 41 | #[salsa::tracked(revisions = 32)] | ^^^^^^^^^ error: unexpected token --> tests/compile-fail/tracked_impl_incompatibles.rs:46:18 | 46 | #[salsa::tracked(constructor = Constructor)] | ^^^^^^^^^^^ error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:12:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 12 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:17:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 
17 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:22:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 22 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:27:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 27 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:32:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 32 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:37:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 37 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:42:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 42 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0119]: conflicting implementations of trait `Default` for type `MyTracked<'_>` --> tests/compile-fail/tracked_impl_incompatibles.rs:47:1 | 7 | impl<'db> std::default::Default for MyTracked<'db> { | -------------------------------------------------- first implementation here ... 
47 | impl<'db> std::default::Default for MyTracked<'db> { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `MyTracked<'_>` error[E0117]: only traits defined in the current crate can be implemented for arbitrary types --> tests/compile-fail/tracked_impl_incompatibles.rs:52:1 | 52 | impl<'db> std::default::Default for [MyTracked<'db>; 12] { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^-------------------- | | | this is not defined in the current crate because arrays are always foreign | = note: impl doesn't have any local type before any uncovered type parameters = note: for more information see https://doc.rust-lang.org/reference/items/implementations.html#orphan-rules = note: define and implement a trait or new type instead error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:8:21 | 8 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:13:21 | 13 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:18:21 | 18 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:23:21 | 23 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:28:21 | 28 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:33:21 | 33 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:38:21 | 38 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:43:21 | 43 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:48:21 | 48 | fn default() -> Self {} | ------- ^^^^ expected `MyTracked<'_>`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression error[E0308]: mismatched types --> tests/compile-fail/tracked_impl_incompatibles.rs:53:21 | 53 | fn default() -> Self {} | ------- ^^^^ expected `[MyTracked<'_>; 12]`, found `()` | | | implicitly returns `()` as its body has no tail or `return` expression | = note: expected array `[MyTracked<'db>; 12]` found unit type `()` salsa-0.23.0/tests/compile-fail/tracked_method_incompatibles.rs000064400000000000000000000011601046102023000227460ustar 00000000000000#[salsa::tracked] struct Tracked<'db> { field: u32, } #[salsa::tracked] impl<'db> 
Tracked<'db> { #[salsa::tracked] fn ref_self(&self, db: &dyn salsa::Database) {} } #[salsa::tracked] impl<'db> Tracked<'db> { #[salsa::tracked] fn ref_mut_self(&mut self, db: &dyn salsa::Database) {} } #[salsa::tracked] impl<'db> Tracked<'db> { #[salsa::tracked] fn multiple_lifetimes<'db1>(&mut self, db: &'db1 dyn salsa::Database) {} } #[salsa::tracked] impl<'db> Tracked<'db> { #[salsa::tracked] fn type_generics(&mut self, db: &dyn salsa::Database) -> T { panic!() } } fn main() {} salsa-0.23.0/tests/compile-fail/tracked_method_incompatibles.stderr000064400000000000000000000060671046102023000236400ustar 00000000000000error: #[salsa::tracked] must also be applied to the impl block for tracked methods --> tests/compile-fail/tracked_method_incompatibles.rs:9:17 | 9 | fn ref_self(&self, db: &dyn salsa::Database) {} | ^^^^^ error: tracked methods's first argument must be declared as `self`, not `&self` or `&mut self` --> tests/compile-fail/tracked_method_incompatibles.rs:9:17 | 9 | fn ref_self(&self, db: &dyn salsa::Database) {} | ^ error: #[salsa::tracked] must also be applied to the impl block for tracked methods --> tests/compile-fail/tracked_method_incompatibles.rs:15:21 | 15 | fn ref_mut_self(&mut self, db: &dyn salsa::Database) {} | ^^^^^^^^^ error: tracked methods's first argument must be declared as `self`, not `&self` or `&mut self` --> tests/compile-fail/tracked_method_incompatibles.rs:15:21 | 15 | fn ref_mut_self(&mut self, db: &dyn salsa::Database) {} | ^ error: #[salsa::tracked] must also be applied to the impl block for tracked methods --> tests/compile-fail/tracked_method_incompatibles.rs:21:33 | 21 | fn multiple_lifetimes<'db1>(&mut self, db: &'db1 dyn salsa::Database) {} | ^^^^^^^^^ error: tracked method already has a lifetime parameter in scope --> tests/compile-fail/tracked_method_incompatibles.rs:21:27 | 21 | fn multiple_lifetimes<'db1>(&mut self, db: &'db1 dyn salsa::Database) {} | ^^^^ error: only a single lifetime parameter is accepted --> tests/compile-fail/tracked_method_incompatibles.rs:27:22 | 27 | fn type_generics(&mut self, db: &dyn salsa::Database) -> T { | ^ error: tracked methods cannot have non-lifetime generic parameters --> tests/compile-fail/tracked_method_incompatibles.rs:27:22 | 27 | fn type_generics(&mut self, db: &dyn salsa::Database) -> T { | ^ warning: unused variable: `db` --> tests/compile-fail/tracked_method_incompatibles.rs:9:24 | 9 | fn ref_self(&self, db: &dyn salsa::Database) {} | ^^ help: if this is intentional, prefix it with an underscore: `_db` | = note: `#[warn(unused_variables)]` on by default warning: unused variable: `db` --> tests/compile-fail/tracked_method_incompatibles.rs:15:32 | 15 | fn ref_mut_self(&mut self, db: &dyn salsa::Database) {} | ^^ help: if this is intentional, prefix it with an underscore: `_db` warning: unused variable: `db` --> tests/compile-fail/tracked_method_incompatibles.rs:21:44 | 21 | fn multiple_lifetimes<'db1>(&mut self, db: &'db1 dyn salsa::Database) {} | ^^ help: if this is intentional, prefix it with an underscore: `_db` warning: unused variable: `db` --> tests/compile-fail/tracked_method_incompatibles.rs:27:36 | 27 | fn type_generics(&mut self, db: &dyn salsa::Database) -> T { | ^^ help: if this is intentional, prefix it with an underscore: `_db` salsa-0.23.0/tests/compile-fail/tracked_method_on_untracked_impl.rs000064400000000000000000000003131046102023000236110ustar 00000000000000#[salsa::input] struct MyInput { field: u32, } impl MyInput { #[salsa::tracked] fn tracked_method_on_untracked_impl(self, db: 
&dyn Db) -> u32 { input.field(db) } } fn main() {}
salsa-0.23.0/tests/compile-fail/tracked_method_on_untracked_impl.stderr000064400000000000000000000013521046102023000244740ustar 00000000000000error: #[salsa::tracked] must also be applied to the impl block for tracked methods --> tests/compile-fail/tracked_method_on_untracked_impl.rs:8:41 | 8 | fn tracked_method_on_untracked_impl(self, db: &dyn Db) -> u32 { | ^^^^ error[E0405]: cannot find trait `Db` in this scope --> tests/compile-fail/tracked_method_on_untracked_impl.rs:8:56 | 8 | fn tracked_method_on_untracked_impl(self, db: &dyn Db) -> u32 { | ^^ not found in this scope error[E0425]: cannot find value `input` in this scope --> tests/compile-fail/tracked_method_on_untracked_impl.rs:9:9 | 9 | input.field(db) | ^^^^^ not found in this scope
salsa-0.23.0/tests/compile-fail/tracked_struct_incompatibles.rs000064400000000000000000000012351046102023000230150ustar 00000000000000#[salsa::tracked(returns(ref))] struct TrackedWithRetRef { field: u32, } #[salsa::tracked(specify)] struct TrackedSructWithSpecify { field: u32, } #[salsa::tracked(no_eq)] struct TrackedStructWithNoEq { field: u32, } #[salsa::tracked(db = Db)] struct TrackedStructWithDb { field: u32, } #[salsa::tracked(recover_fn = recover)] struct TrackedStructWithRecover { field: u32, } #[salsa::tracked(lru = 12)] struct TrackedStructWithLru { field: u32, } #[salsa::tracked(revisions = 12)] struct TrackedStructWithRevisions { field: u32, } #[salsa::tracked(heap_size = size)] struct TrackedStructWithHeapSize { field: u32, } fn main() {}
salsa-0.23.0/tests/compile-fail/tracked_struct_incompatibles.stderr000064400000000000000000000025521046102023000236770ustar 00000000000000error: `returns` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:1:18 | 1 | #[salsa::tracked(returns(ref))] | ^^^^^^^ error: `specify` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:6:18 | 6 | #[salsa::tracked(specify)] | ^^^^^^^ error: `no_eq` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:11:18 | 11 | #[salsa::tracked(no_eq)] | ^^^^^ error: `db` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:16:18 | 16 | #[salsa::tracked(db = Db)] | ^^ error: unrecognized option `recover_fn` --> tests/compile-fail/tracked_struct_incompatibles.rs:21:18 | 21 | #[salsa::tracked(recover_fn = recover)] | ^^^^^^^^^^ error: `lru` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:26:18 | 26 | #[salsa::tracked(lru = 12)] | ^^^ error: `revisions` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:31:18 | 31 | #[salsa::tracked(revisions = 12)] | ^^^^^^^^^ error: `heap_size` option not allowed here --> tests/compile-fail/tracked_struct_incompatibles.rs:36:18 | 36 | #[salsa::tracked(heap_size = size)] | ^^^^^^^^^
salsa-0.23.0/tests/compile-fail/tracked_struct_not_update.rs000064400000000000000000000001771046102023000223300ustar 00000000000000#[salsa::tracked] struct MyInput<'db> { field: NotUpdate, } #[derive(Clone, Debug, Hash)] struct NotUpdate; fn main() {}
salsa-0.23.0/tests/compile-fail/tracked_struct_not_update.stderr000064400000000000000000000026571046102023000232120ustar 00000000000000error[E0599]: the function or associated item `maybe_update` exists for struct `UpdateDispatch<NotUpdate>`, but its trait bounds were not satisfied --> tests/compile-fail/tracked_struct_not_update.rs:1:1 | 1 | #[salsa::tracked] | ^^^^^^^^^^^^^^^^^ function or associated item cannot be called on `UpdateDispatch<NotUpdate>` due to unsatisfied trait bounds ... 7 | struct NotUpdate; | ---------------- doesn't satisfy `NotUpdate: PartialEq` or `NotUpdate: Update` | ::: src/update.rs | | pub struct Dispatch<D>(PhantomData<D>); | ---------------------- doesn't satisfy `_: UpdateFallback` | note: if you're trying to build a new `UpdateDispatch<NotUpdate>`, consider using `UpdateDispatch::<NotUpdate>::new` which returns `UpdateDispatch<_>` --> src/update.rs | | pub fn new() -> Self { | ^^^^^^^^^^^^^^^^^^^^ = note: the following trait bounds were not satisfied: `NotUpdate: Update` `NotUpdate: PartialEq` which is required by `UpdateDispatch<NotUpdate>: UpdateFallback` note: the trait `Update` must be implemented --> src/update.rs | | pub unsafe trait Update { | ^^^^^^^^^^^^^^^^^^^^^^^ = note: this error originates in the attribute macro `salsa::tracked` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider annotating `NotUpdate` with `#[derive(PartialEq)]` | 7 + #[derive(PartialEq)] 8 | struct NotUpdate; |
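The fixture pair above pins down the failure mode when a tracked struct field type implements neither `salsa::Update` nor `PartialEq`. For contrast, a minimal field type that satisfies the bound, assuming the `salsa::Update` derive used elsewhere in this suite (names are illustrative):

// Deriving `PartialEq` (or `salsa::Update` directly) is what the
// compiler's `help:` above asks for.
#[derive(Clone, Debug, PartialEq, Eq, Hash, salsa::Update)]
struct DoesUpdate(u32);

#[salsa::tracked]
struct MyStruct<'db> {
    field: DoesUpdate,
}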
salsa-0.23.0/tests/compile-fail/tracked_struct_unknown_attribute.rs000064400000000000000000000004131046102023000237430ustar 00000000000000#[salsa::tracked] struct UnknownAttributeTrackedStruct<'db> { #[tracked] tracked: bool, #[unknown_attr] field: bool, #[salsa::tracked] wrong_tracked: bool, /// TestDocComment /// TestDocComment field_with_doc: bool } fn main() {}
salsa-0.23.0/tests/compile-fail/tracked_struct_unknown_attribute.stderr000064400000000000000000000007351046102023000246310ustar 00000000000000error: only a single lifetime parameter is accepted --> tests/compile-fail/tracked_struct_unknown_attribute.rs:1:1 | 1 | #[salsa::tracked] | ^^^^^^^^^^^^^^^^^ | = note: this error originates in the attribute macro `salsa::tracked` (in Nightly builds, run with -Z macro-backtrace for more info) error: cannot find attribute `unknown_attr` in this scope --> tests/compile-fail/tracked_struct_unknown_attribute.rs:5:7 | 5 | #[unknown_attr] | ^^^^^^^^^^^^
salsa-0.23.0/tests/compile_fail.rs000064400000000000000000000002361046102023000151450ustar 00000000000000#[rustversion::all(stable, since(1.84))] #[test] fn compile_fail() { let t = trybuild::TestCases::new(); t.compile_fail("tests/compile-fail/*.rs"); }
salsa-0.23.0/tests/cycle.rs000064400000000000000000001000721046102023000136200ustar 00000000000000//! Test cases for fixpoint iteration cycle resolution. //! //! These test cases use a generic query setup that allows constructing arbitrary dependency //! graphs, and attempts to achieve good coverage of various cases. mod common; use common::{ExecuteValidateLoggerDatabase, LogDatabase}; use expect_test::expect; use salsa::{CycleRecoveryAction, Database as Db, DatabaseImpl as DbImpl, Durability, Setter}; #[cfg(not(miri))] use test_log::test; #[derive(Clone, Copy, Debug, PartialEq, Eq, salsa::Update)] enum Value { N(u8), OutOfBounds, TooManyIterations, } impl Value { fn to_value(self) -> Option<u8> { if let Self::N(val) = self { Some(val) } else { None } } } /// A vector of inputs a query can evaluate to get an iterator of values to operate on. /// /// This allows creating arbitrary query graphs between the four queries below (`min_iterate`, /// `max_iterate`, `min_panic`, `max_panic`) for testing cycle behaviors. #[salsa::input] struct Inputs { #[returns(ref)] inputs: Vec<Input>, } impl Inputs { fn values(self, db: &dyn Db) -> impl Iterator<Item = Value> + use<'_> { self.inputs(db).iter().map(|input| input.eval(db)) } }
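// An illustrative note (not part of the original test file): `Inputs::values`
// above is the fan-out point of this harness -- each `Input::eval` call can
// recurse back into one of the four queries defined below, which is how the
// tests wire up cyclic dependency graphs. For example, the two-node cycle
// a:Ni(b) --> b:Np(a) exercised later is built roughly like this:
//
//     let a_in = Inputs::new(&db, vec![]);
//     let b_in = Inputs::new(&db, vec![]);
//     let a = Input::MinIterate(a_in);
//     let b = Input::MinPanic(b_in);
//     a_in.set_inputs(&mut db).to(vec![b]);
//     b_in.set_inputs(&mut db).to(vec![a.clone()]);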
#[derive(Clone)]
enum Input {
    /// a simple value
    Value(Value),
    /// a simple value, reported as an untracked read
    UntrackedRead(Value),
    /// minimum of the given inputs, with fixpoint iteration on cycles
    MinIterate(Inputs),
    /// maximum of the given inputs, with fixpoint iteration on cycles
    MaxIterate(Inputs),
    /// minimum of the given inputs, panicking on cycles
    MinPanic(Inputs),
    /// maximum of the given inputs, panicking on cycles
    MaxPanic(Inputs),
    /// value of the given input, plus one; propagates error values
    Successor(Box<Input>),
    /// successor, converts error values to zero
    SuccessorOrZero(Box<Input>),
}

impl Input {
    fn eval(&self, db: &dyn Db) -> Value {
        match *self {
            Self::Value(value) => value,
            Self::UntrackedRead(value) => {
                db.report_untracked_read();
                value
            }
            Self::MinIterate(inputs) => min_iterate(db, inputs),
            Self::MaxIterate(inputs) => max_iterate(db, inputs),
            Self::MinPanic(inputs) => min_panic(db, inputs),
            Self::MaxPanic(inputs) => max_panic(db, inputs),
            Self::Successor(ref input) => match input.eval(db) {
                Value::N(num) => Value::N(num + 1),
                other => other,
            },
            Self::SuccessorOrZero(ref input) => match input.eval(db) {
                Value::N(num) => Value::N(num + 1),
                _ => Value::N(0),
            },
        }
    }

    fn assert(&self, db: &dyn Db, expected: Value) {
        assert_eq!(self.eval(db), expected)
    }

    fn assert_value(&self, db: &dyn Db, expected: u8) {
        self.assert(db, Value::N(expected))
    }

    fn assert_bounds(&self, db: &dyn Db) {
        self.assert(db, Value::OutOfBounds)
    }

    fn assert_count(&self, db: &dyn Db) {
        self.assert(db, Value::TooManyIterations)
    }
}

const MIN_VALUE: u8 = 10;
const MAX_VALUE: u8 = 245;
const MAX_ITERATIONS: u32 = 3;

/// Recover from a cycle by falling back to `Value::OutOfBounds` if the value is out of bounds,
/// `Value::TooManyIterations` if we've iterated more than `MAX_ITERATIONS` times, or else
/// iterating again.
fn cycle_recover(
    _db: &dyn Db,
    value: &Value,
    count: u32,
    _inputs: Inputs,
) -> CycleRecoveryAction<Value> {
    if value
        .to_value()
        .is_some_and(|val| val <= MIN_VALUE || val >= MAX_VALUE)
    {
        CycleRecoveryAction::Fallback(Value::OutOfBounds)
    } else if count > MAX_ITERATIONS {
        CycleRecoveryAction::Fallback(Value::TooManyIterations)
    } else {
        CycleRecoveryAction::Iterate
    }
}

/// Fold an iterator of `Value` into a `Value`, given some binary operator to apply to two `u8`.
/// `Value::TooManyIterations` and `Value::OutOfBounds` will always propagate, with
/// `Value::TooManyIterations` taking precedence.
fn fold_values<F>(values: impl IntoIterator<Item = Value>, op: F) -> Value
where
    F: Fn(u8, u8) -> u8,
{
    values
        .into_iter()
        .fold(None, |accum, elem| {
            let Some(accum) = accum else {
                return Some(elem);
            };
            match (accum, elem) {
                (Value::TooManyIterations, _) | (_, Value::TooManyIterations) => {
                    Some(Value::TooManyIterations)
                }
                (Value::OutOfBounds, _) | (_, Value::OutOfBounds) => Some(Value::OutOfBounds),
                (Value::N(val1), Value::N(val2)) => Some(Value::N(op(val1, val2))),
            }
        })
        .expect("inputs should not be empty")
}

/// Query minimum value of inputs, with cycle recovery.
#[salsa::tracked(cycle_fn=cycle_recover, cycle_initial=min_initial)]
fn min_iterate<'db>(db: &'db dyn Db, inputs: Inputs) -> Value {
    fold_values(inputs.values(db), u8::min)
}

fn min_initial(_db: &dyn Db, _inputs: Inputs) -> Value {
    Value::N(255)
}

/// Query maximum value of inputs, with cycle recovery.
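/// On a cycle, iteration starts from `max_initial` (`Value::N(0)`), so a pure max-cycle
/// with no larger inputs converges on 0 (see `two_iterate_converge_initial_value_2` below).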
#[salsa::tracked(cycle_fn=cycle_recover, cycle_initial=max_initial)]
fn max_iterate<'db>(db: &'db dyn Db, inputs: Inputs) -> Value {
    fold_values(inputs.values(db), u8::max)
}

fn max_initial(_db: &dyn Db, _inputs: Inputs) -> Value {
    Value::N(0)
}

/// Query minimum value of inputs, without cycle recovery.
#[salsa::tracked]
fn min_panic<'db>(db: &'db dyn Db, inputs: Inputs) -> Value {
    fold_values(inputs.values(db), u8::min)
}

/// Query maximum value of inputs, without cycle recovery.
#[salsa::tracked]
fn max_panic<'db>(db: &'db dyn Db, inputs: Inputs) -> Value {
    fold_values(inputs.values(db), u8::max)
}

fn untracked(num: u8) -> Input {
    Input::UntrackedRead(Value::N(num))
}

fn value(num: u8) -> Input {
    Input::Value(Value::N(num))
}

// Diagram nomenclature for nodes: Each node is represented as a:xx(ii), where `a` is a sequential
// identifier from `a`, `b`, `c`..., xx is one of the four query kinds:
// - `Ni` for `min_iterate`
// - `Xi` for `max_iterate`
// - `Np` for `min_panic`
// - `Xp` for `max_panic`
//
// and `ii` is the inputs for that query, represented as a comma-separated list, with each
// component representing an input:
// - `a`, `b`, `c`... where the input is another node,
// - `uXX` for `UntrackedRead(XX)`
// - `vXX` for `Value(XX)`
// - `sY` for `Successor(Y)`
// - `zY` for `SuccessorOrZero(Y)`
//
// We always enter from the top left node in the diagram.

/// a:Np(a) -+
/// ^        |
/// +--------+
///
/// Simple self-cycle, no iteration, should panic.
#[test]
#[should_panic(expected = "dependency graph cycle")]
fn self_panic() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    a_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.eval(&db);
}

/// a:Np(u10, a) -+
/// ^             |
/// +-------------+
///
/// Simple self-cycle with untracked read, no iteration, should panic.
#[test]
#[should_panic(expected = "dependency graph cycle")]
fn self_untracked_panic() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    a_in.set_inputs(&mut db).to(vec![untracked(10), a.clone()]);
    a.eval(&db);
}

/// a:Ni(a) -+
/// ^        |
/// +--------+
///
/// Simple self-cycle, iteration converges on initial value.
#[test]
fn self_converge_initial_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    a_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 255);
}

/// a:Ni(b) --> b:Np(a)
/// ^                 |
/// +-----------------+
///
/// Two-query cycle, one with iteration and one without.
/// If we enter from the one with iteration, we converge on its initial value.
#[test]
fn two_mixed_converge_initial_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinPanic(b_in);
    a_in.set_inputs(&mut db).to(vec![b]);
    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 255);
}

/// a:Np(b) --> b:Ni(a)
/// ^                 |
/// +-----------------+
///
/// Two-query cycle, one with iteration and one without.
/// If we enter from the one with no iteration, we panic.
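/// (Only the `*_iterate` queries provide a `cycle_initial` value that can seed fixpoint
/// iteration; a `*_panic` query that heads a cycle has no provisional value to hand back,
/// so Salsa panics with "dependency graph cycle".)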
#[test]
#[should_panic(expected = "dependency graph cycle")]
fn two_mixed_panic() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinIterate(b_in);
    a_in.set_inputs(&mut db).to(vec![b]);
    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.eval(&db);
}

/// a:Ni(b) --> b:Xi(a)
/// ^                 |
/// +-----------------+
///
/// Two-query cycle, both with iteration.
/// We converge on the initial value of whichever we first enter from.
#[test]
fn two_iterate_converge_initial_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MaxIterate(b_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 255);
    b.assert_value(&db, 255);
}

/// a:Xi(b) --> b:Ni(a)
/// ^                 |
/// +-----------------+
///
/// Two-query cycle, both with iteration.
/// We converge on the initial value of whichever we enter from.
/// (Same setup as above test, different query order.)
#[test]
fn two_iterate_converge_initial_value_2() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MinIterate(b_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 0);
    b.assert_value(&db, 0);
}

/// a:Np(b) --> b:Ni(c) --> c:Xp(b)
///             ^                 |
///             +-----------------+
///
/// Two-query cycle, enter indirectly at node with iteration, converge on its initial value.
#[test]
fn two_indirect_iterate_converge_initial_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db).to(vec![b]);
    a.assert_value(&db, 255);
}

/// a:Np(b) --> b:Np(c) --> c:Xi(b)
///             ^                 |
///             +-----------------+
///
/// Two-query cycle, enter indirectly at node without iteration, panic.
#[test]
#[should_panic(expected = "dependency graph cycle")]
fn two_indirect_panic() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinPanic(b_in);
    let c = Input::MaxIterate(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db).to(vec![b]);
    a.eval(&db);
}

/// a:Np(b) -> b:Ni(v200,c) -> c:Xp(b)
///            ^                     |
///            +---------------------+
///
/// Two-query cycle, converges to non-initial value.
#[test]
fn two_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(200), c]);
    c_in.set_inputs(&mut db).to(vec![b]);
    a.assert_value(&db, 200);
}

/// a:Xp(b) -> b:Xi(v20,c) -> c:Xp(sb)
///            ^                     |
///            +---------------------+
///
/// Two-query cycle, falls back due to >3 iterations.
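/// A rough trace, reasoning from `cycle_recover` above: `b` starts at the initial value 0;
/// each pass computes c = b + 1 and then b = max(20, c), yielding 20, 21, 22, 23, ... The
/// value stays strictly inside (MIN_VALUE, MAX_VALUE), so the count eventually exceeds
/// MAX_ITERATIONS (3) and recovery falls back to `Value::TooManyIterations`.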
#[test]
fn two_fallback_count() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxPanic(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(20), c]);
    c_in.set_inputs(&mut db)
        .to(vec![Input::Successor(Box::new(b))]);
    a.assert_count(&db);
}

/// a:Xp(b) -> b:Xi(v20,c) -> c:Xp(zb)
///            ^                     |
///            +---------------------+
///
/// Two-query cycle, falls back but fallback does not converge.
#[test]
#[should_panic(expected = "fallback did not converge")]
fn two_fallback_diverge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxPanic(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(20), c.clone()]);
    c_in.set_inputs(&mut db)
        .to(vec![Input::SuccessorOrZero(Box::new(b))]);
    a.assert_count(&db);
}

/// a:Xp(b) -> b:Xi(v244,c) -> c:Xp(sb)
///            ^                      |
///            +----------------------+
///
/// Two-query cycle, falls back due to the value reaching `MAX_VALUE` (we start at 244 and
/// each iteration increments until we reach 245).
#[test]
fn two_fallback_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxPanic(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(244), c]);
    c_in.set_inputs(&mut db)
        .to(vec![Input::Successor(Box::new(b))]);
    a.assert_bounds(&db);
}

/// a:Ni(b) -> b:Np(a, c) -> c:Np(v25, a)
/// ^          |                         |
/// +----------+-------------------------+
///
/// Three-query cycle, (b) and (c) both depend on (a). We converge on 25.
#[test]
fn three_fork_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinPanic(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b]);
    b_in.set_inputs(&mut db).to(vec![a.clone(), c]);
    c_in.set_inputs(&mut db).to(vec![value(25), a.clone()]);
    a.assert_value(&db, 25);
}

/// a:Ni(b) -> b:Ni(a, c) -> c:Np(v25, b)
/// ^          | ^                       |
/// +----------+ +-----------------------+
///
/// Layered cycles. We converge on 25.
#[test]
fn layered_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![a.clone(), c]);
    c_in.set_inputs(&mut db).to(vec![value(25), b]);
    a.assert_value(&db, 25);
}

/// a:Xi(b) -> b:Xi(a, c) -> c:Xp(v25, sb)
/// ^          | ^                        |
/// +----------+ +------------------------+
///
/// Layered cycles. We hit max iterations and fall back.
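/// (Once the fallback fires, `fold_values` propagates `Value::TooManyIterations` through
/// every layer of the cycle, so the entry query `a` observes it directly.)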
#[test]
fn layered_fallback_count() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![a.clone(), c]);
    c_in.set_inputs(&mut db)
        .to(vec![value(25), Input::Successor(Box::new(b))]);

    a.assert_count(&db);
}

/// a:Xi(b) -> b:Xi(a, c) -> c:Xp(v243, sb)
/// ^          | ^                         |
/// +----------+ +-------------------------+
///
/// Layered cycles. We hit max value and fall back.
#[test]
fn layered_fallback_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![a.clone(), c]);
    c_in.set_inputs(&mut db)
        .to(vec![value(243), Input::Successor(Box::new(b))]);
    a.assert_bounds(&db);
}

/// a:Ni(b) -> b:Ni(c) -> c:Np(v25, a, b)
/// ^          ^                         |
/// +----------+-------------------------+
///
/// Nested cycles. We converge on 25.
#[test]
fn nested_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db).to(vec![value(25), a.clone(), b]);
    a.assert_value(&db, 25);
}

/// a:Ni(b) -> b:Ni(c) -> c:Np(v25, b, a)
/// ^          ^                         |
/// +----------+-------------------------+
///
/// Nested cycles, inner first. We converge on 25.
#[test]
fn nested_inner_first_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db).to(vec![value(25), b, a.clone()]);
    a.assert_value(&db, 25);
}

/// a:Xi(b) -> b:Xi(c) -> c:Xp(v25, a, sb)
/// ^          ^                          |
/// +----------+--------------------------+
///
/// Nested cycles. We hit max iterations and fall back.
#[test]
fn nested_fallback_count() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db)
        .to(vec![value(25), a.clone(), Input::Successor(Box::new(b))]);
    a.assert_count(&db);
}

/// a:Xi(b) -> b:Xi(c) -> c:Xp(v25, b, sa)
/// ^          ^                          |
/// +----------+--------------------------+
///
/// Nested cycles, inner first. We hit max iterations and fall back.
#[test]
fn nested_inner_first_fallback_count() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db)
        .to(vec![value(25), b, Input::Successor(Box::new(a.clone()))]);
    a.assert_count(&db);
}

/// a:Xi(b) -> b:Xi(c) -> c:Xp(v243, a, sb)
/// ^          ^                           |
/// +----------+---------------------------+
///
/// Nested cycles. We hit max value and fall back.
#[test]
fn nested_fallback_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c.clone()]);
    c_in.set_inputs(&mut db).to(vec![
        value(243),
        a.clone(),
        Input::Successor(Box::new(b.clone())),
    ]);
    a.assert_bounds(&db);
    b.assert_bounds(&db);
    c.assert_bounds(&db);
}

/// a:Xi(b) -> b:Xi(c) -> c:Xp(v243, b, sa)
/// ^          ^                           |
/// +----------+---------------------------+
///
/// Nested cycles, inner first. We hit max value and fall back.
#[test]
fn nested_inner_first_fallback_value() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c]);
    c_in.set_inputs(&mut db)
        .to(vec![value(243), b, Input::Successor(Box::new(a.clone()))]);
    a.assert_bounds(&db);
}

/// a:Ni(b) -> b:Ni(c, a) -> c:Np(v25, a, b)
/// ^          ^       |                    |
/// +----------+-------|--------------------+
/// |                  |
/// +------------------+
///
/// Nested cycles, double head. We converge on 25.
#[test]
fn nested_double_converge() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c, a.clone()]);
    c_in.set_inputs(&mut db).to(vec![value(25), a.clone(), b]);
    a.assert_value(&db, 25);
}

// Multiple-revision cycles

/// a:Ni(b) --> b:Np(a)
/// ^                 |
/// +-----------------+
///
/// a:Ni(b) --> b:Np(v30)
///
/// Cycle becomes not-a-cycle in next revision.
#[test]
fn cycle_becomes_non_cycle() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinPanic(b_in);
    a_in.set_inputs(&mut db).to(vec![b]);
    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 255);

    b_in.set_inputs(&mut db).to(vec![value(30)]);
    a.assert_value(&db, 30);
}

/// a:Ni(b) --> b:Np(v30)
///
/// a:Ni(b) --> b:Np(a)
/// ^                 |
/// +-----------------+
///
/// Non-cycle becomes a cycle in next revision.
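/// (The memoized cycle-free value for `a` is invalidated by the change to `b`'s inputs;
/// re-execution then discovers the new cycle and fixpoint-iterates from `min_initial`,
/// converging on 255.)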
#[test]
fn non_cycle_becomes_cycle() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinPanic(b_in);
    a_in.set_inputs(&mut db).to(vec![b]);
    b_in.set_inputs(&mut db).to(vec![value(30)]);
    a.assert_value(&db, 30);

    b_in.set_inputs(&mut db).to(vec![a.clone()]);
    a.assert_value(&db, 255);
}

/// a:Xi(b) -> b:Xi(c, a) -> c:Xp(v25, a, sb)
/// ^          ^       |                     |
/// +----------+-------|---------------------+
/// |                  |
/// +------------------+
///
/// Nested cycles, double head. We hit max iterations and fall back, then max value on the next
/// revision, then converge on the next.
#[test]
fn nested_double_multiple_revisions() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MaxIterate(a_in);
    let b = Input::MaxIterate(b_in);
    let c = Input::MaxPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![b.clone()]);
    b_in.set_inputs(&mut db).to(vec![c, a.clone()]);
    c_in.set_inputs(&mut db).to(vec![
        value(25),
        a.clone(),
        Input::Successor(Box::new(b.clone())),
    ]);
    a.assert_count(&db);

    // next revision, we hit max value instead
    c_in.set_inputs(&mut db).to(vec![
        value(243),
        a.clone(),
        Input::Successor(Box::new(b.clone())),
    ]);
    a.assert_bounds(&db);

    // and next revision, we converge
    c_in.set_inputs(&mut db)
        .to(vec![value(240), a.clone(), b.clone()]);
    a.assert_value(&db, 240);

    // one more revision, without relevant changes
    a_in.set_inputs(&mut db).to(vec![b]);
    a.assert_value(&db, 240);
}

/// a:Ni(b) -> b:Ni(c) -> c:Ni(a)
/// ^                            |
/// +----------------------------+
///
/// In a cycle with some LOW durability and some HIGH durability inputs, changing a LOW durability
/// input still re-executes the full cycle in the next revision.
#[test]
fn cycle_durability() {
    let mut db = DbImpl::new();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinIterate(c_in);
    a_in.set_inputs(&mut db)
        .with_durability(Durability::LOW)
        .to(vec![b.clone()]);
    b_in.set_inputs(&mut db)
        .with_durability(Durability::HIGH)
        .to(vec![c]);
    c_in.set_inputs(&mut db)
        .with_durability(Durability::HIGH)
        .to(vec![a.clone()]);
    a.assert_value(&db, 255);

    // next revision, we converge instead
    a_in.set_inputs(&mut db)
        .with_durability(Durability::LOW)
        .to(vec![value(45), b]);
    a.assert_value(&db, 45);
}

/// a:Np(v59, b) -> b:Ni(v60, c) -> c:Np(b)
///                 ^                      |
///                 +----------------------+
///
/// If nothing in a cycle changed in the new revision, no part of the cycle should re-execute.
#[test]
fn cycle_unchanged() {
    let mut db = ExecuteValidateLoggerDatabase::default();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![value(59), b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(60), c]);
    c_in.set_inputs(&mut db).to(vec![b.clone()]);

    a.assert_value(&db, 59);
    b.assert_value(&db, 60);
    db.assert_logs_len(5);

    // next revision, we change only A, which is not part of the cycle and the cycle does not
    // depend on.
    a_in.set_inputs(&mut db).to(vec![value(45), b.clone()]);
    b.assert_value(&db, 60);
    db.assert_logs(expect![[r#"
        [
            "salsa_event(DidValidateMemoizedValue { database_key: min_iterate(Id(1)) })",
        ]"#]]);

    a.assert_value(&db, 45);
}

/// a:Np(v59, b) -> b:Ni(v60, c) -> c:Np(d) -> d:Ni(v61, b, e) -> e:Np(d)
///                 ^                          |  ^                      |
///                 +--------------------------+  +----------------------+
///
/// If nothing in a nested cycle changed in the new revision, no part of the cycle should
/// re-execute.
#[test]
fn cycle_unchanged_nested() {
    let mut db = ExecuteValidateLoggerDatabase::default();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let d_in = Inputs::new(&db, vec![]);
    let e_in = Inputs::new(&db, vec![]);
    let a = Input::MinPanic(a_in);
    let b = Input::MinIterate(b_in);
    let c = Input::MinPanic(c_in);
    let d = Input::MinIterate(d_in);
    let e = Input::MinPanic(e_in);
    a_in.set_inputs(&mut db).to(vec![value(59), b.clone()]);
    b_in.set_inputs(&mut db).to(vec![value(60), c.clone()]);
    c_in.set_inputs(&mut db).to(vec![d.clone()]);
    d_in.set_inputs(&mut db)
        .to(vec![value(61), b.clone(), e.clone()]);
    e_in.set_inputs(&mut db).to(vec![d.clone()]);

    a.assert_value(&db, 59);
    b.assert_value(&db, 60);
    db.assert_logs_len(13);

    // next revision, we change only A, which is not part of the cycle and the cycle does not
    // depend on.
    a_in.set_inputs(&mut db).to(vec![value(45), b.clone()]);
    b.assert_value(&db, 60);
    db.assert_logs(expect![[r#"
        [
            "salsa_event(DidValidateMemoizedValue { database_key: min_iterate(Id(1)) })",
        ]"#]]);

    a.assert_value(&db, 45);
}

///                                 +-----------------------------------+
///                                 |                                   v
/// a:Np(v59, b) -> b:Ni(v60, c) -> c:Np(d, e) -> d:Ni(v61, b, e) -> e:Ni(d)
///                 ^                             |  ^                      |
///                 +-----------------------------+  +----------------------+
///
/// If nothing in a nested cycle changed in the new revision, no part of the cycle should
/// re-execute.
#[test_log::test]
fn cycle_unchanged_nested_intertwined() {
    // We run this test twice in order to catch some subtly different cases; see below.
    for i in 0..2 {
        let mut db = ExecuteValidateLoggerDatabase::default();
        let a_in = Inputs::new(&db, vec![]);
        let b_in = Inputs::new(&db, vec![]);
        let c_in = Inputs::new(&db, vec![]);
        let d_in = Inputs::new(&db, vec![]);
        let e_in = Inputs::new(&db, vec![]);
        let a = Input::MinPanic(a_in);
        let b = Input::MinIterate(b_in);
        let c = Input::MinPanic(c_in);
        let d = Input::MinIterate(d_in);
        let e = Input::MinIterate(e_in);
        a_in.set_inputs(&mut db).to(vec![value(59), b.clone()]);
        b_in.set_inputs(&mut db).to(vec![value(60), c.clone()]);
        c_in.set_inputs(&mut db).to(vec![d.clone(), e.clone()]);
        d_in.set_inputs(&mut db)
            .to(vec![value(61), b.clone(), e.clone()]);
        e_in.set_inputs(&mut db).to(vec![d.clone()]);

        a.assert_value(&db, 59);
        b.assert_value(&db, 60);

        // First time we run this test, don't fetch c/d/e here; this means they won't get marked
        // `verified_final` in R6 (this revision), which will leave us in the next revision (R7)
        // with a chain of could-be-provisional memos from the previous revision which should be
        // final but were never confirmed as such; this triggers the case in `deep_verify_memo`
        // where we need to double-check `validate_provisional` after traversing dependencies.
        //
        // Second time we run this test, fetch everything in R6, to check the behavior of
        // `maybe_changed_after` with all validated-final memos.
        if i == 1 {
            c.assert_value(&db, 60);
            d.assert_value(&db, 60);
            e.assert_value(&db, 60);
        }
        db.assert_logs_len(15 + i);

        // next revision, we change only A, which is not part of the cycle and the cycle does not
        // depend on.
        a_in.set_inputs(&mut db).to(vec![value(45), b.clone()]);
        b.assert_value(&db, 60);
        db.assert_logs(expect![[r#"
            [
                "salsa_event(DidValidateMemoizedValue { database_key: min_iterate(Id(1)) })",
            ]"#]]);

        a.assert_value(&db, 45);
    }
}

/// Provisional query results in a cycle should still be cached within a single iteration.
///
/// a:Ni(v59, b) -> b:Np(v60, c, c, c) -> c:Np(a)
/// ^                                            |
/// +--------------------------------------------+
#[test]
fn repeat_provisional_query() {
    let mut db = ExecuteValidateLoggerDatabase::default();
    let a_in = Inputs::new(&db, vec![]);
    let b_in = Inputs::new(&db, vec![]);
    let c_in = Inputs::new(&db, vec![]);
    let a = Input::MinIterate(a_in);
    let b = Input::MinPanic(b_in);
    let c = Input::MinPanic(c_in);
    a_in.set_inputs(&mut db).to(vec![value(59), b.clone()]);
    b_in.set_inputs(&mut db)
        .to(vec![value(60), c.clone(), c.clone(), c]);
    c_in.set_inputs(&mut db).to(vec![a.clone()]);

    a.assert_value(&db, 59);
    db.assert_logs(expect![[r#"
        [
            "salsa_event(WillExecute { database_key: min_iterate(Id(0)) })",
            "salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
            "salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
            "salsa_event(WillIterateCycle { database_key: min_iterate(Id(0)), iteration_count: IterationCount(1), fell_back: false })",
            "salsa_event(WillExecute { database_key: min_panic(Id(1)) })",
            "salsa_event(WillExecute { database_key: min_panic(Id(2)) })",
        ]"#]]);
}
salsa-0.23.0/tests/cycle_accumulate.rs000064400000000000000000000214571046102023000160320ustar 00000000000000use std::collections::HashSet;

mod common;
use common::{LogDatabase, LoggerDatabase};
use expect_test::expect;
use salsa::{Accumulator, Setter};
use test_log::test;

#[salsa::input(debug)]
struct File {
    name: String,
    dependencies: Vec<File>,
    issues: Vec<u32>,
}

#[salsa::accumulator]
#[derive(Debug)]
struct Diagnostic(#[allow(dead_code)] String);

#[salsa::tracked(cycle_fn = cycle_fn, cycle_initial = cycle_initial)]
fn check_file(db: &dyn LogDatabase, file: File) -> Vec<u32> {
    db.push_log(format!(
        "check_file(name = {}, issues = {:?})",
        file.name(db),
        file.issues(db)
    ));

    let mut collected_issues = HashSet::<u32>::from_iter(file.issues(db).iter().copied());

    for dep in file.dependencies(db) {
        let issues = check_file(db, dep);
        collected_issues.extend(issues);
    }

    let mut sorted_issues = collected_issues.iter().copied().collect::<Vec<_>>();
    sorted_issues.sort();

    for issue in &sorted_issues {
        Diagnostic(format!("file {}: issue {}", file.name(db), issue)).accumulate(db);
    }

    sorted_issues
}

fn cycle_initial(_db: &dyn LogDatabase, _file: File) -> Vec<u32> {
    vec![]
}

fn cycle_fn(
    _db: &dyn LogDatabase,
    _value: &[u32],
    _count: u32,
    _file: File,
) -> salsa::CycleRecoveryAction<Vec<u32>> {
    salsa::CycleRecoveryAction::Iterate
}

#[test]
fn accumulate_once() {
    let db = LoggerDatabase::default();

    let file = File::new(&db, "fn".to_string(), vec![], vec![1]);
    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = fn, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file fn: issue 1",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}

#[test]
fn accumulate_with_dep() {
    let db = LoggerDatabase::default();

    let file_a = File::new(&db, "file_a".to_string(), vec![], vec![1]);
    let file_b = File::new(&db, "file_b".to_string(), vec![file_a], vec![2]);

    let diagnostics =
        check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}

#[test]
fn accumulate_with_cycle() {
    let mut db = LoggerDatabase::default();

    let file_a = File::new(&db, "file_a".to_string(), vec![], vec![1]);
    let file_b = File::new(&db, "file_b".to_string(), vec![file_a], vec![2]);
    file_a.set_dependencies(&mut db).to(vec![file_b]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
            Diagnostic(
                "file file_a: issue 2",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}

#[test]
fn accumulate_with_cycle_second_revision() {
    let mut db = LoggerDatabase::default();

    let file_a = File::new(&db, "file_a".to_string(), vec![], vec![1]);
    let file_b = File::new(&db, "file_b".to_string(), vec![file_a], vec![2]);
    file_a.set_dependencies(&mut db).to(vec![file_b]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
            Diagnostic(
                "file file_a: issue 2",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));

    file_b.set_issues(&mut db).to(vec![2, 3]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_a);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2, 3])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2, 3])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_a: issue 1",
            ),
            Diagnostic(
                "file file_a: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 3",
            ),
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_b: issue 3",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}

#[test]
fn accumulate_add_cycle() {
    let mut db = LoggerDatabase::default();

    let file_a = File::new(&db, "file_a".to_string(), vec![], vec![1]);
    let file_b = File::new(&db, "file_b".to_string(), vec![file_a], vec![2]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));

    file_a.set_dependencies(&mut db).to(vec![file_b]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_a);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_a: issue 1",
            ),
            Diagnostic(
                "file file_a: issue 2",
            ),
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}

#[test]
fn accumulate_remove_cycle() {
    let mut db = LoggerDatabase::default();

    let file_a = File::new(&db, "file_a".to_string(), vec![], vec![1]);
    let file_b = File::new(&db, "file_b".to_string(), vec![file_a], vec![2]);
    file_a.set_dependencies(&mut db).to(vec![file_b]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
            "check_file(name = file_a, issues = [1])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
            Diagnostic(
                "file file_a: issue 2",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));

    file_a.set_dependencies(&mut db).to(vec![]);

    let diagnostics = check_file::accumulated::<Diagnostic>(&db, file_b);
    db.assert_logs(expect![[r#"
        [
            "check_file(name = file_a, issues = [1])",
            "check_file(name = file_b, issues = [2])",
        ]"#]]);

    expect![[r#"
        [
            Diagnostic(
                "file file_b: issue 1",
            ),
            Diagnostic(
                "file file_b: issue 2",
            ),
            Diagnostic(
                "file file_a: issue 1",
            ),
        ]"#]]
    .assert_eq(&format!("{diagnostics:#?}"));
}
salsa-0.23.0/tests/cycle_fallback_immediate.rs000064400000000000000000000024071046102023000174600ustar 00000000000000//! It is possible to omit the `cycle_fn`, only specifying `cycle_result`, in which case
//! an immediate fallback value is used as the cycle handling, as opposed to doing a fixpoint
//! resolution.

use std::sync::atomic::{AtomicI32, Ordering};

#[salsa::tracked(cycle_result=cycle_result)]
fn one_o_one(db: &dyn salsa::Database) -> u32 {
    let val = one_o_one(db);
    val + 1
}

fn cycle_result(_db: &dyn salsa::Database) -> u32 {
    100
}

#[test_log::test]
fn simple() {
    let db = salsa::DatabaseImpl::default();

    assert_eq!(one_o_one(&db), 100);
}

#[salsa::tracked(cycle_result=two_queries_cycle_result)]
fn two_queries1(db: &dyn salsa::Database) -> i32 {
    two_queries2(db);
    0
}

#[salsa::tracked]
fn two_queries2(db: &dyn salsa::Database) -> i32 {
    two_queries1(db);
    // This is horribly against Salsa's rules, but we want to test that
    // the value from within the cycle is not considered, and this is
    // the only way I found.
    static CALLS_COUNT: AtomicI32 = AtomicI32::new(0);
    CALLS_COUNT.fetch_add(1, Ordering::Relaxed)
}

fn two_queries_cycle_result(_db: &dyn salsa::Database) -> i32 {
    1
}

#[test]
fn two_queries() {
    let db = salsa::DatabaseImpl::default();

    assert_eq!(two_queries1(&db), 1);
    assert_eq!(two_queries2(&db), 1);
}
salsa-0.23.0/tests/cycle_initial_call_back_into_cycle.rs000064400000000000000000000014111046102023000215110ustar 00000000000000//! Calling back into the same cycle from your cycle initial function will trigger another cycle.

#[salsa::tracked]
fn initial_value(db: &dyn salsa::Database) -> u32 {
    query(db)
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn query(db: &dyn salsa::Database) -> u32 {
    let val = query(db);
    if val < 5 {
        val + 1
    } else {
        val
    }
}

fn cycle_initial(db: &dyn salsa::Database) -> u32 {
    initial_value(db)
}

fn cycle_fn(
    _db: &dyn salsa::Database,
    _value: &u32,
    _count: u32,
) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Iterate
}

#[test_log::test]
#[should_panic(expected = "dependency graph cycle")]
fn the_test() {
    let db = salsa::DatabaseImpl::default();
    query(&db);
}
salsa-0.23.0/tests/cycle_initial_call_query.rs000064400000000000000000000013021046102023000175500ustar 00000000000000
//! It's possible to call a Salsa query from within a cycle initial fn.

#[salsa::tracked]
fn initial_value(_db: &dyn salsa::Database) -> u32 {
    0
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn query(db: &dyn salsa::Database) -> u32 {
    let val = query(db);
    if val < 5 {
        val + 1
    } else {
        val
    }
}

fn cycle_initial(db: &dyn salsa::Database) -> u32 {
    initial_value(db)
}

fn cycle_fn(
    _db: &dyn salsa::Database,
    _value: &u32,
    _count: u32,
) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Iterate
}

#[test_log::test]
fn the_test() {
    let db = salsa::DatabaseImpl::default();

    assert_eq!(query(&db), 5);
}
salsa-0.23.0/tests/cycle_maybe_changed_after.rs000064400000000000000000000131171046102023000176320ustar 00000000000000//! Tests for incremental validation for queries involved in a cycle.

mod common;

use crate::common::EventLoggerDatabase;
use salsa::{CycleRecoveryAction, Database, Durability, Setter};

#[salsa::input(debug)]
struct Input {
    value: u32,
    max: u32,
}

#[salsa::interned(debug)]
struct Output<'db> {
    value: u32,
}

#[salsa::tracked(cycle_fn=query_a_recover, cycle_initial=query_a_initial)]
fn query_c<'db>(db: &'db dyn salsa::Database, input: Input) -> u32 {
    query_d(db, input)
}

#[salsa::tracked]
fn query_d<'db>(db: &'db dyn salsa::Database, input: Input) -> u32 {
    let value = query_c(db, input);
    if value < input.max(db) * 2 {
        // Only the first iteration depends on value but the entire
        // cycle must re-run if input changes.
        let result = value + input.value(db);
        Output::new(db, result);
        result
    } else {
        value
    }
}

fn query_a_initial(_db: &dyn Database, _input: Input) -> u32 {
    0
}

fn query_a_recover(
    _db: &dyn Database,
    _output: &u32,
    _count: u32,
    _input: Input,
) -> CycleRecoveryAction<u32> {
    CycleRecoveryAction::Iterate
}

/// Only the first iteration depends on `input.value`. It's important that the entire query
/// reruns if `input.value` changes. That's why salsa has to carry-over the inputs and outputs
/// from the previous iteration.
#[test_log::test]
fn first_iteration_input_only() {
    #[salsa::tracked(cycle_fn=query_a_recover, cycle_initial=query_a_initial)]
    fn query_a<'db>(db: &'db dyn salsa::Database, input: Input) -> u32 {
        query_b(db, input)
    }

    #[salsa::tracked]
    fn query_b<'db>(db: &'db dyn salsa::Database, input: Input) -> u32 {
        let value = query_a(db, input);
        if value < input.max(db) {
            // Only the first iteration depends on value but the entire
            // cycle must re-run if input changes.
            value + input.value(db)
        } else {
            value
        }
    }

    let mut db = EventLoggerDatabase::default();
    let input = Input::builder(4, 5).durability(Durability::MEDIUM).new(&db);

    {
        let result = query_a(&db, input);
        assert_eq!(result, 8);
    }

    {
        input.set_value(&mut db).to(3);
        let result = query_a(&db, input);
        assert_eq!(result, 6);
    }
}

/// Very similar to the previous test, but the difference is that the called function
/// isn't the cycle head and that `cycle_participant` is called from
/// both the `cycle_head` and the `entry` function.
#[test_log::test]
fn nested_cycle_fewer_dependencies_in_first_iteration() {
    #[salsa::interned(debug)]
    struct ClassLiteral<'db> {
        scope: Scope<'db>,
    }

    #[salsa::tracked]
    impl<'db> ClassLiteral<'db> {
        #[salsa::tracked]
        fn context(self, db: &'db dyn salsa::Database) -> u32 {
            let scope = self.scope(db);
            // Access a field on `scope` that changed in the new revision.
            scope.field(db)
        }
    }

    #[salsa::tracked(debug)]
    struct Scope<'db> {
        field: u32,
    }

    #[salsa::tracked]
    fn create_interned<'db>(db: &'db dyn salsa::Database, scope: Scope<'db>) -> ClassLiteral<'db> {
        ClassLiteral::new(db, scope)
    }

    #[derive(Eq, PartialEq, Debug, salsa::Update)]
    struct Index<'db> {
        scope: Scope<'db>,
    }

    #[salsa::tracked(cycle_fn=head_recover, cycle_initial=head_initial)]
    fn cycle_head<'db>(db: &'db dyn salsa::Database, input: Input) -> Option<ClassLiteral<'db>> {
        let b = cycle_outer(db, input);
        tracing::info!("query_b = {b:?}");

        b.or_else(|| {
            let index = index(db, input);
            Some(create_interned(db, index.scope))
        })
    }

    fn head_initial(_db: &dyn Database, _input: Input) -> Option<ClassLiteral<'_>> {
        None
    }

    fn head_recover<'db>(
        _db: &'db dyn Database,
        _output: &Option<ClassLiteral<'db>>,
        _count: u32,
        _input: Input,
    ) -> CycleRecoveryAction<Option<ClassLiteral<'db>>> {
        CycleRecoveryAction::Iterate
    }

    #[salsa::tracked]
    fn cycle_outer<'db>(db: &'db dyn salsa::Database, input: Input) -> Option<ClassLiteral<'db>> {
        cycle_participant(db, input)
    }

    #[salsa::tracked]
    fn cycle_participant<'db>(
        db: &'db dyn salsa::Database,
        input: Input,
    ) -> Option<ClassLiteral<'db>> {
        let value = cycle_head(db, input);
        tracing::info!("cycle_head = {value:?}");

        if let Some(value) = value {
            value.context(db);
            Some(value)
        } else {
            None
        }
    }

    #[salsa::tracked(returns(ref))]
    fn index<'db>(db: &'db dyn salsa::Database, input: Input) -> Index<'db> {
        Index {
            scope: Scope::new(db, input.value(db) * 2),
        }
    }

    #[salsa::tracked]
    fn entry(db: &dyn salsa::Database, input: Input) -> u32 {
        let _ = input.value(db);
        let head = cycle_head(db, input);
        let participant = cycle_participant(db, input);

        tracing::debug!("head: {head:?}, participant: {participant:?}");

        head.or(participant)
            .map(|class| class.scope(db).field(db))
            .unwrap_or(0)
    }

    let mut db = EventLoggerDatabase::default();

    let input = Input::builder(3, 5)
        .max_durability(Durability::HIGH)
        .value_durability(Durability::LOW)
        .new(&db);

    {
        let result = entry(&db, input);
        assert_eq!(result, 6);
    }

    db.synthetic_write(Durability::MEDIUM);

    {
        input.set_value(&mut db).to(4);
        let result = entry(&db, input);
        assert_eq!(result, 8);
    }
}
salsa-0.23.0/tests/cycle_output.rs000064400000000000000000000153121046102023000152420ustar 00000000000000//! Test tracked struct output from a query in a cycle.
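//!
//! Shape of the graph exercised below: `query_b` (the cycle head, with `cycle_fn`/
//! `cycle_initial`) wraps `query_a`; `query_a` re-enters `query_b`, materializes the result
//! as an `Output` tracked struct, reads it back through `read_value`, and also consults
//! `query_d`, whose value comes from an optional input stored on the database itself.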
mod common;
use common::{HasLogger, LogDatabase, Logger};
use expect_test::expect;
use salsa::{Setter, Storage};

#[salsa::tracked]
struct Output<'db> {
    value: u32,
}

#[salsa::input]
struct InputValue {
    value: u32,
}

#[salsa::tracked]
fn read_value<'db>(db: &'db dyn Db, output: Output<'db>) -> u32 {
    output.value(db)
}

#[salsa::tracked]
fn query_a(db: &dyn Db, input: InputValue) -> u32 {
    let val = query_b(db, input);
    let output = Output::new(db, val);
    let read = read_value(db, output);
    assert_eq!(read, val);
    query_d(db);
    if val > 2 {
        val
    } else {
        val + input.value(db)
    }
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn query_b(db: &dyn Db, input: InputValue) -> u32 {
    query_a(db, input)
}

fn cycle_initial(_db: &dyn Db, _input: InputValue) -> u32 {
    0
}

fn cycle_fn(
    _db: &dyn Db,
    _value: &u32,
    _count: u32,
    _input: InputValue,
) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Iterate
}

#[salsa::tracked]
fn query_c(db: &dyn Db, input: InputValue) -> u32 {
    input.value(db)
}

#[salsa::tracked]
fn query_d(db: &dyn Db) -> u32 {
    db.get_input().map(|input| input.value(db)).unwrap_or(0)
}

trait HasOptionInput {
    fn get_input(&self) -> Option<InputValue>;
    fn set_input(&mut self, input: InputValue);
}

#[salsa::db]
trait Db: HasOptionInput + salsa::Database {}

#[salsa::db]
#[derive(Clone)]
struct Database {
    storage: salsa::Storage<Self>,
    logger: Logger,
    input: Option<InputValue>,
}

impl HasLogger for Database {
    fn logger(&self) -> &Logger {
        &self.logger
    }
}

impl Default for Database {
    fn default() -> Self {
        let logger = Logger::default();
        Self {
            storage: Storage::new(Some(Box::new({
                let logger = logger.clone();
                move |event| match event.kind {
                    salsa::EventKind::WillExecute { .. }
                    | salsa::EventKind::DidValidateMemoizedValue { .. } => {
                        logger.push_log(format!("salsa_event({:?})", event.kind));
                    }
                    salsa::EventKind::WillCheckCancellation => {}
                    _ => {
                        logger.push_log(format!("salsa_event({:?})", event.kind));
                    }
                }
            }))),
            logger,
            input: Default::default(),
        }
    }
}

impl HasOptionInput for Database {
    fn get_input(&self) -> Option<InputValue> {
        self.input
    }

    fn set_input(&mut self, input: InputValue) {
        self.input.replace(input);
    }
}

#[salsa::db]
impl salsa::Database for Database {}

#[salsa::db]
impl Db for Database {}

#[test_log::test]
fn single_revision() {
    let db = Database::default();
    let input = InputValue::new(&db, 1);

    assert_eq!(query_b(&db, input), 3);
}

#[test_log::test]
fn revalidate_no_changes() {
    let mut db = Database::default();

    let ab_input = InputValue::new(&db, 1);
    let c_input = InputValue::new(&db, 10);
    assert_eq!(query_c(&db, c_input), 10);
    assert_eq!(query_b(&db, ab_input), 3);

    db.assert_logs_len(15);

    // trigger a new revision, but one that doesn't touch the query_a/query_b cycle
    c_input.set_value(&mut db).to(20);

    assert_eq!(query_b(&db, ab_input), 3);

    db.assert_logs(expect![[r#"
        [
            "salsa_event(DidSetCancellationFlag)",
            "salsa_event(DidValidateMemoizedValue { database_key: read_value(Id(400)) })",
            "salsa_event(DidValidateInternedValue { key: query_d::interned_arguments(Id(800)), revision: R2 })",
            "salsa_event(DidValidateMemoizedValue { database_key: query_d(Id(800)) })",
            "salsa_event(DidValidateMemoizedValue { database_key: read_value(Id(401)) })",
            "salsa_event(DidValidateMemoizedValue { database_key: read_value(Id(402)) })",
            "salsa_event(DidValidateMemoizedValue { database_key: read_value(Id(403)) })",
            "salsa_event(DidValidateMemoizedValue { database_key: query_b(Id(0)) })",
        ]"#]]);
}

#[test_log::test]
fn revalidate_with_change_after_output_read() {
    let mut db = Database::default();

    let ab_input = InputValue::new(&db, 1);
    let d_input = InputValue::new(&db, 10);
    db.set_input(d_input);

    assert_eq!(query_b(&db, ab_input), 3);
    db.assert_logs_len(14);

    // trigger a new revision that changes the output of query_d
    d_input.set_value(&mut db).to(20);

    assert_eq!(query_b(&db, ab_input), 3);

    db.assert_logs(expect![[r#"
        [
            "salsa_event(DidSetCancellationFlag)",
            "salsa_event(DidValidateMemoizedValue { database_key: read_value(Id(400)) })",
            "salsa_event(DidValidateInternedValue { key: query_d::interned_arguments(Id(800)), revision: R2 })",
            "salsa_event(WillExecute { database_key: query_b(Id(0)) })",
            "salsa_event(DidValidateInternedValue { key: query_d::interned_arguments(Id(800)), revision: R2 })",
            "salsa_event(WillExecute { database_key: query_a(Id(0)) })",
            "salsa_event(WillExecute { database_key: query_d(Id(800)) })",
            "salsa_event(WillDiscardStaleOutput { execute_key: query_a(Id(0)), output_key: Output(Id(403)) })",
            "salsa_event(DidDiscard { key: Output(Id(403)) })",
            "salsa_event(DidDiscard { key: read_value(Id(403)) })",
            "salsa_event(WillDiscardStaleOutput { execute_key: query_a(Id(0)), output_key: Output(Id(401)) })",
            "salsa_event(DidDiscard { key: Output(Id(401)) })",
            "salsa_event(DidDiscard { key: read_value(Id(401)) })",
            "salsa_event(WillDiscardStaleOutput { execute_key: query_a(Id(0)), output_key: Output(Id(402)) })",
            "salsa_event(DidDiscard { key: Output(Id(402)) })",
            "salsa_event(DidDiscard { key: read_value(Id(402)) })",
            "salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(1), fell_back: false })",
            "salsa_event(WillExecute { database_key: query_a(Id(0)) })",
            "salsa_event(WillExecute { database_key: read_value(Id(403g1)) })",
            "salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(2), fell_back: false })",
            "salsa_event(WillExecute { database_key: query_a(Id(0)) })",
            "salsa_event(WillExecute { database_key: read_value(Id(401g1)) })",
            "salsa_event(WillIterateCycle { database_key: query_b(Id(0)), iteration_count: IterationCount(3), fell_back: false })",
            "salsa_event(WillExecute { database_key: query_a(Id(0)) })",
            "salsa_event(WillExecute { database_key: read_value(Id(402g1)) })",
        ]"#]]);
}
salsa-0.23.0/tests/cycle_recovery_call_back_into_cycle.rs000064400000000000000000000017071046102023000217260ustar 00000000000000//! Calling back into the same cycle from your cycle recovery function _can_ work out, as long as
//! the overall cycle still converges.

mod common;
use common::{DatabaseWithValue, ValueDatabase};

#[salsa::tracked]
fn fallback_value(db: &dyn ValueDatabase) -> u32 {
    query(db) + db.get_value()
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn query(db: &dyn ValueDatabase) -> u32 {
    let val = query(db);
    if val < 5 {
        val + 1
    } else {
        val
    }
}

fn cycle_initial(_db: &dyn ValueDatabase) -> u32 {
    0
}

fn cycle_fn(db: &dyn ValueDatabase, _value: &u32, _count: u32) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Fallback(fallback_value(db))
}

#[test]
fn converges() {
    let db = DatabaseWithValue::new(10);

    assert_eq!(query(&db), 10);
}

#[test]
#[should_panic(expected = "fallback did not converge")]
fn diverges() {
    let db = DatabaseWithValue::new(3);

    query(&db);
}
salsa-0.23.0/tests/cycle_recovery_call_query.rs000064400000000000000000000013131046102023000177560ustar 00000000000000//! It's possible to call a Salsa query from within a cycle recovery fn.
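//!
//! The recovery fn receives the database, so it can compute its fallback through the query
//! system: `cycle_fn` below returns `Fallback(fallback_value(db))`, and `fallback_value` is
//! itself an ordinary memoized tracked fn returning 10.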
#[salsa::tracked]
fn fallback_value(_db: &dyn salsa::Database) -> u32 {
    10
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn query(db: &dyn salsa::Database) -> u32 {
    let val = query(db);
    if val < 5 {
        val + 1
    } else {
        val
    }
}

fn cycle_initial(_db: &dyn salsa::Database) -> u32 {
    0
}

fn cycle_fn(
    db: &dyn salsa::Database,
    _value: &u32,
    _count: u32,
) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Fallback(fallback_value(db))
}

#[test_log::test]
fn the_test() {
    let db = salsa::DatabaseImpl::default();

    assert_eq!(query(&db), 10);
}
salsa-0.23.0/tests/cycle_regression_455.rs000064400000000000000000000022661046102023000164630ustar 00000000000000use salsa::{Database, Setter};

#[salsa::tracked]
fn memoized(db: &dyn Database, input: MyInput) -> u32 {
    memoized_a(db, MyTracked::new(db, input.field(db)))
}

#[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)]
fn memoized_a<'db>(db: &'db dyn Database, tracked: MyTracked<'db>) -> u32 {
    MyTracked::new(db, 0);
    memoized_b(db, tracked)
}

fn cycle_fn<'db>(
    _db: &'db dyn Database,
    _value: &u32,
    _count: u32,
    _input: MyTracked<'db>,
) -> salsa::CycleRecoveryAction<u32> {
    salsa::CycleRecoveryAction::Iterate
}

fn cycle_initial(_db: &dyn Database, _input: MyTracked) -> u32 {
    0
}

#[salsa::tracked]
fn memoized_b<'db>(db: &'db dyn Database, tracked: MyTracked<'db>) -> u32 {
    let incr = tracked.field(db);
    let a = memoized_a(db, tracked);
    if a > 8 {
        a
    } else {
        a + incr
    }
}

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[test]
fn cycle_memoized() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 2);
    assert_eq!(memoized(&db, input), 10);
    input.set_field(&mut db).to(3);
    assert_eq!(memoized(&db, input), 9);
}
salsa-0.23.0/tests/cycle_result_dependencies.rs000064400000000000000000000010611046102023000177220ustar 00000000000000use salsa::{Database, Setter};

#[salsa::input]
struct Input {
    value: i32,
}

#[salsa::tracked(cycle_result=cycle_result)]
fn has_cycle(db: &dyn Database, input: Input) -> i32 {
    has_cycle(db, input)
}

fn cycle_result(db: &dyn Database, input: Input) -> i32 {
    input.value(db)
}

#[test]
fn cycle_result_dependencies_are_recorded() {
    let mut db = salsa::DatabaseImpl::default();
    let input = Input::new(&db, 123);
    assert_eq!(has_cycle(&db, input), 123);

    input.set_value(&mut db).to(456);
    assert_eq!(has_cycle(&db, input), 456);
}
salsa-0.23.0/tests/cycle_tracked.rs000064400000000000000000000137731046102023000153260ustar 00000000000000//! Tests for cycles where the cycle head is stored on a tracked struct
//! and that tracked struct is freed in a later revision.
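//!
//! The initial (non-simple) graph below contains a `b <-> d` cycle, so `d` ends up as the
//! cycle head and `b` holds a provisional memo. Setting `simple` drops node `d` entirely in
//! the next revision, leaving `b`'s old memo pointing at a cycle head whose storage is gone;
//! validating `b` must not dereference it.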
mod common;
use crate::common::{EventLoggerDatabase, LogDatabase};
use expect_test::expect;
use salsa::{CycleRecoveryAction, Database, Setter};

#[derive(Clone, Debug, Eq, PartialEq, Hash, salsa::Update)]
struct Graph<'db> {
    nodes: Vec<Node<'db>>,
}

impl<'db> Graph<'db> {
    fn find_node(&self, db: &dyn salsa::Database, name: &str) -> Option<Node<'db>> {
        self.nodes
            .iter()
            .find(|node| node.name(db) == name)
            .copied()
    }
}

#[derive(Clone, Debug, Eq, PartialEq, Hash)]
struct Edge {
    // Index into `graph.nodes`
    to: usize,
    cost: usize,
}

#[salsa::tracked(debug)]
struct Node<'db> {
    #[returns(ref)]
    name: String,

    #[returns(deref)]
    #[tracked]
    edges: Vec<Edge>,

    graph: GraphInput,
}

#[salsa::input(debug)]
struct GraphInput {
    simple: bool,
}

#[salsa::tracked(returns(ref))]
fn create_graph(db: &dyn salsa::Database, input: GraphInput) -> Graph<'_> {
    if input.simple(db) {
        let a = Node::new(db, "a".to_string(), vec![], input);
        let b = Node::new(db, "b".to_string(), vec![Edge { to: 0, cost: 20 }], input);
        let c = Node::new(db, "c".to_string(), vec![Edge { to: 1, cost: 2 }], input);

        Graph {
            nodes: vec![a, b, c],
        }
    } else {
        // ```
        // flowchart TD
        //
        // A("a")
        // B("b")
        // C("c")
        // D{"d"}
        //
        // B -- 20 --> D
        // C -- 4 --> D
        // D -- 4 --> A
        // D -- 4 --> B
        // ```
        let a = Node::new(db, "a".to_string(), vec![], input);
        let b = Node::new(db, "b".to_string(), vec![Edge { to: 3, cost: 20 }], input);
        let c = Node::new(db, "c".to_string(), vec![Edge { to: 3, cost: 4 }], input);
        let d = Node::new(
            db,
            "d".to_string(),
            vec![Edge { to: 0, cost: 4 }, Edge { to: 1, cost: 4 }],
            input,
        );

        Graph {
            nodes: vec![a, b, c, d],
        }
    }
}

/// Computes the minimum cost from the node with offset `0` to the given node.
#[salsa::tracked(cycle_fn=cycle_recover, cycle_initial=max_initial)]
fn cost_to_start<'db>(db: &'db dyn Database, node: Node<'db>) -> usize {
    let mut min_cost = usize::MAX;
    let graph = create_graph(db, node.graph(db));

    for edge in node.edges(db) {
        if edge.to == 0 {
            min_cost = min_cost.min(edge.cost);
        }

        let edge_cost_to_start = cost_to_start(db, graph.nodes[edge.to]);

        // We hit a cycle, never take this edge because it will always be more expensive than
        // any other edge
        if edge_cost_to_start == usize::MAX {
            continue;
        }

        min_cost = min_cost.min(edge.cost + edge_cost_to_start);
    }

    min_cost
}

fn max_initial(_db: &dyn Database, _node: Node) -> usize {
    usize::MAX
}

fn cycle_recover(
    _db: &dyn Database,
    _value: &usize,
    _count: u32,
    _inputs: Node,
) -> CycleRecoveryAction<usize> {
    CycleRecoveryAction::Iterate
}

#[test]
fn main() {
    let mut db = EventLoggerDatabase::default();

    let input = GraphInput::new(&db, false);
    let graph = create_graph(&db, input);
    let c = graph.find_node(&db, "c").unwrap();

    // Query the cost from `c` to `a`.
    // There's a cycle between `b` and `d`, where `d` becomes the cycle head and `b` is a provisional, non finalized result.
    assert_eq!(cost_to_start(&db, c), 8);

    // Change the graph, this will remove `d`, leaving `b` pointing to a cycle head that's now collected.
    // Querying the cost from `c` to `a` should try to verify the result of `b` and it is important
    // that `b` doesn't try to dereference the cycle head (because its memo is now stored on a tracked
    // struct that has been freed).
    input.set_simple(&mut db).to(true);

    let graph = create_graph(&db, input);
    let c = graph.find_node(&db, "c").unwrap();
    assert_eq!(cost_to_start(&db, c), 22);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: create_graph(Id(0)) }",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(402)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(403)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(400)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(401)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillIterateCycle { database_key: cost_to_start(Id(403)), iteration_count: IterationCount(1), fell_back: false }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(401)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: create_graph(Id(0)) }",
            "WillDiscardStaleOutput { execute_key: create_graph(Id(0)), output_key: Node(Id(403)) }",
            "DidDiscard { key: Node(Id(403)) }",
            "DidDiscard { key: cost_to_start(Id(403)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(402)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(401)) }",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillCheckCancellation",
            "WillExecute { database_key: cost_to_start(Id(400)) }",
            "WillCheckCancellation",
        ]"#]]);
}
salsa-0.23.0/tests/cycle_tracked_own_input.rs000064400000000000000000000103221046102023000174170ustar 00000000000000//! Test for cycle handling where a tracked struct created in the first revision
//! is stored in the final value of the cycle but isn't recreated in the second
//! iteration of the creating query.
//!
//! It's important that the creating query in the last iteration keeps *owning* the
//! tracked struct from the previous iteration, otherwise Salsa will discard it
//! and dereferencing the value panics.

mod common;
use crate::common::{EventLoggerDatabase, LogDatabase};
use expect_test::expect;
use salsa::{CycleRecoveryAction, Database, Setter};

#[salsa::input(debug)]
struct ClassNode {
    name: String,
    type_params: Option<TypeParamNode>,
}

#[salsa::input(debug)]
struct TypeParamNode {
    name: String,
    constraint: Option<ClassNode>,
}

#[salsa::interned(debug)]
struct Class<'db> {
    name: String,
    type_params: Option<TypeParam<'db>>,
}

#[salsa::tracked(debug)]
struct TypeParam<'db> {
    name: String,
    constraint: Option<Type<'db>>,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, salsa::Update)]
enum Type<'db> {
    Class(Class<'db>),
    Unknown,
}

impl Type<'_> {
    fn class(&self) -> Option<Class<'_>> {
        match self {
            Type::Class(class) => Some(*class),
            Type::Unknown => None,
        }
    }
}

#[salsa::tracked(cycle_fn=infer_class_recover, cycle_initial=infer_class_initial)]
fn infer_class<'db>(db: &'db dyn salsa::Database, node: ClassNode) -> Type<'db> {
    Type::Class(Class::new(
        db,
        node.name(db),
        node.type_params(db).map(|tp| infer_type_param(db, tp)),
    ))
}

#[salsa::tracked]
fn infer_type_param<'db>(db: &'db dyn salsa::Database, node: TypeParamNode) -> TypeParam<'db> {
    if let Some(constraint) = node.constraint(db) {
        // Reuse the type param from the class if any.
        // The example is a bit silly, because it's a reduction of what we have in Astral's
        // type checker but including all the details doesn't make sense.
        // What's important for the test is
        // that this query doesn't re-create the `TypeParam` tracked struct in the second iteration
        // and instead returns the one from the first iteration which
        // then is returned in the overall result (Class).
        match infer_class(db, constraint) {
            Type::Class(class) => class
                .type_params(db)
                .unwrap_or_else(|| TypeParam::new(db, node.name(db), Some(Type::Unknown))),
            Type::Unknown => TypeParam::new(db, node.name(db), Some(Type::Unknown)),
        }
    } else {
        TypeParam::new(db, node.name(db), None)
    }
}

fn infer_class_initial(_db: &'_ dyn Database, _node: ClassNode) -> Type<'_> {
    Type::Unknown
}

fn infer_class_recover<'db>(
    _db: &'db dyn Database,
    _type: &Type<'db>,
    _count: u32,
    _inputs: ClassNode,
) -> CycleRecoveryAction<Type<'db>> {
    CycleRecoveryAction::Iterate
}

#[test]
fn main() {
    let mut db = EventLoggerDatabase::default();

    // Class with a type parameter that's constrained to itself.
    // class Test[T: Test]: ...
    let class_node = ClassNode::new(&db, "Test".to_string(), None);
    let type_param_node = TypeParamNode::new(&db, "T".to_string(), Some(class_node));
    class_node
        .set_type_params(&mut db)
        .to(Some(type_param_node));

    let ty = infer_class(&db, class_node);
    db.assert_logs(expect![[r#"
        [
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: infer_class(Id(0)) }",
            "WillCheckCancellation",
            "WillExecute { database_key: infer_type_param(Id(400)) }",
            "WillCheckCancellation",
            "DidInternValue { key: Class(Id(c00)), revision: R2 }",
            "WillIterateCycle { database_key: infer_class(Id(0)), iteration_count: IterationCount(1), fell_back: false }",
            "WillCheckCancellation",
            "WillExecute { database_key: infer_type_param(Id(400)) }",
            "WillCheckCancellation",
        ]"#]]);

    let class = ty.class().unwrap();
    let type_param = class.type_params(&db).unwrap();

    // Now read the name from the type param struct that was created in the first iteration of
    // `infer_type_param`. This should not panic!
    assert_eq!(type_param.name(&db), "T");
}
salsa-0.23.0/tests/dataflow.rs000064400000000000000000000154621046102023000143300ustar 00000000000000//! Test case for fixpoint iteration cycle resolution.
//!
//! This test case is intended to simulate a (very simplified) version of a real dataflow analysis
//! using fixpoint iteration.
use std::collections::BTreeSet;
use std::iter::IntoIterator;

use salsa::{CycleRecoveryAction, Database as Db, Setter};

/// A Use of a symbol.
#[salsa::input]
struct Use {
    reaching_definitions: Vec<Definition>,
}

/// A Definition of a symbol, either of the form `base + increment` or `0 + increment`.
#[salsa::input]
struct Definition {
    base: Option<Use>,
    increment: usize,
}

#[derive(Eq, PartialEq, Clone, Debug, salsa::Update)]
enum Type {
    Bottom,
    Values(Box<[usize]>),
    Top,
}

impl Type {
    fn join(tys: impl IntoIterator<Item = Type>) -> Type {
        let mut result = Type::Bottom;
        for ty in tys.into_iter() {
            result = match (result, ty) {
                (result, Type::Bottom) => result,
                (_, Type::Top) => Type::Top,
                (Type::Top, _) => Type::Top,
                (Type::Bottom, ty) => ty,
                (Type::Values(a_ints), Type::Values(b_ints)) => {
                    let mut set = BTreeSet::new();
                    set.extend(a_ints);
                    set.extend(b_ints);
                    Type::Values(set.into_iter().collect())
                }
            }
        }
        result
    }
}

#[salsa::tracked(cycle_fn=use_cycle_recover, cycle_initial=use_cycle_initial)]
fn infer_use<'db>(db: &'db dyn Db, u: Use) -> Type {
    let defs = u.reaching_definitions(db);
    match defs[..]
    {
        [] => Type::Bottom,
        [def] => infer_definition(db, def),
        _ => Type::join(defs.iter().map(|&def| infer_definition(db, def))),
    }
}

#[salsa::tracked(cycle_fn=def_cycle_recover, cycle_initial=def_cycle_initial)]
fn infer_definition<'db>(db: &'db dyn Db, def: Definition) -> Type {
    let increment_ty = Type::Values(Box::from([def.increment(db)]));
    if let Some(base) = def.base(db) {
        let base_ty = infer_use(db, base);
        add(&base_ty, &increment_ty)
    } else {
        increment_ty
    }
}

fn def_cycle_initial(_db: &dyn Db, _def: Definition) -> Type {
    Type::Bottom
}

fn def_cycle_recover(
    _db: &dyn Db,
    value: &Type,
    count: u32,
    _def: Definition,
) -> CycleRecoveryAction<Type> {
    cycle_recover(value, count)
}

fn use_cycle_initial(_db: &dyn Db, _use: Use) -> Type {
    Type::Bottom
}

fn use_cycle_recover(
    _db: &dyn Db,
    value: &Type,
    count: u32,
    _use: Use,
) -> CycleRecoveryAction<Type> {
    cycle_recover(value, count)
}

fn cycle_recover(value: &Type, count: u32) -> CycleRecoveryAction<Type> {
    match value {
        Type::Bottom => CycleRecoveryAction::Iterate,
        Type::Values(_) => {
            if count > 4 {
                CycleRecoveryAction::Fallback(Type::Top)
            } else {
                CycleRecoveryAction::Iterate
            }
        }
        Type::Top => CycleRecoveryAction::Iterate,
    }
}

fn add(a: &Type, b: &Type) -> Type {
    match (a, b) {
        (Type::Bottom, _) | (_, Type::Bottom) => Type::Bottom,
        (Type::Top, _) | (_, Type::Top) => Type::Top,
        (Type::Values(a_ints), Type::Values(b_ints)) => {
            let mut set = BTreeSet::new();
            set.extend(
                a_ints
                    .into_iter()
                    .flat_map(|a| b_ints.into_iter().map(move |b| a + b)),
            );
            Type::Values(set.into_iter().collect())
        }
    }
}

/// x = 1
#[test]
fn simple() {
    let db = salsa::DatabaseImpl::new();

    let def = Definition::new(&db, None, 1);
    let u = Use::new(&db, vec![def]);

    let ty = infer_use(&db, u);
    assert_eq!(ty, Type::Values(Box::from([1])));
}

/// x = 1 if flag else 2
#[test]
fn union() {
    let db = salsa::DatabaseImpl::new();

    let def1 = Definition::new(&db, None, 1);
    let def2 = Definition::new(&db, None, 2);
    let u = Use::new(&db, vec![def1, def2]);

    let ty = infer_use(&db, u);
    assert_eq!(ty, Type::Values(Box::from([1, 2])));
}

/// x = 1 if flag else 2; y = x + 1
#[test]
fn union_add() {
    let db = salsa::DatabaseImpl::new();

    let x1 = Definition::new(&db, None, 1);
    let x2 = Definition::new(&db, None, 2);
    let x_use = Use::new(&db, vec![x1, x2]);
    let y_def = Definition::new(&db, Some(x_use), 1);
    let y_use = Use::new(&db, vec![y_def]);

    let ty = infer_use(&db, y_use);
    assert_eq!(ty, Type::Values(Box::from([2, 3])));
}

/// x = 1; loop { x = x + 0 }
#[test]
fn cycle_converges_then_diverges() {
    let mut db = salsa::DatabaseImpl::new();

    let def1 = Definition::new(&db, None, 1);
    let def2 = Definition::new(&db, None, 0);
    let u = Use::new(&db, vec![def1, def2]);
    def2.set_base(&mut db).to(Some(u));

    let ty = infer_use(&db, u);

    // Loop converges on 1
    assert_eq!(ty, Type::Values(Box::from([1])));

    // Set the increment on x from 0 to 1
    let new_increment = 1;
    def2.set_increment(&mut db).to(new_increment);

    // Now the loop diverges and we fall back to Top
    assert_eq!(infer_use(&db, u), Type::Top);
}

/// x = 1; loop { x = x + 1 }
#[test]
fn cycle_diverges_then_converges() {
    let mut db = salsa::DatabaseImpl::new();

    let def1 = Definition::new(&db, None, 1);
    let def2 = Definition::new(&db, None, 1);
    let u = Use::new(&db, vec![def1, def2]);
    def2.set_base(&mut db).to(Some(u));

    let ty = infer_use(&db, u);

    // Loop diverges. Cut it off and fallback to Type::Top
    assert_eq!(ty, Type::Top);

    // Set the increment from 1 to 0.
    def2.set_increment(&mut db).to(0);

    // Now the loop converges on 1.
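    // (Fixpoint sketch, assuming the recovery rules above: starting from
    // Bottom, x = join({1}, x + {0}) stabilizes at {1}, so the iteration
    // count stays under the fallback threshold of 4 and Top is never used.)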
assert_eq!(infer_use(&db, u), Type::Values(Box::from([1]))); } /// x = 0; y = 0; loop { x = y + 0; y = x + 0 } #[test_log::test] fn multi_symbol_cycle_converges_then_diverges() { let mut db = salsa::DatabaseImpl::new(); let defx0 = Definition::new(&db, None, 0); let defy0 = Definition::new(&db, None, 0); let defx1 = Definition::new(&db, None, 0); let defy1 = Definition::new(&db, None, 0); let use_x = Use::new(&db, vec![defx0, defx1]); let use_y = Use::new(&db, vec![defy0, defy1]); defx1.set_base(&mut db).to(Some(use_y)); defy1.set_base(&mut db).to(Some(use_x)); // Both symbols converge on 0 assert_eq!(infer_use(&db, use_x), Type::Values(Box::from([0]))); assert_eq!(infer_use(&db, use_y), Type::Values(Box::from([0]))); // Set the increment on x to 0. defx1.set_increment(&mut db).to(0); // Both symbols still converge on 0. assert_eq!(infer_use(&db, use_x), Type::Values(Box::from([0]))); assert_eq!(infer_use(&db, use_y), Type::Values(Box::from([0]))); // Set the increment on x from 0 to 1. defx1.set_increment(&mut db).to(1); // Now the loop diverges and we fall back to Top. assert_eq!(infer_use(&db, use_x), Type::Top); assert_eq!(infer_use(&db, use_y), Type::Top); } salsa-0.23.0/tests/debug.rs000064400000000000000000000050171046102023000136120ustar 00000000000000//! Test that `DeriveWithDb` is correctly derived. use expect_test::expect; use salsa::{Database, Setter}; #[salsa::input(debug)] struct MyInput { field: u32, } #[derive(Debug, Eq, PartialEq, Clone)] struct NotSalsa { field: String, } #[salsa::input(debug)] struct ComplexStruct { my_input: MyInput, not_salsa: NotSalsa, } #[test] fn input() { salsa::DatabaseImpl::new().attach(|db| { let input = MyInput::new(db, 22); let not_salsa = NotSalsa { field: "it's salsa time".to_string(), }; let complex_struct = ComplexStruct::new(db, input, not_salsa); // debug includes all fields let actual = format!("{complex_struct:?}"); let expected = expect![[r#"ComplexStruct { [salsa id]: Id(400), my_input: MyInput { [salsa id]: Id(0), field: 22 }, not_salsa: NotSalsa { field: "it's salsa time" } }"#]]; expected.assert_eq(&actual); }) } #[salsa::tracked] fn leak_debug_string(_db: &dyn salsa::Database, input: MyInput) -> String { format!("{input:?}") } /// Test that field reads that occur as part of `Debug` are not tracked. /// Intentionally leaks the debug string. /// Don't try this at home, kids. #[test] fn untracked_dependencies() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, 22); let s = leak_debug_string(&db, input); expect![[r#" "MyInput { [salsa id]: Id(0), field: 22 }" "#]] .assert_debug_eq(&s); input.set_field(&mut db).to(23); // check that we reuse the cached result for debug string // even though the dependency changed. 
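    // (The field read happens inside the `Debug` impl, which registers no
    // dependency, so the memoized string below still shows the stale
    // `field: 22`.)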
    let s = leak_debug_string(&db, input);
    assert!(s.contains(", field: 22 }"));
}

#[salsa::tracked]
struct DerivedCustom<'db> {
    my_input: MyInput,
    value: u32,
}

impl std::fmt::Debug for DerivedCustom<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        salsa::with_attached_database(|db| {
            write!(f, "{:?} / {:?}", self.my_input(db), self.value(db))
        })
        .unwrap_or_else(|| f.debug_tuple("DerivedCustom").finish())
    }
}

#[salsa::tracked]
fn leak_derived_custom(db: &dyn salsa::Database, input: MyInput, value: u32) -> String {
    let c = DerivedCustom::new(db, input, value);
    format!("{c:?}")
}

#[test]
fn custom_debug_impl() {
    let db = salsa::DatabaseImpl::new();

    let input = MyInput::new(&db, 22);
    let s = leak_derived_custom(&db, input, 23);
    expect![[r#"
        "MyInput { [salsa id]: Id(0), field: 22 } / 23"
    "#]]
    .assert_debug_eq(&s);
}
salsa-0.23.0/tests/debug_db_contents.rs000064400000000000000000000025201046102023000161700ustar 00000000000000#[salsa::interned(debug)]
struct InternedStruct<'db> {
    name: String,
}

#[salsa::input(debug)]
struct InputStruct {
    field: u32,
}

#[salsa::tracked(debug)]
struct TrackedStruct<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: InputStruct) -> TrackedStruct<'_> {
    TrackedStruct::new(db, input.field(db) * 2)
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();

    let _ = InternedStruct::new(&db, "Salsa".to_string());
    let _ = InternedStruct::new(&db, "Salsa2".to_string());

    // test interned structs
    let interned = InternedStruct::ingredient(&db)
        .entries(&db)
        .collect::<Vec<_>>();

    assert_eq!(interned.len(), 2);
    assert_eq!(interned[0].fields().0, "Salsa");
    assert_eq!(interned[1].fields().0, "Salsa2");

    // test input structs
    let input = InputStruct::new(&db, 22);

    let inputs = InputStruct::ingredient(&db)
        .entries(&db)
        .collect::<Vec<_>>();

    assert_eq!(inputs.len(), 1);
    assert_eq!(inputs[0].fields().0, 22);

    // test tracked structs
    let computed = tracked_fn(&db, input).field(&db);
    assert_eq!(computed, 44);

    let tracked = TrackedStruct::ingredient(&db)
        .entries(&db)
        .collect::<Vec<_>>();

    assert_eq!(tracked.len(), 1);
    assert_eq!(tracked[0].fields().0, computed);
}
salsa-0.23.0/tests/deletion-cascade.rs000064400000000000000000000052651046102023000157110ustar 00000000000000//! Delete cascade:
//!
//! * when we delete memoized data, also delete outputs from that data

mod common;
use common::LogDatabase;
use expect_test::expect;
use salsa::Setter;
use test_log::test;

#[salsa::input(singleton, debug)]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn final_result(db: &dyn LogDatabase, input: MyInput) -> u32 {
    db.push_log(format!("final_result({input:?})"));
    let mut sum = 0;
    for tracked_struct in create_tracked_structs(db, input) {
        sum += contribution_from_struct(db, tracked_struct);
    }
    sum
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn create_tracked_structs(db: &dyn LogDatabase, input: MyInput) -> Vec<MyTracked<'_>> {
    db.push_log(format!("intermediate_result({input:?})"));
    (0..input.field(db))
        .map(|i| MyTracked::new(db, i))
        .collect()
}

#[salsa::tracked]
fn contribution_from_struct<'db>(db: &'db dyn LogDatabase, tracked: MyTracked<'db>) -> u32 {
    let m = MyTracked::new(db, tracked.field(db));
    copy_field(db, m) * 2
}

#[salsa::tracked]
fn copy_field<'db>(db: &'db dyn LogDatabase, tracked: MyTracked<'db>) -> u32 {
    tracked.field(db)
}

#[test]
fn basic() {
    let mut db = common::DiscardLoggerDatabase::default();

    // Creates 3 tracked structs
    let input = MyInput::new(&db, 3);
    assert_eq!(final_result(&db, input), 2 * 2 + 2);
    db.assert_logs(expect![[r#"
        [
            "final_result(MyInput { [salsa id]: Id(0), field: 3 })",
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 3 })",
        ]"#]]);

    // Creates only 2 tracked structs in this revision, should delete 1
    //
    // Expect to see 6 DidDiscard events. Three from the primary struct:
    //
    // * the struct itself
    // * the struct's field
    // * the `contribution_from_struct` result
    //
    // and then 3 more from the struct created by `contribution_from_struct`:
    //
    // * the struct itself
    // * the struct's field
    // * the `copy_field` result
    input.set_field(&mut db).to(2);
    assert_eq!(final_result(&db, input), 2);
    db.assert_logs(expect![[r#"
        [
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 2 })",
            "salsa_event(WillDiscardStaleOutput { execute_key: create_tracked_structs(Id(0)), output_key: MyTracked(Id(402)) })",
            "salsa_event(DidDiscard { key: MyTracked(Id(402)) })",
            "salsa_event(DidDiscard { key: contribution_from_struct(Id(402)) })",
            "salsa_event(DidDiscard { key: MyTracked(Id(405)) })",
            "salsa_event(DidDiscard { key: copy_field(Id(405)) })",
            "final_result(MyInput { [salsa id]: Id(0), field: 2 })",
        ]"#]]);
}
salsa-0.23.0/tests/deletion-drops.rs000064400000000000000000000041431046102023000154530ustar 00000000000000//! Basic deletion test:
//!
//! * entities not created in a revision are deleted, as is any memoized data keyed on them.

mod common;
use salsa::{Database, Setter};
use test_log::test;

#[salsa::input]
struct MyInput {
    identity: u32,
}

#[salsa::tracked]
struct MyTracked<'db> {
    identifier: u32,

    #[tracked]
    #[returns(ref)]
    field: Bomb,
}

thread_local!
{
    static DROPPED: std::cell::RefCell<Vec<u32>> = const { std::cell::RefCell::new(vec![]) };
}

fn dropped() -> Vec<u32> {
    DROPPED.with(|d| d.borrow().clone())
}

#[derive(Clone, Debug, PartialEq, Eq)]
struct Bomb {
    identity: u32,
}

impl Drop for Bomb {
    fn drop(&mut self) {
        DROPPED.with(|d| d.borrow_mut().push(self.identity));
    }
}

#[salsa::tracked]
impl MyInput {
    #[salsa::tracked]
    fn create_tracked_struct(self, db: &dyn Database) -> MyTracked<'_> {
        MyTracked::new(
            db,
            self.identity(db),
            Bomb {
                identity: self.identity(db),
            },
        )
    }
}

#[test]
fn deletion_drops() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);

    expect_test::expect![[r#"
        []
    "#]]
    .assert_debug_eq(&dropped());

    let tracked_struct = input.create_tracked_struct(&db);
    assert_eq!(tracked_struct.field(&db).identity, 22);

    expect_test::expect![[r#"
        []
    "#]]
    .assert_debug_eq(&dropped());

    input.set_identity(&mut db).to(44);

    expect_test::expect![[r#"
        []
    "#]]
    .assert_debug_eq(&dropped());

    // Now that we execute with rev = 44, the old id is put on the free list
    let tracked_struct = input.create_tracked_struct(&db);
    assert_eq!(tracked_struct.field(&db).identity, 44);

    expect_test::expect![[r#"
        []
    "#]]
    .assert_debug_eq(&dropped());

    // When we execute again with `input1`, that id is re-used, so the old value is deleted
    let input1 = MyInput::new(&db, 66);
    let _tracked_struct1 = input1.create_tracked_struct(&db);

    expect_test::expect![[r#"
        [
            22,
        ]
    "#]]
    .assert_debug_eq(&dropped());
}
salsa-0.23.0/tests/deletion.rs000064400000000000000000000042761046102023000143310ustar 00000000000000//! Basic deletion test:
//!
//! * entities not created in a revision are deleted, as is any memoized data keyed on them.

mod common;
use common::LogDatabase;
use expect_test::expect;
use salsa::Setter;
use test_log::test;

#[salsa::input(debug)]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn final_result(db: &dyn LogDatabase, input: MyInput) -> u32 {
    db.push_log(format!("final_result({input:?})"));
    let mut sum = 0;
    for tracked_struct in create_tracked_structs(db, input) {
        sum += contribution_from_struct(db, tracked_struct);
    }
    sum
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn create_tracked_structs(db: &dyn LogDatabase, input: MyInput) -> Vec<MyTracked<'_>> {
    db.push_log(format!("intermediate_result({input:?})"));
    (0..input.field(db))
        .map(|i| MyTracked::new(db, i))
        .collect()
}

#[salsa::tracked]
fn contribution_from_struct<'db>(db: &'db dyn LogDatabase, tracked: MyTracked<'db>) -> u32 {
    tracked.field(db) * 2
}

#[test]
fn basic() {
    let mut db = common::DiscardLoggerDatabase::default();

    // Creates 3 tracked structs
    let input = MyInput::new(&db, 3);
    assert_eq!(final_result(&db, input), 2 * 2 + 2);
    db.assert_logs(expect![[r#"
        [
            "final_result(MyInput { [salsa id]: Id(0), field: 3 })",
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 3 })",
        ]"#]]);

    // Creates only 2 tracked structs in this revision, should delete 1
    //
    // Expect to see 3 DidDiscard events--
    //
    // * the struct itself
    // * the struct's field
    // * the `contribution_from_struct` result
    input.set_field(&mut db).to(2);
    assert_eq!(final_result(&db, input), 2);
    db.assert_logs(expect![[r#"
        [
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 2 })",
            "salsa_event(WillDiscardStaleOutput { execute_key: create_tracked_structs(Id(0)), output_key: MyTracked(Id(402)) })",
            "salsa_event(DidDiscard { key: MyTracked(Id(402)) })",
            "salsa_event(DidDiscard { key: contribution_from_struct(Id(402)) })",
            "final_result(MyInput { [salsa id]: Id(0), field: 2 })",
        ]"#]]);
}
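// A minimal sketch of the same discard behavior using the definitions above
// (hypothetical values; the `basic` test remains the authoritative version):
//
//     let mut db = common::DiscardLoggerDatabase::default();
//     let input = MyInput::new(&db, 1);         // creates MyTracked with field 0
//     assert_eq!(final_result(&db, input), 0);  // 0 * 2 == 0
//     input.set_field(&mut db).to(0);           // new revision creates no structs
//     assert_eq!(final_result(&db, input), 0);
//     // the log now ends with WillDiscardStaleOutput/DidDiscard entries for
//     // the struct from the previous revision and its memoized contribution.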
salsa-0.23.0/tests/derive_update.rs000064400000000000000000000030771046102023000153500ustar 00000000000000//! Test that the `Update` derive works as expected #[derive(salsa::Update)] struct MyInput { field: &'static str, } #[derive(salsa::Update)] struct MyInput2 { #[update(unsafe(with(custom_update)))] field: &'static str, #[update(unsafe(with(|dest, data| { *dest = data; true })))] field2: &'static str, } unsafe fn custom_update(dest: *mut &'static str, _data: &'static str) -> bool { unsafe { *dest = "ill-behaved for testing purposes" }; true } #[test] fn derived() { let mut m = MyInput { field: "foo" }; assert_eq!(m.field, "foo"); assert!(unsafe { salsa::Update::maybe_update(&mut m, MyInput { field: "bar" }) }); assert_eq!(m.field, "bar"); assert!(!unsafe { salsa::Update::maybe_update(&mut m, MyInput { field: "bar" }) }); assert_eq!(m.field, "bar"); } #[test] fn derived_with() { let mut m = MyInput2 { field: "foo", field2: "foo", }; assert_eq!(m.field, "foo"); assert_eq!(m.field2, "foo"); assert!(unsafe { salsa::Update::maybe_update( &mut m, MyInput2 { field: "bar", field2: "bar", }, ) }); assert_eq!(m.field, "ill-behaved for testing purposes"); assert_eq!(m.field2, "bar"); assert!(unsafe { salsa::Update::maybe_update( &mut m, MyInput2 { field: "ill-behaved for testing purposes", field2: "foo", }, ) }); assert_eq!(m.field, "ill-behaved for testing purposes"); assert_eq!(m.field2, "foo"); } salsa-0.23.0/tests/durability.rs000064400000000000000000000024351046102023000146750ustar 00000000000000//! Tests that code using the builder's durability methods compiles. use salsa::{Database, Durability, Setter}; use test_log::test; #[salsa::input] struct N { value: u32, } #[salsa::tracked] fn add3(db: &dyn Database, a: N, b: N, c: N) -> u32 { add(db, a, b) + c.value(db) } #[salsa::tracked] fn add(db: &dyn Database, a: N, b: N) -> u32 { a.value(db) + b.value(db) } #[test] fn durable_to_less_durable() { let mut db = salsa::DatabaseImpl::new(); let a = N::builder(11).value_durability(Durability::HIGH).new(&db); let b = N::builder(22).value_durability(Durability::HIGH).new(&db); let c = N::builder(33).value_durability(Durability::HIGH).new(&db); // Here, `add3` invokes `add(a, b)`, which yields 33. assert_eq!(add3(&db, a, b, c), 66); a.set_value(&mut db).with_durability(Durability::LOW).to(11); // Here, `add3` invokes `add`, which *still* yields 33, but which // is no longer of high durability. Since value didn't change, we might // preserve `add3` unchanged, not noticing that it is no longer // of high durability. assert_eq!(add3(&db, a, b, c), 66); // In that case, we would not get the correct result here, when // 'a' changes *again*. a.set_value(&mut db).to(22); assert_eq!(add3(&db, a, b, c), 77); } salsa-0.23.0/tests/elided-lifetime-in-tracked-fn.rs000064400000000000000000000032121046102023000201610ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. 
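// (The point of this variant of the hello-world test is the `-> MyTracked<'_>`
// signatures below: the tracked struct's lifetime is elided rather than written
// out, and the query still ties it to the database borrow.)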
mod common; use common::LogDatabase; use expect_test::expect; use salsa::Setter; use test_log::test; #[salsa::input(debug)] struct MyInput { field: u32, } #[salsa::tracked] fn final_result(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("final_result({input:?})")); intermediate_result(db, input).field(db) * 2 } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn intermediate_result(db: &dyn LogDatabase, input: MyInput) -> MyTracked<'_> { db.push_log(format!("intermediate_result({input:?})")); MyTracked::new(db, input.field(db) / 2) } #[test] fn execute() { let mut db = common::LoggerDatabase::default(); let input = MyInput::new(&db, 22); assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" [ "final_result(MyInput { [salsa id]: Id(0), field: 22 })", "intermediate_result(MyInput { [salsa id]: Id(0), field: 22 })", ]"#]]); // Intermediate result is the same, so final result does // not need to be recomputed: input.set_field(&mut db).to(23); assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" [ "intermediate_result(MyInput { [salsa id]: Id(0), field: 23 })", ]"#]]); input.set_field(&mut db).to(24); assert_eq!(final_result(&db, input), 24); db.assert_logs(expect![[r#" [ "intermediate_result(MyInput { [salsa id]: Id(0), field: 24 })", "final_result(MyInput { [salsa id]: Id(0), field: 24 })", ]"#]]); } salsa-0.23.0/tests/expect_reuse_field_x_of_a_tracked_struct_changes_but_fn_depends_on_field_y.rs000064400000000000000000000045721046102023000317210ustar 00000000000000//! Test that if field X of a tracked struct changes but not field Y, //! functions that depend on X re-execute, but those depending only on Y do not //! compiles and executes successfully. #![allow(dead_code)] mod common; use common::LogDatabase; use expect_test::expect; use salsa::Setter; #[salsa::input(debug)] struct MyInput { field: u32, } #[salsa::tracked] fn final_result_depends_on_x(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("final_result_depends_on_x({input:?})")); intermediate_result(db, input).x(db) * 2 } #[salsa::tracked] fn final_result_depends_on_y(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("final_result_depends_on_y({input:?})")); intermediate_result(db, input).y(db) * 2 } #[salsa::tracked] struct MyTracked<'db> { #[tracked] x: u32, #[tracked] y: u32, } #[salsa::tracked] fn intermediate_result(db: &dyn LogDatabase, input: MyInput) -> MyTracked<'_> { MyTracked::new(db, input.field(db).div_ceil(2), input.field(db) / 2) } #[test] fn execute() { // x = (input.field + 1) / 2 // y = input.field / 2 // final_result_depends_on_x = x * 2 = (input.field + 1) / 2 * 2 // final_result_depends_on_y = y * 2 = input.field / 2 * 2 let mut db = common::LoggerDatabase::default(); // intermediate results: // x = (22 + 1) / 2 = 11 // y = 22 / 2 = 11 let input = MyInput::new(&db, 22); assert_eq!(final_result_depends_on_x(&db, input), 22); db.assert_logs(expect![[r#" [ "final_result_depends_on_x(MyInput { [salsa id]: Id(0), field: 22 })", ]"#]]); assert_eq!(final_result_depends_on_y(&db, input), 22); db.assert_logs(expect![[r#" [ "final_result_depends_on_y(MyInput { [salsa id]: Id(0), field: 22 })", ]"#]]); input.set_field(&mut db).to(23); // x = (23 + 1) / 2 = 12 // Intermediate result x changes, so final result depends on x // needs to be recomputed; assert_eq!(final_result_depends_on_x(&db, input), 24); db.assert_logs(expect![[r#" [ "final_result_depends_on_x(MyInput { [salsa id]: Id(0), field: 23 })", ]"#]]); // y 
= 23 / 2 = 11 // Intermediate result y is the same, so final result depends on y // does not need to be recomputed; assert_eq!(final_result_depends_on_y(&db, input), 22); db.assert_logs(expect!["[]"]); } salsa-0.23.0/tests/expect_reuse_field_x_of_an_input_changes_but_fn_depends_on_field_y.rs000064400000000000000000000033201046102023000302030ustar 00000000000000//! Test that if field X of an input changes but not field Y, //! functions that depend on X re-execute, but those depending only on Y do not //! compiles and executes successfully. #![allow(dead_code)] mod common; use common::LogDatabase; use expect_test::expect; use salsa::Setter; #[salsa::input(debug)] struct MyInput { x: u32, y: u32, } #[salsa::tracked] fn result_depends_on_x(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("result_depends_on_x({input:?})")); input.x(db) + 1 } #[salsa::tracked] fn result_depends_on_y(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("result_depends_on_y({input:?})")); input.y(db) - 1 } #[test] fn execute() { // result_depends_on_x = x + 1 // result_depends_on_y = y - 1 let mut db = common::LoggerDatabase::default(); let input = MyInput::new(&db, 22, 33); assert_eq!(result_depends_on_x(&db, input), 23); db.assert_logs(expect![[r#" [ "result_depends_on_x(MyInput { [salsa id]: Id(0), x: 22, y: 33 })", ]"#]]); assert_eq!(result_depends_on_y(&db, input), 32); db.assert_logs(expect![[r#" [ "result_depends_on_y(MyInput { [salsa id]: Id(0), x: 22, y: 33 })", ]"#]]); input.set_x(&mut db).to(23); // input x changes, so result depends on x needs to be recomputed; assert_eq!(result_depends_on_x(&db, input), 24); db.assert_logs(expect![[r#" [ "result_depends_on_x(MyInput { [salsa id]: Id(0), x: 23, y: 33 })", ]"#]]); // input y is the same, so result depends on y // does not need to be recomputed; assert_eq!(result_depends_on_y(&db, input), 32); db.assert_logs(expect!["[]"]); } salsa-0.23.0/tests/hash_collision.rs000064400000000000000000000011331046102023000155150ustar 00000000000000use std::hash::Hash; #[test] fn hello() { use salsa::{Database, DatabaseImpl, Setter}; #[salsa::input] struct Bool { value: bool, } #[salsa::tracked] struct True<'db> {} #[salsa::tracked] struct False<'db> {} #[salsa::tracked] fn hello(db: &dyn Database, bool: Bool) { if bool.value(db) { True::new(db); } else { False::new(db); } } let mut db = DatabaseImpl::new(); let input = Bool::new(&db, false); hello(&db, input); input.set_value(&mut db).to(true); hello(&db, input); } salsa-0.23.0/tests/hello_world.rs000064400000000000000000000047261046102023000150440ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. 
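// (Shape of the pipeline below: `final_result` doubles the field of the tracked
// struct produced by `intermediate_result`, which halves the input. That is why
// bumping the input from 22 to 23 re-runs only the intermediate query: 23 / 2
// is still 11.)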
mod common; use common::LogDatabase; use expect_test::expect; use salsa::Setter; use test_log::test; #[salsa::input(debug)] struct MyInput { field: u32, } #[salsa::tracked] fn final_result(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("final_result({input:?})")); intermediate_result(db, input).field(db) * 2 } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn intermediate_result(db: &dyn LogDatabase, input: MyInput) -> MyTracked<'_> { db.push_log(format!("intermediate_result({input:?})")); MyTracked::new(db, input.field(db) / 2) } #[test] fn execute() { let mut db = common::LoggerDatabase::default(); let input = MyInput::new(&db, 22); assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" [ "final_result(MyInput { [salsa id]: Id(0), field: 22 })", "intermediate_result(MyInput { [salsa id]: Id(0), field: 22 })", ]"#]]); // Intermediate result is the same, so final result does // not need to be recomputed: input.set_field(&mut db).to(23); assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" [ "intermediate_result(MyInput { [salsa id]: Id(0), field: 23 })", ]"#]]); input.set_field(&mut db).to(24); assert_eq!(final_result(&db, input), 24); db.assert_logs(expect![[r#" [ "intermediate_result(MyInput { [salsa id]: Id(0), field: 24 })", "final_result(MyInput { [salsa id]: Id(0), field: 24 })", ]"#]]); } /// Create and mutate a distinct input. No re-execution required. #[test] fn red_herring() { let mut db = common::LoggerDatabase::default(); let input = MyInput::new(&db, 22); assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" [ "final_result(MyInput { [salsa id]: Id(0), field: 22 })", "intermediate_result(MyInput { [salsa id]: Id(0), field: 22 })", ]"#]]); // Create a distinct input and mutate it. // This will trigger a new revision in the database // but shouldn't actually invalidate our existing ones. let input2 = MyInput::new(&db, 44); input2.set_field(&mut db).to(66); // Re-run the query on the original input. Nothing re-executes! assert_eq!(final_result(&db, input), 22); db.assert_logs(expect![[r#" []"#]]); } salsa-0.23.0/tests/input_default.rs000064400000000000000000000016241046102023000153670ustar 00000000000000//! Tests that fields attributed with `#[default]` are initialized with `Default::default()`. use salsa::Durability; use test_log::test; #[salsa::input] struct MyInput { required: bool, #[default] optional: usize, } #[test] fn new_constructor() { let db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, true); assert!(input.required(&db)); assert_eq!(input.optional(&db), 0); } #[test] fn builder_specify_optional() { let db = salsa::DatabaseImpl::new(); let input = MyInput::builder(true).optional(20).new(&db); assert!(input.required(&db)); assert_eq!(input.optional(&db), 20); } #[test] fn builder_default_optional_value() { let db = salsa::DatabaseImpl::new(); let input = MyInput::builder(true) .required_durability(Durability::HIGH) .new(&db); assert!(input.required(&db)); assert_eq!(input.optional(&db), 0); } salsa-0.23.0/tests/input_field_durability.rs000064400000000000000000000014571046102023000172620ustar 00000000000000//! Tests that code using the builder's durability methods compiles. 
use salsa::Durability;
use test_log::test;

#[salsa::input]
struct MyInput {
    required_field: bool,

    #[default]
    optional_field: usize,
}

#[test]
fn required_field_durability() {
    let db = salsa::DatabaseImpl::new();

    let input = MyInput::builder(true)
        .required_field_durability(Durability::HIGH)
        .new(&db);

    assert!(input.required_field(&db));
    assert_eq!(input.optional_field(&db), 0);
}

#[test]
fn optional_field_durability() {
    let db = salsa::DatabaseImpl::new();

    let input = MyInput::builder(true)
        .optional_field(20)
        .optional_field_durability(Durability::HIGH)
        .new(&db);

    assert!(input.required_field(&db));
    assert_eq!(input.optional_field(&db), 20);
}
salsa-0.23.0/tests/input_setter_preserves_durability.rs000064400000000000000000000014651046102023000216020ustar 00000000000000use salsa::plumbing::ZalsaDatabase;
use salsa::{Durability, Setter};
use test_log::test;

#[salsa::input]
struct MyInput {
    required_field: bool,

    #[default]
    optional_field: usize,
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();

    let input = MyInput::builder(true)
        .required_field_durability(Durability::HIGH)
        .new(&db);

    // Change the field value. It should preserve high durability.
    input.set_required_field(&mut db).to(false);

    let last_high_revision = db.zalsa().last_changed_revision(Durability::HIGH);

    // Changing the value again should **again** dump the high durability revision.
    input.set_required_field(&mut db).to(false);

    assert_ne!(
        db.zalsa().last_changed_revision(Durability::HIGH),
        last_high_revision
    );
}
salsa-0.23.0/tests/intern_access_in_different_revision.rs000064400000000000000000000011611046102023000217720ustar 00000000000000use salsa::{Durability, Setter};

#[salsa::interned(no_lifetime)]
struct Interned {
    field: u32,
}

#[salsa::input]
struct Input {
    field: i32,
}

#[test]
fn the_test() {
    let mut db = salsa::DatabaseImpl::default();
    let input = Input::builder(-123456)
        .field_durability(Durability::HIGH)
        .new(&db);
    // Create an intern in an early revision.
    let interned = Interned::new(&db, 0xDEADBEEF);

    // Trigger a new revision.
    input
        .set_field(&mut db)
        .with_durability(Durability::HIGH)
        .to(123456);

    // Read the interned value
    let _ = interned.field(&db);
}
salsa-0.23.0/tests/interned-revisions.rs000064400000000000000000000365701046102023000163610ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.

mod common;
use common::LogDatabase;
use expect_test::expect;
use salsa::{Database, Durability, Setter};
use test_log::test;

#[salsa::input]
struct Input {
    field1: usize,
}

#[salsa::interned(revisions = 3)]
#[derive(Debug)]
struct Interned<'db> {
    field1: BadHash,
}

// Use a consistent hash value to ensure that interned value sharding
// does not interfere with garbage collection.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
struct BadHash(usize);

impl std::hash::Hash for BadHash {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        state.write_i16(0);
    }
}

#[salsa::interned]
#[derive(Debug)]
struct NestedInterned<'db> {
    interned: Interned<'db>,
}

#[test]
fn test_intern_new() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, input: Input) -> Interned<'db> {
        Interned::new(db, BadHash(input.field1(db)))
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);

    let result_in_rev_1 = function(&db, input);
    assert_eq!(result_in_rev_1.field1(&db).0, 0);

    // Modify the input to force a new value to be created.
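    // (Because `field1` flows into the interned fields, this re-executes
    // `function` and interns a second, distinct value; the event log below
    // records one `DidInternValue` per revision.)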
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    assert_eq!(result_in_rev_2.field1(&db).0, 1);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { key: Interned(Id(400)), revision: R1 }",
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { key: Interned(Id(401)), revision: R2 }",
        ]"#]]);
}

#[test]
fn test_reintern() {
    #[salsa::tracked]
    fn function(db: &dyn Database, input: Input) -> Interned<'_> {
        let _ = input.field1(db);
        Interned::new(db, BadHash(0))
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);

    let result_in_rev_1 = function(&db, input);
    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { key: Interned(Id(400)), revision: R1 }",
        ]"#]]);

    assert_eq!(result_in_rev_1.field1(&db).0, 0);

    // Modify the input to force the value to be re-interned.
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    db.assert_logs(expect![[r#"
        [
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidValidateInternedValue { key: Interned(Id(400)), revision: R2 }",
        ]"#]]);

    assert_eq!(result_in_rev_2.field1(&db).0, 0);
}

#[test]
fn test_durability() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, _input: Input) -> Interned<'db> {
        Interned::new(db, BadHash(0))
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);

    let result_in_rev_1 = function(&db, input);
    assert_eq!(result_in_rev_1.field1(&db).0, 0);

    // Modify the input to bump the revision without re-interning the value, as there
    // is no read dependency.
    input.set_field1(&mut db).to(1);

    let result_in_rev_2 = function(&db, input);
    assert_eq!(result_in_rev_2.field1(&db).0, 0);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: function(Id(0)) }",
            "DidInternValue { key: Interned(Id(400)), revision: R1 }",
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "DidValidateMemoizedValue { database_key: function(Id(0)) }",
        ]"#]]);
}

#[salsa::interned(revisions = usize::MAX)]
#[derive(Debug)]
struct Immortal<'db> {
    field1: BadHash,
}

#[test]
fn test_immortal() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, input: Input) -> Immortal<'db> {
        Immortal::new(db, BadHash(input.field1(db)))
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);

    let result = function(&db, input);
    assert_eq!(result.field1(&db).0, 0);

    // Modify the input to bump the revision and intern a new value.
    //
    // No values should ever be reused with `revisions = usize::MAX`.
    for i in 1..100 {
        input.set_field1(&mut db).to(i);

        let result = function(&db, input);
        assert_eq!(result.field1(&db).0, i);
        assert_eq!(salsa::plumbing::AsId::as_id(&result).generation(), 0);
    }
}

#[test]
fn test_reuse() {
    #[salsa::tracked]
    fn function<'db>(db: &'db dyn Database, input: Input) -> Interned<'db> {
        Interned::new(db, BadHash(input.field1(db)))
    }

    let mut db = common::EventLoggerDatabase::default();

    let input = Input::new(&db, 0);

    let result = function(&db, input);
    assert_eq!(result.field1(&db).0, 0);

    // Modify the input to bump the revision and intern a new value.
    //
    // The slot will not be reused for the first few revisions, but after
    // that we should not allocate any more slots.
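    // (With `revisions = 3` on `Interned`, a slot becomes eligible for reuse
    // roughly once it has gone unread for three revisions; the `g1`/`g2`
    // suffixes in the log below are the reused slot's bumped generation.)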
for i in 1..10 { input.set_field1(&mut db).to(i); let result = function(&db, input); assert_eq!(result.field1(&db).0, i); } // Values that have been reused should be re-interned. for i in 1..10 { let result = function(&db, Input::new(&db, i)); assert_eq!(result.field1(&db).0, i); } db.assert_logs(expect![[r#" [ "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidInternValue { key: Interned(Id(400)), revision: R1 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidInternValue { key: Interned(Id(401)), revision: R2 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidInternValue { key: Interned(Id(402)), revision: R3 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(400g1)), revision: R4 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(401g1)), revision: R5 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(402g1)), revision: R6 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(400g2)), revision: R7 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(401g2)), revision: R8 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(402g2)), revision: R9 }", "DidSetCancellationFlag", "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidReuseInternedValue { key: Interned(Id(400g3)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(1)) }", "DidInternValue { key: Interned(Id(403)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(2)) }", "DidInternValue { key: Interned(Id(404)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(3)) }", "DidInternValue { key: Interned(Id(405)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(4)) }", "DidInternValue { key: Interned(Id(406)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(5)) }", "DidInternValue { key: Interned(Id(407)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(6)) }", "DidInternValue { key: Interned(Id(408)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(7)) }", "DidValidateInternedValue { key: Interned(Id(401g2)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(8)) }", "DidValidateInternedValue { key: Interned(Id(402g2)), revision: R10 }", "WillCheckCancellation", "WillExecute { database_key: function(Id(9)) }", ]"#]]); } #[test] fn test_reuse_indirect() { #[salsa::tracked] fn intern<'db>(db: &'db dyn Database, input: Input, value: usize) -> Interned<'db> { intern_inner(db, input, value) } #[salsa::tracked] fn intern_inner<'db>(db: &'db dyn Database, input: Input, value: usize) -> Interned<'db> { let _i = input.field1(db); Interned::new(db, BadHash(value)) } let mut db = common::EventLoggerDatabase::default(); let input = 
Input::builder(0).durability(Durability::LOW).new(&db); // Intern `i0`. let i0 = intern(&db, input, 0); let i0_id = salsa::plumbing::AsId::as_id(&i0); assert_eq!(i0.field1(&db).0, 0); // Get the garbage collector to consider `i0` stale. for x in 1.. { db.synthetic_write(Durability::LOW); let ix = intern(&db, input, x); let ix_id = salsa::plumbing::AsId::as_id(&ix); // We reused the slot of `i0`. if ix_id.index() == i0_id.index() { assert_eq!(ix.field1(&db).0, x); // Re-intern and read `i0` from a new slot. // // Note that the only writes have been synthetic, so none of the query dependencies // have changed directly. The interned value dependency should be enough to force // the inner query to update. let i0 = intern(&db, input, 0); assert_eq!(i0.field1(&db).0, 0); break; } } } #[test] fn test_reuse_interned_input() { // A query that creates an interned value. #[salsa::tracked] fn create_interned<'db>(db: &'db dyn Database, input: Input) -> Interned<'db> { Interned::new(db, BadHash(input.field1(db))) } #[salsa::tracked] fn use_interned<'db>(db: &'db dyn Database, interned: Interned<'db>) -> usize { interned.field1(db).0 } let mut db = common::EventLoggerDatabase::default(); let input = Input::new(&db, 0); // Create and use I0 in R0. let interned = create_interned(&db, input); let result = use_interned(&db, interned); assert_eq!(result, 0); // Create and use I1 in a number of revisions, marking I0 as stale. input.set_field1(&mut db).to(1); for _ in 0..10 { let interned = create_interned(&db, input); let result = use_interned(&db, interned); assert_eq!(result, 1); // Trigger a new revision. input.set_field1(&mut db).to(1); } // Create I2, reusing the stale slot of I0. input.set_field1(&mut db).to(2); let interned = create_interned(&db, input); // Use I2. The function should not be memoized with the value of I0, despite I2 and I0 // sharing the same slot. let result = use_interned(&db, interned); assert_eq!(result, 2); } #[test] fn test_reuse_multiple_interned_input() { // A query that creates an interned value. #[salsa::tracked] fn create_interned<'db>(db: &'db dyn Database, input: Input) -> Interned<'db> { Interned::new(db, BadHash(input.field1(db))) } // A query that creates an interned value. #[salsa::tracked] fn create_nested_interned<'db>( db: &'db dyn Database, interned: Interned<'db>, ) -> NestedInterned<'db> { NestedInterned::new(db, interned) } #[salsa::tracked] fn use_interned<'db>(db: &'db dyn Database, interned: Interned<'db>) -> usize { interned.field1(db).0 } // A query that reads an interned value. #[salsa::tracked] fn use_nested_interned<'db>( db: &'db dyn Database, nested_interned: NestedInterned<'db>, ) -> usize { nested_interned.interned(db).field1(db).0 } let mut db = common::EventLoggerDatabase::default(); let input = Input::new(&db, 0); // Create and use NI0, which wraps I0, in R0. let interned = create_interned(&db, input); let i0_id = salsa::plumbing::AsId::as_id(&interned); let nested_interned = create_nested_interned(&db, interned); let result = use_nested_interned(&db, nested_interned); assert_eq!(result, 0); // Create and use I1 in a number of revisions, marking I0 as stale. input.set_field1(&mut db).to(1); for _ in 0..10 { let interned = create_interned(&db, input); let result = use_interned(&db, interned); assert_eq!(result, 1); // Trigger a new revision. input.set_field1(&mut db).to(1); } // Create I2, reusing the stale slot of I0. 
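    // (Reuse keeps the slot index but bumps its generation, which is what the
    // `assert_ne!(i0_id, i2_id)` below relies on.)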
input.set_field1(&mut db).to(2); let interned = create_interned(&db, input); let i2_id = salsa::plumbing::AsId::as_id(&interned); assert_ne!(i0_id, i2_id); // Create NI1 wrapping I2 instead of I0. let nested_interned = create_nested_interned(&db, interned); // Use NI1. The function should not be memoized with the value of NI0, // despite I2 and I0 sharing the same ID. let result = use_nested_interned(&db, nested_interned); assert_eq!(result, 2); } #[test] fn test_durability_increase() { #[salsa::tracked] fn intern<'db>(db: &'db dyn Database, input: Input, value: usize) -> Interned<'db> { let _f = input.field1(db); Interned::new(db, BadHash(value)) } let mut db = common::EventLoggerDatabase::default(); let high_durability = Input::builder(0).durability(Durability::HIGH).new(&db); let low_durability = Input::builder(1).durability(Durability::LOW).new(&db); // Intern `i0`. let _i0 = intern(&db, low_durability, 0); // Re-intern `i0`, this time using a high-durability. let _i0 = intern(&db, high_durability, 0); // Get the garbage collector to consider `i0` stale. for _ in 0..100 { let _dummy = intern(&db, low_durability, 1000).field1(&db); db.synthetic_write(Durability::LOW); } // Intern `i1`. // // The slot of `i0` should not be reused as it is high-durability, and there // were no high-durability writes. let _i1 = intern(&db, low_durability, 1); // Re-intern and read `i0`. // // If the slot was reused, the memo would be shallow-verified and we would // read `i1` incorrectly. let value = intern(&db, high_durability, 0); assert_eq!(value.field1(&db).0, 0); db.synthetic_write(Durability::LOW); // We should have the same issue even after a low-durability write. let value = intern(&db, high_durability, 0); assert_eq!(value.field1(&db).0, 0); } salsa-0.23.0/tests/interned-structs.rs000064400000000000000000000140351046102023000160410ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. 
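// (Most assertions below exercise the `Lookup`/`HashEqLike` plumbing: interning
// accepts borrowed forms such as `&str`, `&[&str]`, or `&Path` and must map
// them to the same key as their owned equivalents.)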
use std::path::{Path, PathBuf};

use expect_test::expect;
use salsa::plumbing::{AsId, FromId};
use test_log::test;

#[salsa::interned(debug)]
struct InternedBoxed<'db> {
    data: Box<str>,
}

#[salsa::interned(debug)]
struct InternedString<'db> {
    data: String,
}

#[salsa::interned(debug)]
struct InternedPair<'db> {
    data: (InternedString<'db>, InternedString<'db>),
}

#[salsa::interned(debug)]
struct InternedTwoFields<'db> {
    data1: String,
    data2: String,
}

#[salsa::interned(debug)]
struct InternedVec<'db> {
    data1: Vec<String>,
}

#[salsa::interned(debug)]
struct InternedPathBuf<'db> {
    data1: PathBuf,
}

#[salsa::interned(no_lifetime, debug)]
struct InternedStringNoLifetime {
    data: String,
}

#[derive(Debug, Eq, PartialEq, Hash, Clone)]
struct Foo;

#[salsa::interned(debug)]
struct InternedFoo<'db> {
    data: Foo,
}

#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct SalsaIdWrapper(salsa::Id);

impl AsId for SalsaIdWrapper {
    fn as_id(&self) -> salsa::Id {
        self.0
    }
}

impl FromId for SalsaIdWrapper {
    fn from_id(id: salsa::Id) -> Self {
        SalsaIdWrapper(id)
    }
}

#[salsa::interned(id = SalsaIdWrapper, debug)]
struct InternedStringWithCustomId<'db> {
    data: String,
}

#[salsa::interned(id = SalsaIdWrapper, no_lifetime, debug)]
struct InternedStringWithCustomIdAndNoLifetime<'db> {
    data: String,
}

#[salsa::tracked]
fn intern_stuff(db: &dyn salsa::Database) -> String {
    let s1 = InternedString::new(db, "Hello, ".to_string());
    let s2 = InternedString::new(db, "World, ");
    let s3 = InternedPair::new(db, (s1, s2));
    format!("{s3:?}")
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();
    expect![[r#"
        "InternedPair { data: (InternedString { data: \"Hello, \" }, InternedString { data: \"World, \" }) }"
    "#]].assert_debug_eq(&intern_stuff(&db));
}

#[test]
fn interning_returns_equal_keys_for_equal_data() {
    let db = salsa::DatabaseImpl::new();
    let s1 = InternedString::new(&db, "Hello, ".to_string());
    let s2 = InternedString::new(&db, "World, ".to_string());
    let s1_2 = InternedString::new(&db, "Hello, ");
    let s2_2 = InternedString::new(&db, "World, ");
    assert_eq!(s1, s1_2);
    assert_eq!(s2, s2_2);
}

#[test]
fn interning_returns_equal_keys_for_equal_data_multi_field() {
    let db = salsa::DatabaseImpl::new();
    let s1 = InternedTwoFields::new(&db, "Hello, ".to_string(), "World");
    let s2 = InternedTwoFields::new(&db, "World, ", "Hello".to_string());
    let s1_2 = InternedTwoFields::new(&db, "Hello, ", "World");
    let s2_2 = InternedTwoFields::new(&db, "World, ", "Hello");
    let new = InternedTwoFields::new(&db, "Hello, World", "");
    assert_eq!(s1, s1_2);
    assert_eq!(s2, s2_2);
    assert_ne!(s1, s2_2);
    assert_ne!(s1, new);
}

#[test]
fn interning_boxed() {
    let db = salsa::DatabaseImpl::new();

    assert_eq!(
        InternedBoxed::new(&db, "Hello"),
        InternedBoxed::new(&db, Box::from("Hello"))
    );
}

#[test]
fn interned_structs_have_public_ingredients() {
    use salsa::plumbing::AsId;

    let db = salsa::DatabaseImpl::new();
    let s = InternedString::new(&db, String::from("Hello, world!"));

    let underlying_id = s.0;
    let data = InternedString::ingredient(&db).data(&db, underlying_id.as_id());
    assert_eq!(data, &(String::from("Hello, world!"),));
}

#[test]
fn interning_vec() {
    let db = salsa::DatabaseImpl::new();
    let s1 = InternedVec::new(&db, ["Hello, ".to_string(), "World".to_string()].as_slice());
    let s2 = InternedVec::new(&db, ["Hello, ", "World"].as_slice());
    let s3 = InternedVec::new(&db, vec!["Hello, ".to_string(), "World".to_string()]);
    let s4 = InternedVec::new(&db, ["Hello, ", "World"].as_slice());
    let s5 = InternedVec::new(&db, ["Hello, ", "World",
"Test"].as_slice()); let s6 = InternedVec::new(&db, ["Hello, ", "World", ""].as_slice()); let s7 = InternedVec::new(&db, ["Hello, "].as_slice()); assert_eq!(s1, s2); assert_eq!(s1, s3); assert_eq!(s1, s4); assert_ne!(s1, s5); assert_ne!(s1, s6); assert_ne!(s5, s6); assert_ne!(s6, s7); } #[test] fn interning_path_buf() { let db = salsa::DatabaseImpl::new(); let s1 = InternedPathBuf::new(&db, PathBuf::from("test_path".to_string())); let s2 = InternedPathBuf::new(&db, Path::new("test_path")); let s3 = InternedPathBuf::new(&db, Path::new("test_path/")); let s4 = InternedPathBuf::new(&db, Path::new("test_path/a")); assert_eq!(s1, s2); assert_eq!(s1, s3); assert_ne!(s1, s4); } #[test] fn interning_without_lifetimes() { let db = salsa::DatabaseImpl::new(); let s1 = InternedStringNoLifetime::new(&db, "Hello, ".to_string()); let s2 = InternedStringNoLifetime::new(&db, "World, ".to_string()); let s1_2 = InternedStringNoLifetime::new(&db, "Hello, "); let s2_2 = InternedStringNoLifetime::new(&db, "World, "); assert_eq!(s1, s1_2); assert_eq!(s2, s2_2); } #[test] fn interning_with_custom_ids() { let db = salsa::DatabaseImpl::new(); let s1 = InternedStringWithCustomId::new(&db, "Hello, ".to_string()); let s2 = InternedStringWithCustomId::new(&db, "World, ".to_string()); let s1_2 = InternedStringWithCustomId::new(&db, "Hello, "); let s2_2 = InternedStringWithCustomId::new(&db, "World, "); assert_eq!(s1, s1_2); assert_eq!(s2, s2_2); } #[test] fn interning_with_custom_ids_and_no_lifetime() { let db = salsa::DatabaseImpl::new(); let s1 = InternedStringWithCustomIdAndNoLifetime::new(&db, "Hello, ".to_string()); let s2 = InternedStringWithCustomIdAndNoLifetime::new(&db, "World, ".to_string()); let s1_2 = InternedStringWithCustomIdAndNoLifetime::new(&db, "Hello, "); let s2_2 = InternedStringWithCustomIdAndNoLifetime::new(&db, "World, "); assert_eq!(s1, s1_2); assert_eq!(s2, s2_2); } #[test] fn interning_reference() { let db = salsa::DatabaseImpl::new(); let s1 = InternedFoo::new(&db, Foo); let s2 = InternedFoo::new(&db, &Foo); assert_eq!(s1, s2); } salsa-0.23.0/tests/interned-structs_self_ref.rs000064400000000000000000000154501046102023000177100ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. 
use std::any::TypeId;
use std::convert::identity;

use salsa::plumbing::Zalsa;
use test_log::test;

#[test]
fn interning_returns_equal_keys_for_equal_data() {
    let db = salsa::DatabaseImpl::new();
    let s1 = InternedString::new(&db, "Hello, ".to_string(), identity);
    let s2 = InternedString::new(&db, "World, ".to_string(), |_| s1);
    let s1_2 = InternedString::new(&db, "Hello, ", identity);
    let s2_2 = InternedString::new(&db, "World, ", |_| s2);
    assert_eq!(s1, s1_2);
    assert_eq!(s2, s2_2);
}

// Recursive expansion of interned macro
// #[salsa::interned]
// struct InternedString<'db> {
//     data: String,
//     other: InternedString<'db>,
// }
// ======================================

#[derive(Copy, Clone, PartialEq, PartialOrd, Eq, Ord, Hash)]
struct InternedString<'db>(
    salsa::Id,
    std::marker::PhantomData<&'db salsa::plumbing::interned::Value<InternedString<'static>>>,
);

#[allow(warnings)]
const _: () = {
    use salsa::plumbing as zalsa_;
    use zalsa_::interned as zalsa_struct_;
    type Configuration_ = InternedString<'static>;

    #[derive(Clone)]
    struct StructData<'db>(String, InternedString<'db>);

    impl<'db> Eq for StructData<'db> {}
    impl<'db> PartialEq for StructData<'db> {
        fn eq(&self, other: &Self) -> bool {
            self.0 == other.0
        }
    }

    impl<'db> std::hash::Hash for StructData<'db> {
        fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
            self.0.hash(state);
        }
    }

    #[doc = r" Key to use during hash lookups. Each field is some type that implements `Lookup`"]
    #[doc = r" for the owned type. This permits interning with an `&str` when a `String` is required and so forth."]
    #[derive(Hash)]
    struct StructKey<'db, T0>(T0, std::marker::PhantomData<&'db ()>);

    impl<'db, T0> zalsa_::interned::HashEqLike<StructKey<'db, T0>> for StructData<'db>
    where
        String: zalsa_::interned::HashEqLike<T0>,
    {
        fn hash<H: std::hash::Hasher>(&self, h: &mut H) {
            zalsa_::interned::HashEqLike::<T0>::hash(&self.0, &mut *h);
        }

        fn eq(&self, data: &StructKey<'db, T0>) -> bool {
            (zalsa_::interned::HashEqLike::<T0>::eq(&self.0, &data.0) && true)
        }
    }

    impl zalsa_struct_::Configuration for Configuration_ {
        const LOCATION: zalsa_::Location = zalsa_::Location {
            file: file!(),
            line: line!(),
        };
        const DEBUG_NAME: &'static str = "InternedString";
        type Fields<'a> = StructData<'a>;
        type Struct<'a> = InternedString<'a>;
    }

    impl Configuration_ {
        pub fn ingredient<Db>(db: &Db) -> &zalsa_struct_::IngredientImpl<Self>
        where
            Db: ?Sized + zalsa_::Database,
        {
            static CACHE: zalsa_::IngredientCache<zalsa_struct_::IngredientImpl<Configuration_>> =
                zalsa_::IngredientCache::new();

            let zalsa = db.zalsa();
            CACHE.get_or_create(zalsa, || {
                zalsa
                    .lookup_jar_by_type::<zalsa_struct_::JarImpl<Configuration_>>()
                    .get_or_create()
            })
        }
    }

    impl zalsa_::AsId for InternedString<'_> {
        fn as_id(&self) -> salsa::Id {
            self.0
        }
    }

    impl zalsa_::FromId for InternedString<'_> {
        fn from_id(id: salsa::Id) -> Self {
            Self(id, std::marker::PhantomData)
        }
    }

    unsafe impl Send for InternedString<'_> {}

    unsafe impl Sync for InternedString<'_> {}

    impl std::fmt::Debug for InternedString<'_> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            Self::default_debug_fmt(*self, f)
        }
    }

    impl zalsa_::SalsaStructInDb for InternedString<'_> {
        type MemoIngredientMap = zalsa_::MemoIngredientSingletonIndex;

        fn lookup_or_create_ingredient_index(aux: &Zalsa) -> salsa::plumbing::IngredientIndices {
            aux.lookup_jar_by_type::<zalsa_struct_::JarImpl<Configuration_>>()
                .get_or_create()
                .into()
        }

        #[inline]
        fn cast(id: zalsa_::Id, type_id: TypeId) -> Option<Self> {
            if type_id == TypeId::of::<InternedString>() {
                Some(<InternedString as zalsa_::FromId>::from_id(id))
            } else {
                None
            }
        }
    }

    unsafe impl zalsa_::Update for InternedString<'_> {
        unsafe fn maybe_update(old_pointer: *mut Self, new_value: Self) -> bool {
            if unsafe { *old_pointer } != new_value {
                unsafe { *old_pointer = new_value };
                true
            } else {
                false
            }
        }
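        // (ID equality is all `maybe_update` needs here: an interned handle is
        // just an `Id` plus a marker, so two handles are interchangeable
        // exactly when their IDs match.)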
    }

    impl<'db> InternedString<'db> {
        pub fn new<Db_, T0: zalsa_::interned::Lookup<String> + std::hash::Hash>(
            db: &'db Db_,
            data: T0,
            other: impl FnOnce(InternedString<'db>) -> InternedString<'db>,
        ) -> Self
        where
            Db_: ?Sized + salsa::Database,
            String: zalsa_::interned::HashEqLike<T0>,
        {
            Configuration_::ingredient(db).intern(
                db.as_dyn_database(),
                StructKey::<'db>(data, std::marker::PhantomData::default()),
                |id, data| {
                    StructData(
                        zalsa_::interned::Lookup::into_owned(data.0),
                        other(zalsa_::FromId::from_id(id)),
                    )
                },
            )
        }

        fn data<Db_>(self, db: &'db Db_) -> String
        where
            Db_: ?Sized + zalsa_::Database,
        {
            let fields = Configuration_::ingredient(db).fields(db.as_dyn_database(), self);
            std::clone::Clone::clone((&fields.0))
        }

        fn other<Db_>(self, db: &'db Db_) -> InternedString<'db>
        where
            Db_: ?Sized + zalsa_::Database,
        {
            let fields = Configuration_::ingredient(db).fields(db.as_dyn_database(), self);
            std::clone::Clone::clone((&fields.1))
        }

        #[doc = r" Default debug formatting for this struct (may be useful if you define your own `Debug` impl)"]
        pub fn default_debug_fmt(this: Self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            zalsa_::with_attached_database(|db| {
                let fields = Configuration_::ingredient(db).fields(db.as_dyn_database(), this);
                let mut f = f.debug_struct("InternedString");
                let f = f.field("data", &fields.0);
                let f = f.field("other", &fields.1);
                f.finish()
            })
            .unwrap_or_else(|| {
                f.debug_tuple("InternedString")
                    .field(&zalsa_::AsId::as_id(&this))
                    .finish()
            })
        }
    }
};
salsa-0.23.0/tests/lru.rs000064400000000000000000000075551046102023000133260ustar 00000000000000//! Test that a `tracked` fn with lru options
//! compiles and executes successfully.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

mod common;
use common::LogDatabase;
use salsa::Database as _;
use test_log::test;

#[derive(Debug, PartialEq, Eq)]
struct HotPotato(u32);

thread_local!
{ static N_POTATOES: AtomicUsize = const { AtomicUsize::new(0) } } impl HotPotato { fn new(id: u32) -> HotPotato { N_POTATOES.with(|n| n.fetch_add(1, Ordering::SeqCst)); HotPotato(id) } } impl Drop for HotPotato { fn drop(&mut self) { N_POTATOES.with(|n| n.fetch_sub(1, Ordering::SeqCst)); } } #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked(lru = 8)] fn get_hot_potato(db: &dyn LogDatabase, input: MyInput) -> Arc<HotPotato> { db.push_log(format!("get_hot_potato({:?})", input.field(db))); Arc::new(HotPotato::new(input.field(db))) } #[salsa::tracked] fn get_hot_potato2(db: &dyn LogDatabase, input: MyInput) -> u32 { db.push_log(format!("get_hot_potato2({:?})", input.field(db))); get_hot_potato(db, input).0 } fn load_n_potatoes() -> usize { N_POTATOES.with(|n| n.load(Ordering::SeqCst)) } #[test] fn lru_works() { let mut db = common::LoggerDatabase::default(); assert_eq!(load_n_potatoes(), 0); for i in 0..32u32 { let input = MyInput::new(&db, i); let p = get_hot_potato(&db, input); assert_eq!(p.0, i); } assert_eq!(load_n_potatoes(), 32); // trigger the GC db.synthetic_write(salsa::Durability::HIGH); assert_eq!(load_n_potatoes(), 8); } #[test] fn lru_can_be_changed_at_runtime() { let mut db = common::LoggerDatabase::default(); assert_eq!(load_n_potatoes(), 0); let inputs: Vec<(u32, MyInput)> = (0..32).map(|i| (i, MyInput::new(&db, i))).collect(); for &(i, input) in inputs.iter() { let p = get_hot_potato(&db, input); assert_eq!(p.0, i); } assert_eq!(load_n_potatoes(), 32); // trigger the GC db.synthetic_write(salsa::Durability::HIGH); assert_eq!(load_n_potatoes(), 8); get_hot_potato::set_lru_capacity(&mut db, 16); assert_eq!(load_n_potatoes(), 8); for &(i, input) in inputs.iter() { let p = get_hot_potato(&db, input); assert_eq!(p.0, i); } assert_eq!(load_n_potatoes(), 32); // trigger the GC db.synthetic_write(salsa::Durability::HIGH); assert_eq!(load_n_potatoes(), 16); // Special case: setting capacity to zero disables LRU get_hot_potato::set_lru_capacity(&mut db, 0); assert_eq!(load_n_potatoes(), 16); for &(i, input) in inputs.iter() { let p = get_hot_potato(&db, input); assert_eq!(p.0, i); } assert_eq!(load_n_potatoes(), 32); // trigger the GC db.synthetic_write(salsa::Durability::HIGH); assert_eq!(load_n_potatoes(), 32); drop(db); assert_eq!(load_n_potatoes(), 0); } #[test] fn lru_keeps_dependency_info() { let mut db = common::LoggerDatabase::default(); let capacity = 8; // Invoke `get_hot_potato2` once per input, i.e. `capacity + 1` (9) times. This will (in turn) invoke // `get_hot_potato`, which will trigger LRU eviction after `capacity` executions. let inputs: Vec<MyInput> = (0..(capacity + 1)) .map(|i| MyInput::new(&db, i as u32)) .collect(); for (i, input) in inputs.iter().enumerate() { let x = get_hot_potato2(&db, *input); assert_eq!(x as usize, i); } db.synthetic_write(salsa::Durability::HIGH); // We want to test that calls to `get_hot_potato2` are still considered // clean. Check that no new executions occur as we go here.
db.assert_logs_len((capacity + 1) * 2); // calling `get_hot_potato2(0)` has to check that `get_hot_potato(0)` is still valid; // even though we've evicted it (LRU), we find that it is still good let p = get_hot_potato2(&db, *inputs.first().unwrap()); assert_eq!(p, 0); db.assert_logs_len(0); } salsa-0.23.0/tests/memory-usage.rs000064400000000000000000000107501046102023000151360ustar 00000000000000use expect_test::expect; #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::interned] struct MyInterned<'db> { field: u32, } #[salsa::tracked] fn input_to_interned<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyInterned<'db> { MyInterned::new(db, input.field(db)) } #[salsa::tracked] fn input_to_tracked<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyTracked<'db> { MyTracked::new(db, input.field(db)) } #[salsa::tracked] fn input_to_string<'db>(_db: &'db dyn salsa::Database) -> String { "a".repeat(1000) } #[salsa::tracked(heap_size = string_heap_size)] fn input_to_string_get_size<'db>(_db: &'db dyn salsa::Database) -> String { "a".repeat(1000) } fn string_heap_size(x: &String) -> usize { x.capacity() } #[salsa::tracked] fn input_to_tracked_tuple<'db>( db: &'db dyn salsa::Database, input: MyInput, ) -> (MyTracked<'db>, MyTracked<'db>) { ( MyTracked::new(db, input.field(db)), MyTracked::new(db, input.field(db)), ) } #[test] fn test() { let db = salsa::DatabaseImpl::new(); let input1 = MyInput::new(&db, 1); let input2 = MyInput::new(&db, 2); let input3 = MyInput::new(&db, 3); let _tracked1 = input_to_tracked(&db, input1); let _tracked2 = input_to_tracked(&db, input2); let _tracked_tuple = input_to_tracked_tuple(&db, input1); let _interned1 = input_to_interned(&db, input1); let _interned2 = input_to_interned(&db, input2); let _interned3 = input_to_interned(&db, input3); let _string1 = input_to_string(&db); let _string2 = input_to_string_get_size(&db); let structs_info = <dyn salsa::Database>::structs_info(&db); let expected = expect![[r#" [ IngredientInfo { debug_name: "MyInput", count: 3, size_of_metadata: 84, size_of_fields: 12, }, IngredientInfo { debug_name: "MyTracked", count: 4, size_of_metadata: 112, size_of_fields: 16, }, IngredientInfo { debug_name: "MyInterned", count: 3, size_of_metadata: 156, size_of_fields: 12, }, IngredientInfo { debug_name: "input_to_string::interned_arguments", count: 1, size_of_metadata: 56, size_of_fields: 0, }, IngredientInfo { debug_name: "input_to_string_get_size::interned_arguments", count: 1, size_of_metadata: 56, size_of_fields: 0, }, ]"#]]; expected.assert_eq(&format!("{structs_info:#?}")); let mut queries_info = <dyn salsa::Database>::queries_info(&db) .into_iter() .collect::<Vec<_>>(); queries_info.sort(); let expected = expect![[r#" [ ( "input_to_interned", IngredientInfo { debug_name: "memory_usage::MyInterned", count: 3, size_of_metadata: 192, size_of_fields: 24, }, ), ( "input_to_string", IngredientInfo { debug_name: "alloc::string::String", count: 1, size_of_metadata: 40, size_of_fields: 24, }, ), ( "input_to_string_get_size", IngredientInfo { debug_name: "alloc::string::String", count: 1, size_of_metadata: 40, size_of_fields: 1024, }, ), ( "input_to_tracked", IngredientInfo { debug_name: "memory_usage::MyTracked", count: 2, size_of_metadata: 192, size_of_fields: 16, }, ), ( "input_to_tracked_tuple", IngredientInfo { debug_name: "(memory_usage::MyTracked, memory_usage::MyTracked)", count: 1, size_of_metadata: 132, size_of_fields: 16, }, ), ]"#]]; expected.assert_eq(&format!("{queries_info:#?}")); }
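// Illustrative sketch (added; not part of the upstream test): the
// `heap_size` callback pattern used by `input_to_string_get_size` above
// generalizes to other owned containers -- report only the bytes owned on
// the heap. The helper name below is hypothetical.
#[allow(dead_code)]
fn vec_heap_size(x: &Vec<u32>) -> usize {
    // Use `capacity`, not `len`: the callback reports reserved heap bytes.
    x.capacity() * std::mem::size_of::<u32>()
}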
salsa-0.23.0/tests/mutate_in_place.rs000064400000000000000000000014051046102023000156520ustar 00000000000000//! Test that setting a field on a `#[salsa::input]` //! overwrites it and returns the old value. use salsa::Setter; use test_log::test; #[salsa::input] struct MyInput { field: String, } #[test] fn execute() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, "Hello".to_string()); // Overwrite field with an empty String // and store the old value in my_string let mut my_string = input.set_field(&mut db).to(String::new()); my_string.push_str(" World!"); // Set the field back to our initial String, // expecting to get the empty one back assert_eq!(input.set_field(&mut db).to(my_string), ""); // Check if the stored String is the one we expected assert_eq!(input.field(&db), "Hello World!"); } salsa-0.23.0/tests/override_new_get_set.rs000064400000000000000000000026671046102023000167320ustar 00000000000000//! Test that the `constructor` argument overrides //! the `new` method's name and that `get` and `set` //! change the names of the getter and setter of the fields. #![allow(warnings)] use std::fmt::Display; use salsa::Setter; #[salsa::db] trait Db: salsa::Database {} #[salsa::input(constructor = from_string)] struct MyInput { #[get(text)] #[set(set_text)] field: String, } impl MyInput { pub fn new(db: &mut dyn Db, s: impl Display) -> MyInput { MyInput::from_string(db, s.to_string()) } pub fn field(self, db: &dyn Db) -> String { self.text(db) } pub fn set_field(self, db: &mut dyn Db, id: String) { self.set_text(db).to(id); } } #[salsa::interned(constructor = from_string)] struct MyInterned<'db> { #[get(text)] #[returns(ref)] field: String, } impl<'db> MyInterned<'db> { pub fn new(db: &'db dyn Db, s: impl Display) -> MyInterned<'db> { MyInterned::from_string(db, s.to_string()) } pub fn field(self, db: &'db dyn Db) -> &str { &self.text(db) } } #[salsa::tracked(constructor = from_string)] struct MyTracked<'db> { #[get(text)] field: String, } impl<'db> MyTracked<'db> { pub fn new(db: &'db dyn Db, s: impl Display) -> MyTracked<'db> { MyTracked::from_string(db, s.to_string()) } pub fn field(self, db: &'db dyn Db) -> String { self.text(db) } } #[test] fn execute() { salsa::DatabaseImpl::new(); } salsa-0.23.0/tests/panic-when-creating-tracked-struct-outside-of-tracked-fn.rs000064400000000000000000000005641046102023000253760ustar 00000000000000//! Test that creating a tracked struct outside of a //! tracked function panics with an assert message. #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[test] #[should_panic( expected = "cannot create a tracked struct disambiguator outside of a tracked function" )] fn execute() { let db = salsa::DatabaseImpl::new(); MyTracked::new(&db, 0); } salsa-0.23.0/tests/parallel/cycle_a_t1_b_t2.rs000064400000000000000000000040531046102023000172300ustar 00000000000000//! Test a specific cycle scenario: //! //! ```text //! Thread T1 Thread T2 //! --------- --------- //! | | //! v | //! query_a() | //! ^ | v //! | +------------> query_b() //! | | //! +--------------------+ //!
``` use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; use salsa::CycleRecoveryAction; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MAX: CycleValue = CycleValue(3); // Signal 1: T1 has entered `query_a` // Signal 2: T2 has entered `query_b` #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { db.signal(1); // Wait for Thread T2 to enter `query_b` before we continue. db.wait_for(2); query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { // Wait for Thread T1 to enter `query_a` before we continue. db.wait_for(1); db.signal(2); let a_value = query_a(db); CycleValue(a_value.0 + 1).min(MAX) } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[test_log::test] fn the_test() { crate::sync::check(|| { tracing::debug!("New run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); query_a(&db_t1) }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); query_b(&db_t2) }); let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap()); assert_eq!((r_t1, r_t2), (MAX, MAX)); }); } salsa-0.23.0/tests/parallel/cycle_a_t1_b_t2_fallback.rs000064400000000000000000000030631046102023000210470ustar 00000000000000//! Test a specific cycle scenario: //! //! ```text //! Thread T1 Thread T2 //! --------- --------- //! | | //! v | //! query_a() | //! ^ | v //! | +------------> query_b() //! | | //! +--------------------+ //! ``` use crate::KnobsDatabase; const FALLBACK_A: u32 = 0b01; const FALLBACK_B: u32 = 0b10; const OFFSET_A: u32 = 0b0100; const OFFSET_B: u32 = 0b1000; // Signal 1: T1 has entered `query_a` // Signal 2: T2 has entered `query_b` #[salsa::tracked(cycle_result=cycle_result_a)] fn query_a(db: &dyn KnobsDatabase) -> u32 { db.signal(1); // Wait for Thread T2 to enter `query_b` before we continue. db.wait_for(2); query_b(db) | OFFSET_A } #[salsa::tracked(cycle_result=cycle_result_b)] fn query_b(db: &dyn KnobsDatabase) -> u32 { // Wait for Thread T1 to enter `query_a` before we continue. db.wait_for(1); db.signal(2); query_a(db) | OFFSET_B } fn cycle_result_a(_db: &dyn KnobsDatabase) -> u32 { FALLBACK_A } fn cycle_result_b(_db: &dyn KnobsDatabase) -> u32 { FALLBACK_B } #[test_log::test] fn the_test() { use crate::sync::thread; use crate::Knobs; crate::sync::check(|| { let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let t1 = thread::spawn(move || query_a(&db_t1)); let t2 = thread::spawn(move || query_b(&db_t2)); let (r_t1, r_t2) = (t1.join(), t2.join()); assert_eq!((r_t1.unwrap(), r_t2.unwrap()), (FALLBACK_A, FALLBACK_B)); }); } salsa-0.23.0/tests/parallel/cycle_ab_peeping_c.rs000064400000000000000000000044271046102023000200760ustar 00000000000000//! Test a specific cycle scenario: //! //! Thread T1 calls A which calls B which calls A. //! //! Thread T2 calls C which calls B. //! //! The trick is that the call from Thread T2 comes before B has reached a fixed point. //! We want to be sure that C sees the final value (and blocks until it is complete).
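// Hedged sketch (added; not part of the upstream test): a `cycle_fn` does
// not have to iterate unconditionally. Once `count` crosses some bound it
// can return a fallback value instead, which salsa uses to break the cycle.
#[allow(dead_code)]
fn bounded_cycle_fn(
    _db: &dyn KnobsDatabase,
    _value: &CycleValue,
    count: u32,
) -> CycleRecoveryAction<CycleValue> {
    if count < 10 {
        CycleRecoveryAction::Iterate
    } else {
        CycleRecoveryAction::Fallback(CycleValue(0))
    }
}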
use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; use salsa::CycleRecoveryAction; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MID: CycleValue = CycleValue(5); const MAX: CycleValue = CycleValue(10); #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { let b_value = query_b(db); // When we reach the mid point, signal stage 1 (unblocking T2) // and then wait for T2 to signal stage 2. if b_value == MID { db.signal(1); db.wait_for(2); } b_value } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn cycle_initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { let a_value = query_a(db); CycleValue(a_value.0 + 1).min(MAX) } #[salsa::tracked] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { // Wait until T1 has reached MID then execute `query_b`. // This should block and (due to the configuration on our database) signal stage 2. db.wait_for(1); query_b(db) } #[test_log::test] fn the_test() { crate::sync::check(|| { let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); db_t2.signal_on_will_block(2); let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); query_a(&db_t1) }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); query_c(&db_t2) }); let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap()); assert_eq!((r_t1, r_t2), (MAX, MAX)); }); } salsa-0.23.0/tests/parallel/cycle_nested_deep.rs000064400000000000000000000057031046102023000177600ustar 00000000000000//! Test a deeply nested-cycle scenario across multiple threads. //! //! The trick is that different threads call into the same cycle from different entry queries. //! //! * Thread 1: `a` -> b -> c (which calls back into d, e, b, a) //! * Thread 2: `b` //! * Thread 3: `d` -> `c` //!
* Thread 4: `e` -> `c` use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; use salsa::CycleRecoveryAction; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MAX: CycleValue = CycleValue(3); #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { let c_value = query_c(db); CycleValue(c_value.0 + 1).min(MAX) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { let d_value = query_d(db); let e_value = query_e(db); let b_value = query_b(db); let a_value = query_a(db); CycleValue(d_value.0.max(e_value.0).max(b_value.0).max(a_value.0)) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_d(db: &dyn KnobsDatabase) -> CycleValue { query_c(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_e(db: &dyn KnobsDatabase) -> CycleValue { query_c(db) } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[test_log::test] fn the_test() { crate::sync::check(|| { let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); let db_t4 = db_t1.clone(); let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); let result = query_a(&db_t1); db_t1.signal(1); result }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); db_t4.wait_for(1); query_b(&db_t4) }); let t3 = thread::spawn(move || { let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); db_t2.wait_for(1); query_d(&db_t2) }); let t4 = thread::spawn(move || { let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); db_t3.wait_for(1); query_e(&db_t3) }); let r_t1 = t1.join().unwrap(); let r_t2 = t2.join().unwrap(); let r_t3 = t3.join().unwrap(); let r_t4 = t4.join().unwrap(); assert_eq!((r_t1, r_t2, r_t3, r_t4), (MAX, MAX, MAX, MAX)); }); } salsa-0.23.0/tests/parallel/cycle_nested_deep_conditional.rs000064400000000000000000000063431046102023000223430ustar 00000000000000//! Test a deeply nested-cycle scenario where cycles have changing query dependencies. //! //! The trick is that different threads call into the same cycle from different entry queries and //! the cycle heads change over different iterations //! //! * Thread 1: `a` -> b -> c //! * Thread 2: `b` //! * Thread 3: `d` -> `c` //! * Thread 4: `e` -> `c` //! //! `c` calls: //! * `d` and `a` in the first few iterations //!
* `d`, `b` and `e` in the last iterations use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; use salsa::CycleRecoveryAction; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MAX: CycleValue = CycleValue(3); #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { let c_value = query_c(db); CycleValue(c_value.0 + 1).min(MAX) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { let d_value = query_d(db); if d_value > CycleValue(0) { let e_value = query_e(db); let b_value = query_b(db); CycleValue(d_value.0.max(e_value.0).max(b_value.0)) } else { let a_value = query_a(db); CycleValue(d_value.0.max(a_value.0)) } } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_d(db: &dyn KnobsDatabase) -> CycleValue { query_c(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_e(db: &dyn KnobsDatabase) -> CycleValue { query_c(db) } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[test_log::test] fn the_test() { crate::sync::check(|| { tracing::debug!("New run"); let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); let db_t4 = db_t1.clone(); let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); let result = query_a(&db_t1); db_t1.signal(1); result }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t4", thread_id = ?thread::current().id()).entered(); db_t4.wait_for(1); query_b(&db_t4) }); let t3 = thread::spawn(move || { let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); db_t2.wait_for(1); query_d(&db_t2) }); let t4 = thread::spawn(move || { let _span = tracing::debug_span!("t3", thread_id = ?thread::current().id()).entered(); db_t3.wait_for(1); query_e(&db_t3) }); let r_t1 = t1.join().unwrap(); let r_t2 = t2.join().unwrap(); let r_t3 = t3.join().unwrap(); let r_t4 = t4.join().unwrap(); assert_eq!((r_t1, r_t2, r_t3, r_t4), (MAX, MAX, MAX, MAX)); }); } salsa-0.23.0/tests/parallel/cycle_nested_three_threads.rs000064400000000000000000000045741046102023000216670ustar 00000000000000//! Test a nested-cycle scenario across three threads: //! //! ```text //! Thread T1 Thread T2 Thread T3 //! --------- --------- --------- //! | | | //! v | | //! query_a() | | //! ^ | v | //! | +------------> query_b() | //! | ^ | v //! | | +------------> query_c() //! | | | //! +------------------+--------------------+ //! //!
``` use crate::sync::thread; use crate::{Knobs, KnobsDatabase}; use salsa::CycleRecoveryAction; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MAX: CycleValue = CycleValue(3); // Signal 1: T1 has entered `query_a` // Signal 2: T2 has entered `query_b` // Signal 3: T3 has entered `query_c` #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { db.signal(1); db.wait_for(3); query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { db.wait_for(1); db.signal(2); db.wait_for(3); let c_value = query_c(db); CycleValue(c_value.0 + 1).min(MAX) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { db.wait_for(2); db.signal(3); let a_value = query_a(db); let b_value = query_b(db); CycleValue(a_value.0.max(b_value.0)) } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[test_log::test] fn the_test() { crate::sync::check(|| { let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let db_t3 = db_t1.clone(); let t1 = thread::spawn(move || query_a(&db_t1)); let t2 = thread::spawn(move || query_b(&db_t2)); let t3 = thread::spawn(move || query_c(&db_t3)); let r_t1 = t1.join().unwrap(); let r_t2 = t2.join().unwrap(); let r_t3 = t3.join().unwrap(); assert_eq!((r_t1, r_t2, r_t3), (MAX, MAX, MAX)); }); } salsa-0.23.0/tests/parallel/cycle_panic.rs000064400000000000000000000021001046102023000165610ustar 00000000000000// Shuttle doesn't like panics inside of its runtime. #![cfg(not(feature = "shuttle"))] //! Test for panic in cycle recovery function, in cross-thread cycle. use crate::setup::{Knobs, KnobsDatabase}; #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_a(db: &dyn KnobsDatabase) -> u32 { db.signal(1); db.wait_for(2); query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=initial)] fn query_b(db: &dyn KnobsDatabase) -> u32 { db.wait_for(1); db.signal(2); query_a(db) + 1 } fn cycle_fn(_db: &dyn KnobsDatabase, _value: &u32, _count: u32) -> salsa::CycleRecoveryAction<u32> { panic!("cancel!") } fn initial(_db: &dyn KnobsDatabase) -> u32 { 0 } #[test] fn execute() { let db = Knobs::default(); let db_t1 = db.clone(); let t1 = std::thread::spawn(move || query_a(&db_t1)); let db_t2 = db.clone(); let t2 = std::thread::spawn(move || query_b(&db_t2)); // The main thing here is that we don't deadlock. let (r1, r2) = (t1.join(), t2.join()); assert!(r1.is_err()); assert!(r2.is_err()); } salsa-0.23.0/tests/parallel/cycle_provisional_depending_on_itself.rs000064400000000000000000000071301046102023000241210ustar 00000000000000//! Test a specific cycle scenario: //! //! 1. Thread T1 calls `a` which calls `b` //! 2. Thread T2 calls `c` which calls `b` (blocks on T1 for `b`). The ordering here is important! //! 3. Thread T1: `b` calls `c` and `a`, both trigger a cycle and Salsa returns fixpoint initial values (with `c` and `a` as cycle heads). //! 4. Thread T1: `b` is released (it's not in its own cycle heads), `Memo::provisional_retry` blocks on `T2` because `c` is in its cycle heads //! 5. Thread T2: Iterates `c`, blocks on T1 when reading `a`. //! 6. Thread T1: Completes the first iteration of `a`, inserting a provisional that depends on `c` and itself (`a`). //!
Starts a new iteration where it executes `b`. Calling `query_a` hits a cycle: //! //! 1. `fetch_cold` returns the current provisional for `a` that depends both on `a` (owned by itself) and `c` (has no cycle heads). //! 2. `Memo::provisional_retry`: Awaits `c` (which has no cycle heads anymore). //! - Before: it skipped over the dependency key `a` that it is holding itself. It sees that `c` is final, so it retries (which gets us back to 6.1) //! - Now: Return the provisional memo and allow the outer cycle to resolve. //! //! The desired behavior here is that: //! 1. `t1`: completes the first iteration of `b` //! 2. `t2`: completes the cycle `c`, up to where it only depends on `a`, now blocks on `a` //! 3. `t1`: Iterates on `a`, finalizes the memo use crate::sync::thread; use salsa::CycleRecoveryAction; use crate::setup::{Knobs, KnobsDatabase}; #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, salsa::Update)] struct CycleValue(u32); const MIN: CycleValue = CycleValue(0); const MAX: CycleValue = CycleValue(1); #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_a(db: &dyn KnobsDatabase) -> CycleValue { query_b(db) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_b(db: &dyn KnobsDatabase) -> CycleValue { // Wait for thread 2 to have entered `query_c`. tracing::debug!("Wait for signal 1 from thread 2"); db.wait_for(1); // Unblock query_c on thread 2 db.signal(2); tracing::debug!("Signal 2 for thread 2"); let c_value = query_c(db); tracing::debug!("query_b: c = {:?}", c_value); let a_value = query_a(db); tracing::debug!("query_b: a = {:?}", a_value); CycleValue(a_value.0 + 1).min(MAX) } #[salsa::tracked(cycle_fn=cycle_fn, cycle_initial=cycle_initial)] fn query_c(db: &dyn KnobsDatabase) -> CycleValue { tracing::debug!("query_c: signaling thread1 to call c"); db.signal(1); tracing::debug!("query_c: waiting for signal"); // Wait for thread 1 to acquire the lock on query_b db.wait_for(1); let b = query_b(db); tracing::debug!("query_c: b = {:?}", b); b } fn cycle_fn( _db: &dyn KnobsDatabase, _value: &CycleValue, _count: u32, ) -> CycleRecoveryAction<CycleValue> { CycleRecoveryAction::Iterate } fn cycle_initial(_db: &dyn KnobsDatabase) -> CycleValue { MIN } #[test_log::test] fn the_test() { crate::sync::check(|| { let db_t1 = Knobs::default(); let db_t2 = db_t1.clone(); let t1 = thread::spawn(move || { let _span = tracing::debug_span!("t1", thread_id = ?thread::current().id()).entered(); query_a(&db_t1) }); let t2 = thread::spawn(move || { let _span = tracing::debug_span!("t2", thread_id = ?thread::current().id()).entered(); query_c(&db_t2) }); let (r_t1, r_t2) = (t1.join().unwrap(), t2.join().unwrap()); assert_eq!((r_t1, r_t2), (MAX, MAX)); }); } salsa-0.23.0/tests/parallel/main.rs000064400000000000000000000013461046102023000152450ustar 00000000000000mod setup; mod signal; mod cycle_a_t1_b_t2; mod cycle_a_t1_b_t2_fallback; mod cycle_ab_peeping_c; mod cycle_nested_deep; mod cycle_nested_deep_conditional; mod cycle_nested_three_threads; mod cycle_panic; mod cycle_provisional_depending_on_itself; mod parallel_cancellation; mod parallel_join; mod parallel_map; #[cfg(not(feature = "shuttle"))] pub(crate) mod sync { pub use std::sync::*; pub use std::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { f(); } } #[cfg(feature = "shuttle")] pub(crate) mod sync { pub use shuttle::sync::*; pub use shuttle::thread; pub fn check(f: impl Fn() + Send + Sync + 'static) { shuttle::check_pct(f, 1000, 50); } } pub(crate) use setup::*;
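// Illustrative sketch (added; not part of the upstream harness): because
// of the `sync` facade above, a test written against `crate::sync` runs
// once on real threads by default, while `--features shuttle` makes
// `check` explore many interleavings via `shuttle::check_pct`.
#[allow(dead_code)]
fn sync_facade_example() {
    sync::check(|| {
        let t = sync::thread::spawn(|| 1 + 1);
        assert_eq!(t.join().unwrap(), 2);
    });
}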
salsa-0.23.0/tests/parallel/parallel_cancellation.rs000064400000000000000000000026611046102023000206320ustar 00000000000000// Shuttle doesn't like panics inside of its runtime. #![cfg(not(feature = "shuttle"))] //! Test for thread cancellation. use salsa::{Cancelled, Setter}; use crate::setup::{Knobs, KnobsDatabase}; #[salsa::input(debug)] struct MyInput { field: i32, } #[salsa::tracked] fn a1(db: &dyn KnobsDatabase, input: MyInput) -> MyInput { db.signal(1); db.wait_for(2); dummy(db, input) } #[salsa::tracked] fn dummy(_db: &dyn KnobsDatabase, _input: MyInput) -> MyInput { panic!("should never get here!") } // Cancellation signalling test // // The pattern is as follows. // // Thread A Thread B // -------- -------- // a1 // | wait for stage 1 // signal stage 1 set input, triggers cancellation // wait for stage 2 (blocks) triggering cancellation sends stage 2 // | // (unblocked) // dummy // panics #[test] fn execute() { let mut db = Knobs::default(); let input = MyInput::new(&db, 1); let thread_a = std::thread::spawn({ let db = db.clone(); move || a1(&db, input) }); db.signal_on_did_cancel(2); input.set_field(&mut db).to(2); // Assert that thread A was cancelled let cancelled = thread_a .join() .unwrap_err() .downcast::<Cancelled>() .unwrap(); // and inspect the output expect_test::expect![[r#" PendingWrite "#]] .assert_debug_eq(&cancelled); } salsa-0.23.0/tests/parallel/parallel_join.rs000064400000000000000000000100031046102023000171220ustar 00000000000000#![cfg(all(feature = "rayon", not(feature = "shuttle")))] // test for rayon-like join interactions. use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; use salsa::{Cancelled, Database, Setter, Storage}; use crate::signal::Signal; #[salsa::input] struct ParallelInput { a: u32, b: u32, } #[salsa::tracked] fn tracked_fn(db: &dyn salsa::Database, input: ParallelInput) -> (u32, u32) { salsa::join(db, |db| input.a(db) + 1, |db| input.b(db) - 1) } #[salsa::tracked] fn a1(db: &dyn KnobsDatabase, input: ParallelInput) -> (u32, u32) { db.signal(1); salsa::join( db, |db| { db.wait_for(2); input.a(db) + dummy(db) }, |db| { db.wait_for(2); input.b(db) + dummy(db) }, ) } #[salsa::tracked] fn dummy(_db: &dyn KnobsDatabase) -> u32 { panic!("should never get here!") } #[test] #[cfg_attr(miri, ignore)] fn execute() { let db = salsa::DatabaseImpl::new(); let input = ParallelInput::new(&db, 10, 20); tracked_fn(&db, input); } // we expect this to panic, as `salsa::join` needs to be called from a query. #[test] #[cfg_attr(miri, ignore)] #[should_panic] fn direct_calls_panic() { let db = salsa::DatabaseImpl::new(); let input = ParallelInput::new(&db, 10, 20); let (_, _) = salsa::join(&db, |db| input.a(db) + 1, |db| input.b(db) - 1); } // Cancellation signalling test // // The pattern is as follows.
// // Thread A Thread B // -------- -------- // a1 // | wait for stage 1 // signal stage 1 set input, triggers cancellation // wait for stage 2 (blocks) triggering cancellation sends stage 2 // | // (unblocked) // dummy // panics #[test] #[cfg_attr(miri, ignore)] fn execute_cancellation() { let mut db = Knobs::default(); let input = ParallelInput::new(&db, 10, 20); let thread_a = std::thread::spawn({ let db = db.clone(); move || a1(&db, input) }); db.signal_on_did_cancel(2); input.set_a(&mut db).to(30); // Assert thread A was cancelled let cancelled = thread_a .join() .unwrap_err() .downcast::<Cancelled>() .unwrap(); // and inspect the output expect_test::expect![[r#" PendingWrite "#]] .assert_debug_eq(&cancelled); } #[salsa::db] trait KnobsDatabase: Database { fn signal(&self, stage: usize); fn wait_for(&self, stage: usize); } /// A copy of `tests/parallel/setup.rs` that does not assert, as the assert is incorrect for the /// purposes of this test. #[salsa::db] struct Knobs { storage: salsa::Storage<Self>, signal: Arc<Signal>, signal_on_did_cancel: Arc<AtomicUsize>, } impl Knobs { pub fn signal_on_did_cancel(&self, stage: usize) { self.signal_on_did_cancel.store(stage, Ordering::Release); } } impl Clone for Knobs { #[track_caller] fn clone(&self) -> Self { Self { storage: self.storage.clone(), signal: self.signal.clone(), signal_on_did_cancel: self.signal_on_did_cancel.clone(), } } } impl Default for Knobs { fn default() -> Self { let signal = <Arc<Signal>>::default(); let signal_on_did_cancel = Arc::new(AtomicUsize::new(0)); Self { storage: Storage::new(Some(Box::new({ let signal = signal.clone(); let signal_on_did_cancel = signal_on_did_cancel.clone(); move |event| { if let salsa::EventKind::DidSetCancellationFlag = event.kind { signal.signal(signal_on_did_cancel.load(Ordering::Acquire)); } } }))), signal, signal_on_did_cancel, } } } #[salsa::db] impl salsa::Database for Knobs {} #[salsa::db] impl KnobsDatabase for Knobs { fn signal(&self, stage: usize) { self.signal.signal(stage); } fn wait_for(&self, stage: usize) { self.signal.wait_for(stage); } } salsa-0.23.0/tests/parallel/parallel_map.rs000064400000000000000000000045701046102023000167540ustar 00000000000000#![cfg(all(feature = "rayon", not(feature = "shuttle")))] // test for rayon-like parallel map interactions. use salsa::{Cancelled, Setter}; use crate::setup::{Knobs, KnobsDatabase}; #[salsa::input] struct ParallelInput { field: Vec<u32>, } #[salsa::tracked] fn tracked_fn(db: &dyn salsa::Database, input: ParallelInput) -> Vec<u32> { salsa::par_map(db, input.field(db), |_db, field| field + 1) } #[salsa::tracked] fn a1(db: &dyn KnobsDatabase, input: ParallelInput) -> Vec<u32> { db.signal(1); salsa::par_map(db, input.field(db), |db, field| { db.wait_for(2); field + dummy(db) }) } #[salsa::tracked] fn dummy(_db: &dyn KnobsDatabase) -> u32 { panic!("should never get here!") } #[test] #[cfg_attr(miri, ignore)] fn execute() { let db = salsa::DatabaseImpl::new(); let counts = (1..=10).collect::<Vec<u32>>(); let input = ParallelInput::new(&db, counts); tracked_fn(&db, input); } // we expect this to panic, as `salsa::par_map` needs to be called from a query. #[test] #[cfg_attr(miri, ignore)] #[should_panic] fn direct_calls_panic() { let db = salsa::DatabaseImpl::new(); let counts = (1..=10).collect::<Vec<u32>>(); let input = ParallelInput::new(&db, counts); let _: Vec<u32> = salsa::par_map(&db, input.field(&db), |_db, field| field + 1); } // Cancellation signalling test // // The pattern is as follows.
// // Thread A Thread B // -------- -------- // a1 // | wait for stage 1 // signal stage 1 set input, triggers cancellation // wait for stage 2 (blocks) triggering cancellation sends stage 2 // | // (unblocked) // dummy // panics #[test] #[cfg_attr(miri, ignore)] fn execute_cancellation() { let mut db = Knobs::default(); let counts = (1..=10).collect::<Vec<u32>>(); let input = ParallelInput::new(&db, counts); let thread_a = std::thread::spawn({ let db = db.clone(); move || a1(&db, input) }); let counts = (2..=20).collect::<Vec<u32>>(); db.signal_on_did_cancel(2); input.set_field(&mut db).to(counts); // Assert that thread A was cancelled let cancelled = thread_a .join() .unwrap_err() .downcast::<Cancelled>() .unwrap(); // and inspect the output expect_test::expect![[r#" PendingWrite "#]] .assert_debug_eq(&cancelled); } salsa-0.23.0/tests/parallel/setup.rs000064400000000000000000000064051046102023000154620ustar 00000000000000#![allow(dead_code)] use salsa::{Database, Storage}; use super::signal::Signal; use super::sync::atomic::{AtomicUsize, Ordering}; use super::sync::Arc; /// Various "knobs" and utilities used by tests to force /// a certain behavior. #[salsa::db] pub(crate) trait KnobsDatabase: Database { /// Signal that we are entering stage `stage`. fn signal(&self, stage: usize); /// Wait until we reach stage `stage` (no-op if we have already reached that stage). fn wait_for(&self, stage: usize); } /// A database containing various "knobs" that can be used to customize how the queries /// behave on one specific thread. Note that this state is /// intentionally thread-local (apart from `signal`). #[salsa::db] pub(crate) struct Knobs { storage: salsa::Storage<Self>, /// A kind of flexible barrier used to coordinate execution across /// threads to ensure we reach various weird states. pub(crate) signal: Arc<Signal>, /// When this database is about to block, send this signal. signal_on_will_block: Arc<AtomicUsize>, /// When this database has set the cancellation flag, send this signal. signal_on_did_cancel: Arc<AtomicUsize>, } impl Knobs { pub fn signal_on_did_cancel(&self, stage: usize) { self.signal_on_did_cancel.store(stage, Ordering::Release); } pub fn signal_on_will_block(&self, stage: usize) { self.signal_on_will_block.store(stage, Ordering::Release); } } impl Clone for Knobs { #[track_caller] fn clone(&self) -> Self { // To avoid mistakes, check that when we clone, we haven't customized this behavior yet assert_eq!(self.signal_on_will_block.load(Ordering::Acquire), 0); assert_eq!(self.signal_on_did_cancel.load(Ordering::Acquire), 0); Self { storage: self.storage.clone(), signal: self.signal.clone(), signal_on_will_block: self.signal_on_will_block.clone(), signal_on_did_cancel: self.signal_on_did_cancel.clone(), } } } impl Default for Knobs { fn default() -> Self { let signal = <Arc<Signal>>::default(); let signal_on_will_block = Arc::new(AtomicUsize::new(0)); let signal_on_did_cancel = Arc::new(AtomicUsize::new(0)); Self { storage: Storage::new(Some(Box::new({ let signal = signal.clone(); let signal_on_will_block = signal_on_will_block.clone(); let signal_on_did_cancel = signal_on_did_cancel.clone(); move |event| match event.kind { salsa::EventKind::WillBlockOn { ..
} => { signal.signal(signal_on_will_block.load(Ordering::Acquire)); } salsa::EventKind::DidSetCancellationFlag => { signal.signal(signal_on_did_cancel.load(Ordering::Acquire)); } _ => {} } }))), signal, signal_on_will_block, signal_on_did_cancel, } } } #[salsa::db] impl salsa::Database for Knobs {} #[salsa::db] impl KnobsDatabase for Knobs { fn signal(&self, stage: usize) { self.signal.signal(stage); } fn wait_for(&self, stage: usize) { self.signal.wait_for(stage); } } salsa-0.23.0/tests/parallel/signal.rs000064400000000000000000000026231046102023000155750ustar 00000000000000#![allow(unused)] use super::sync::{Condvar, Mutex}; #[derive(Default)] pub(crate) struct Signal { value: Mutex<usize>, cond_var: Condvar, } impl Signal { pub(crate) fn signal(&self, stage: usize) { // When running with shuttle we want to explore as many possible // executions, so we avoid signals entirely. #[cfg(not(feature = "shuttle"))] { // This check avoids acquiring the lock for things that will // clearly be a no-op. Not *necessary* but helps to ensure we // are more likely to encounter weird race conditions; // otherwise calls to `sum` will tend to be unnecessarily // synchronous. if stage > 0 { let mut v = self.value.lock().unwrap(); if stage > *v { *v = stage; self.cond_var.notify_all(); } } } } /// Waits until the given condition is true; the fn is invoked /// with the current stage. pub(crate) fn wait_for(&self, stage: usize) { #[cfg(not(feature = "shuttle"))] { // As above, avoid lock if clearly a no-op. if stage > 0 { let mut v = self.value.lock().unwrap(); while *v < stage { v = self.cond_var.wait(v).unwrap(); } } } } } salsa-0.23.0/tests/preverify-struct-with-leaked-data-2.rs000064400000000000000000000062341046102023000213250ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. use std::cell::Cell; use common::LogDatabase; use expect_test::expect; mod common; use salsa::{Database, Setter}; use test_log::test; thread_local! { static COUNTER: Cell<usize> = const { Cell::new(0) }; } #[salsa::input] struct MyInput { field1: u32, field2: u32, } #[salsa::tracked] struct MyTracked<'db> { #[tracked] counter: usize, } #[salsa::tracked] fn function(db: &dyn Database, input: MyInput) -> (usize, usize) { // Read input 1 let _field1 = input.field1(db); // **BAD:** Leak in the value of the counter non-deterministically let counter = COUNTER.with(|c| c.get()); // Create the tracked struct, which (from salsa's POV), only depends on field1; // but which actually depends on the leaked value. let tracked = MyTracked::new(db, counter); // Read the tracked field let result = counter_field(db, input, tracked); // Read input 2. This will cause us to re-execute on revision 2. let _field2 = input.field2(db); (result, tracked.counter(db)) } #[salsa::tracked] fn counter_field<'db>(db: &'db dyn Database, input: MyInput, tracked: MyTracked<'db>) -> usize { // Read input 2. This will cause us to re-execute on revision 2.
let _field2 = input.field2(db); tracked.counter(db) } #[test] fn test_leaked_inputs_ignored() { let mut db = common::EventLoggerDatabase::default(); let input = MyInput::new(&db, 10, 20); let result_in_rev_1 = function(&db, input); db.assert_logs(expect![[r#" [ "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "DidInternValue { key: counter_field::interned_arguments(Id(800)), revision: R1 }", "WillCheckCancellation", "WillExecute { database_key: counter_field(Id(800)) }", ]"#]]); assert_eq!(result_in_rev_1, (0, 0)); // Modify field2 so that `function` is seen to have changed -- // but only *after* the tracked struct is created. input.set_field2(&mut db).to(30); // Also modify the thread-local counter COUNTER.with(|c| c.set(100)); let result_in_rev_2 = function(&db, input); db.assert_logs(expect![[r#" [ "DidSetCancellationFlag", "WillCheckCancellation", "DidValidateInternedValue { key: counter_field::interned_arguments(Id(800)), revision: R2 }", "WillCheckCancellation", "WillExecute { database_key: counter_field(Id(800)) }", "WillExecute { database_key: function(Id(0)) }", "WillCheckCancellation", ]"#]]); // Salsa will re-execute `counter_field` before re-executing // `function` since, from what it can see, no inputs have changed // before `counter_field` is called. This will read the field of // the tracked struct which means it will be *fixed* at `0`. // When we re-execute `counter_field` later, we ignore the new // value of 100 since the struct has already been read during // this revision. // // Contrast with preverify-struct-with-leaked-data.rs. assert_eq!(result_in_rev_2, (0, 0)); } salsa-0.23.0/tests/preverify-struct-with-leaked-data.rs000064400000000000000000000052621046102023000211660ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully. use std::cell::Cell; use common::LogDatabase; use expect_test::expect; mod common; use salsa::{Database, Setter}; use test_log::test; thread_local! { static COUNTER: Cell<usize> = const { Cell::new(0) }; } #[salsa::input] struct MyInput { field1: u32, field2: u32, } #[salsa::tracked] struct MyTracked<'db> { #[tracked] counter: usize, } #[salsa::tracked] fn function(db: &dyn Database, input: MyInput) -> (usize, usize) { // Read input 1 let _field1 = input.field1(db); // **BAD:** Leak in the value of the counter non-deterministically let counter = COUNTER.with(|c| c.get()); // Create the tracked struct, which (from salsa's POV), only depends on field1; // but which actually depends on the leaked value. let tracked = MyTracked::new(db, counter); // Read the tracked field let result = counter_field(db, tracked); // Read input 2. This will cause us to re-execute on revision 2. let _field2 = input.field2(db); (result, tracked.counter(db)) } #[salsa::tracked] fn counter_field<'db>(db: &'db dyn Database, tracked: MyTracked<'db>) -> usize { tracked.counter(db) } #[test] fn test_leaked_inputs_ignored() { let mut db = common::EventLoggerDatabase::default(); let input = MyInput::new(&db, 10, 20); let result_in_rev_1 = function(&db, input); db.assert_logs(expect![[r#" [ "WillCheckCancellation", "WillExecute { database_key: function(Id(0)) }", "WillCheckCancellation", "WillExecute { database_key: counter_field(Id(400)) }", ]"#]]); assert_eq!(result_in_rev_1, (0, 0)); // Modify field2 so that `function` is seen to have changed -- // but only *after* the tracked struct is created.
input.set_field2(&mut db).to(30); // Also modify the thread-local counter COUNTER.with(|c| c.set(100)); let result_in_rev_2 = function(&db, input); db.assert_logs(expect![[r#" [ "DidSetCancellationFlag", "WillCheckCancellation", "WillCheckCancellation", "DidValidateMemoizedValue { database_key: counter_field(Id(400)) }", "WillExecute { database_key: function(Id(0)) }", "WillCheckCancellation", ]"#]]); // Because salsa does not see any way for the tracked // struct to have changed, it will re-use the cached return value // from `counter_field` (`0`). This in turn "locks" the cached // struct so that the new value of 100 is ignored. // // Contrast with preverify-struct-with-leaked-data-2.rs. assert_eq!(result_in_rev_2, (0, 0)); } salsa-0.23.0/tests/return_mode.rs000064400000000000000000000072431046102023000150520ustar 00000000000000use salsa::Database; #[salsa::input] struct DefaultInput { text: String, } #[salsa::tracked] fn default_fn(db: &dyn Database, input: DefaultInput) -> String { let input: String = input.text(db); input } #[test] fn default_test() { salsa::DatabaseImpl::new().attach(|db| { let input = DefaultInput::new(db, "Test".into()); let x: String = default_fn(db, input); expect_test::expect![[r#" "Test" "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct CopyInput { #[returns(copy)] text: &'static str, } #[salsa::tracked(returns(copy))] fn copy_fn(db: &dyn Database, input: CopyInput) -> &'static str { let input: &'static str = input.text(db); input } #[test] fn copy_test() { salsa::DatabaseImpl::new().attach(|db| { let input = CopyInput::new(db, "Test"); let x: &str = copy_fn(db, input); expect_test::expect![[r#" "Test" "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct CloneInput { #[returns(clone)] text: String, } #[salsa::tracked(returns(clone))] fn clone_fn(db: &dyn Database, input: CloneInput) -> String { let input: String = input.text(db); input } #[test] fn clone_test() { salsa::DatabaseImpl::new().attach(|db| { let input = CloneInput::new(db, "Test".into()); let x: String = clone_fn(db, input); expect_test::expect![[r#" "Test" "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct RefInput { #[returns(ref)] text: String, } #[salsa::tracked(returns(ref))] fn ref_fn(db: &dyn Database, input: RefInput) -> String { let input: &String = input.text(db); input.to_owned() } #[test] fn ref_test() { salsa::DatabaseImpl::new().attach(|db| { let input = RefInput::new(db, "Test".into()); let x: &String = ref_fn(db, input); expect_test::expect![[r#" "Test" "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct DerefInput { #[returns(deref)] text: String, } #[salsa::tracked(returns(deref))] fn deref_fn(db: &dyn Database, input: DerefInput) -> String { let input: &str = input.text(db); input.to_owned() } #[test] fn deref_test() { salsa::DatabaseImpl::new().attach(|db| { let input = DerefInput::new(db, "Test".into()); let x: &str = deref_fn(db, input); expect_test::expect![[r#" "Test" "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct AsRefInput { #[returns(as_ref)] text: Option<String>, } #[salsa::tracked(returns(as_ref))] fn as_ref_fn(db: &dyn Database, input: AsRefInput) -> Option<String> { let input: Option<&String> = input.text(db); input.cloned() } #[test] fn as_ref_test() { salsa::DatabaseImpl::new().attach(|db| { let input = AsRefInput::new(db, Some("Test".into())); let x: Option<&String> = as_ref_fn(db, input); expect_test::expect![[r#" Some( "Test", ) "#]] .assert_debug_eq(&x); }) } #[salsa::input] struct AsDerefInput { #[returns(as_deref)] text: Option<String>, }
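// Recap (added for clarity; inferred from the tests in this file):
// `returns(ref)` borrows the stored value, `returns(deref)` dereferences
// it (`String` -> `&str`), and `returns(as_ref)`/`returns(as_deref)`
// apply `Option::as_ref`/`Option::as_deref`, as the final test below shows.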
#[salsa::tracked(returns(as_deref))] fn as_deref_fn(db: &dyn Database, input: AsDerefInput) -> Option<String> { let input: Option<&str> = input.text(db); input.map(|s| s.to_owned()) } #[test] fn as_deref_test() { salsa::DatabaseImpl::new().attach(|db| { let input = AsDerefInput::new(db, Some("Test".into())); let x: Option<&str> = as_deref_fn(db, input); expect_test::expect![[r#" Some( "Test", ) "#]] .assert_debug_eq(&x); }) } salsa-0.23.0/tests/singleton.rs000064400000000000000000000021201046102023000145220ustar 00000000000000//! Basic Singleton struct test: //! //! Singleton structs are created only once. Subsequent `get`s and `new`s after creation return the same `Id`. use expect_test::expect; use salsa::Database as _; use test_log::test; #[salsa::input(singleton, debug)] struct MyInput { field: u32, id_field: u16, } #[test] fn basic() { let db = salsa::DatabaseImpl::new(); let input1 = MyInput::new(&db, 3, 4); let input2 = MyInput::get(&db); assert_eq!(input1, input2); let input3 = MyInput::try_get(&db); assert_eq!(Some(input1), input3); } #[test] #[should_panic] fn twice() { let db = salsa::DatabaseImpl::new(); let input1 = MyInput::new(&db, 3, 4); let input2 = MyInput::get(&db); assert_eq!(input1, input2); // should panic here _ = MyInput::new(&db, 3, 5); } #[test] fn debug() { salsa::DatabaseImpl::new().attach(|db| { let input = MyInput::new(db, 3, 4); let actual = format!("{input:?}"); let expected = expect!["MyInput { [salsa id]: Id(0), field: 3, id_field: 4 }"]; expected.assert_eq(&actual); }); } salsa-0.23.0/tests/specify-only-works-if-the-key-is-created-in-the-current-query.rs000064400000000000000000000021531046102023000262710ustar 00000000000000//! Test that `specify` only works if the key is a tracked struct created in the current query. //! Compilation succeeds, but execution panics. #![allow(warnings)] #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn tracked_struct_created_in_another_query<'db>( db: &'db dyn salsa::Database, input: MyInput, ) -> MyTracked<'db> { MyTracked::new(db, input.field(db) * 2) } #[salsa::tracked] fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyTracked<'db> { let t = tracked_struct_created_in_another_query(db, input); if input.field(db) != 0 { tracked_fn_extra::specify(db, t, 2222); } t } #[salsa::tracked(specify)] fn tracked_fn_extra<'db>(_db: &'db dyn salsa::Database, _input: MyTracked<'db>) -> u32 { 0 } #[test] #[should_panic( expected = "can only use `specify` on salsa structs created during the current tracked fn" )] fn execute_when_specified() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, 22); let tracked = tracked_fn(&db, input); } salsa-0.23.0/tests/synthetic_write.rs000064400000000000000000000017621046102023000157510ustar 00000000000000//! Test that a synthetic write bumps the revision and forces //! memoized queries to be re-validated.
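// Note (added for clarity): `synthetic_write` bumps the revision without
// changing any input, so on the next read a memoized query must be
// re-validated -- the expected logs below show validation, not re-execution.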
#![allow(warnings)] mod common; use common::{LogDatabase, Logger}; use expect_test::expect; use salsa::{Database, DatabaseImpl, Durability, Event, EventKind}; #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] fn tracked_fn(db: &dyn Database, input: MyInput) -> u32 { input.field(db) * 2 } #[test] fn execute() { let mut db = common::ExecuteValidateLoggerDatabase::default(); let input = MyInput::new(&db, 22); assert_eq!(tracked_fn(&db, input), 44); db.assert_logs(expect![[r#" [ "salsa_event(WillExecute { database_key: tracked_fn(Id(0)) })", ]"#]]); // Bumps the revision db.synthetic_write(Durability::LOW); // The query should be re-validated (but not re-executed) assert_eq!(tracked_fn(&db, input), 44); db.assert_logs(expect![[r#" [ "salsa_event(DidValidateMemoizedValue { database_key: tracked_fn(Id(0)) })", ]"#]]); } salsa-0.23.0/tests/tracked-struct-id-field-bad-eq.rs000064400000000000000000000016441046102023000202670ustar 00000000000000//! Test an id field whose `PartialEq` impl is always true. use salsa::{Database, Setter}; use test_log::test; #[salsa::input] struct MyInput { field: bool, } #[allow(clippy::derived_hash_with_manual_eq)] #[derive(Eq, Hash, Debug, Clone)] struct BadEq { field: bool, } impl PartialEq for BadEq { fn eq(&self, _other: &Self) -> bool { true } } impl From<bool> for BadEq { fn from(value: bool) -> Self { Self { field: value } } } #[salsa::tracked] struct MyTracked<'db> { field: BadEq, } #[salsa::tracked] fn the_fn(db: &dyn Database, input: MyInput) { let tracked0 = MyTracked::new(db, BadEq::from(input.field(db))); assert_eq!(tracked0.field(db).field, input.field(db)); } #[test] fn execute() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, true); the_fn(&db, input); input.set_field(&mut db).to(false); the_fn(&db, input); } salsa-0.23.0/tests/tracked-struct-id-field-bad-hash.rs000064400000000000000000000042461046102023000206060ustar 00000000000000//! Test for a tracked struct where an untracked field has a //! very poorly chosen hash impl (always returns 0). //! //! This demonstrates that tracked struct ids will always change if //! untracked fields on a struct change values, because although struct //! ids are based on the *hash* of the untracked fields, ids are generational //! based on the field values.
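// For contrast (illustrative, added; not part of the upstream test): a
// well-behaved `Hash` impl feeds every field that participates in `Eq`
// to the hasher, so equal values hash equally and unequal values almost
// always differ.
#[allow(dead_code)]
#[derive(PartialEq, Eq, Debug, Clone)]
struct GoodHash {
    field: u64,
}

impl std::hash::Hash for GoodHash {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.field.hash(state);
    }
}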
use salsa::{Database as Db, Setter}; use test_log::test; #[salsa::input] struct MyInput { field: u64, } #[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] struct BadHash { field: u64, } impl From<u64> for BadHash { fn from(value: u64) -> Self { Self { field: value } } } impl std::hash::Hash for BadHash { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { state.write_i16(0); } } #[salsa::tracked] struct MyTracked<'db> { field: BadHash, } #[salsa::tracked] fn the_fn(db: &dyn Db, input: MyInput) { let tracked0 = MyTracked::new(db, BadHash::from(input.field(db))); assert_eq!(tracked0.field(db).field, input.field(db)); } #[test] fn execute() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, 1); the_fn(&db, input); input.set_field(&mut db).to(0); the_fn(&db, input); } #[salsa::tracked] fn create_tracked<'db>(db: &'db dyn Db, input: MyInput) -> MyTracked<'db> { MyTracked::new(db, BadHash::from(input.field(db))) } #[salsa::tracked] fn with_tracked<'db>(db: &'db dyn Db, tracked: MyTracked<'db>) -> u64 { tracked.field(db).field } #[test] fn dependent_query() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, 1); let tracked = create_tracked(&db, input); assert_eq!(with_tracked(&db, tracked), 1); input.set_field(&mut db).to(0); // We now re-run the query that creates the tracked struct. // // Salsa will re-use the `MyTracked` struct from the previous revision, // but practically it has been re-created due to generational ids. let tracked = create_tracked(&db, input); assert_eq!(with_tracked(&db, tracked), 0); input.set_field(&mut db).to(2); let tracked = create_tracked(&db, input); assert_eq!(with_tracked(&db, tracked), 2); } salsa-0.23.0/tests/tracked-struct-unchanged-in-new-rev.rs000064400000000000000000000013251046102023000214000ustar 00000000000000use salsa::{Database as Db, Setter}; use test_log::test; #[salsa::input] struct MyInput { field: u32, } #[salsa::tracked] struct MyTracked<'db> { field: u32, } #[salsa::tracked] fn tracked_fn(db: &dyn Db, input: MyInput) -> MyTracked<'_> { MyTracked::new(db, input.field(db) / 2) } #[test] fn execute() { let mut db = salsa::DatabaseImpl::new(); let input1 = MyInput::new(&db, 22); let input2 = MyInput::new(&db, 44); let _tracked1 = tracked_fn(&db, input1); let _tracked2 = tracked_fn(&db, input2); // modify the input and change the revision input1.set_field(&mut db).to(24); let tracked2 = tracked_fn(&db, input2); // this should not panic tracked2.field(&db); } salsa-0.23.0/tests/tracked-struct-value-field-bad-eq.rs000064400000000000000000000044051046102023000210050ustar 00000000000000//! Test a field whose `PartialEq` impl is always true. //! This can result in us getting different results than //! if we were to execute from scratch.
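// For contrast (illustrative, added; not part of the upstream test):
// deriving `PartialEq` yields the field-wise comparison that salsa's
// backdating ("value did not really change") optimization relies on.
#[allow(dead_code)]
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
struct GoodEq {
    field: bool,
}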
use expect_test::expect; use salsa::{Database, Setter}; mod common; use common::LogDatabase; use test_log::test; #[salsa::input] struct MyInput { field: bool, } #[allow(clippy::derived_hash_with_manual_eq)] #[derive(Eq, Hash, Debug, Clone)] struct BadEq { field: bool, } impl PartialEq for BadEq { fn eq(&self, _other: &Self) -> bool { true } } impl From<bool> for BadEq { fn from(value: bool) -> Self { Self { field: value } } } #[salsa::tracked] struct MyTracked<'db> { #[tracked] field: BadEq, } #[salsa::tracked] fn the_fn(db: &dyn Database, input: MyInput) -> bool { let tracked = make_tracked_struct(db, input); read_tracked_struct(db, tracked) } #[salsa::tracked] fn make_tracked_struct(db: &dyn Database, input: MyInput) -> MyTracked<'_> { MyTracked::new(db, BadEq::from(input.field(db))) } #[salsa::tracked] fn read_tracked_struct<'db>(db: &'db dyn Database, tracked: MyTracked<'db>) -> bool { tracked.field(db).field } #[test] fn execute() { let mut db = common::ExecuteValidateLoggerDatabase::default(); let input = MyInput::new(&db, true); let result = the_fn(&db, input); assert!(result); db.assert_logs(expect![[r#" [ "salsa_event(WillExecute { database_key: the_fn(Id(0)) })", "salsa_event(WillExecute { database_key: make_tracked_struct(Id(0)) })", "salsa_event(WillExecute { database_key: read_tracked_struct(Id(400)) })", ]"#]]); // Update the input to `false` and re-execute. input.set_field(&mut db).to(false); let result = the_fn(&db, input); // If the `Eq` impl were working properly, we would // now return `false`. But because the `Eq` impl considers // the values equal, we re-use memoized results and so we get true. assert!(result); db.assert_logs(expect![[r#" [ "salsa_event(WillExecute { database_key: make_tracked_struct(Id(0)) })", "salsa_event(DidValidateMemoizedValue { database_key: read_tracked_struct(Id(400)) })", "salsa_event(DidValidateMemoizedValue { database_key: the_fn(Id(0)) })", ]"#]]); } salsa-0.23.0/tests/tracked-struct-value-field-not-eq.rs000064400000000000000000000016261046102023000210610ustar 00000000000000//! Test a field whose `PartialEq` impl is always true. //! This can cause our "last changed" data to be wrong, //! but we *should* always reflect the final values. use salsa::{Database, Setter}; use test_log::test; #[salsa::input] struct MyInput { field: bool, } #[derive(Hash, Debug, Clone)] struct NotEq { field: bool, } impl From<bool> for NotEq { fn from(value: bool) -> Self { Self { field: value } } } #[salsa::tracked] struct MyTracked<'db> { #[tracked] #[no_eq] field: NotEq, } #[salsa::tracked] fn the_fn(db: &dyn Database, input: MyInput) { let tracked0 = MyTracked::new(db, NotEq::from(input.field(db))); assert_eq!(tracked0.field(db).field, input.field(db)); } #[test] fn execute() { let mut db = salsa::DatabaseImpl::new(); let input = MyInput::new(&db, true); the_fn(&db, input); input.set_field(&mut db).to(false); the_fn(&db, input); } salsa-0.23.0/tests/tracked_assoc_fn.rs000064400000000000000000000042151046102023000160130ustar 00000000000000//! Test that a `tracked` fn on a `salsa::input` //! compiles and executes successfully.
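// Note (added for clarity): the functions in the next file are memoized
// because both the enclosing `impl` blocks and the individual associated
// functions carry `#[salsa::tracked]`; the self type itself does not have
// to be a salsa struct (see `UntrackedHelper` below).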
#![allow(warnings)]

use common::LogDatabase as _;
use expect_test::expect;

mod common;

trait TrackedTrait<'db> {
    type Output;

    fn tracked_trait_fn(db: &'db dyn salsa::Database, input: MyInput) -> Self::Output;
}

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
struct MyOutput<'db> {
    field: u32,
}

#[salsa::tracked]
impl MyInput {
    #[salsa::tracked]
    fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> Self {
        Self::new(db, 2 * input.field(db))
    }

    #[salsa::tracked(returns(ref))]
    fn tracked_fn_ref(db: &dyn salsa::Database, input: MyInput) -> Self {
        Self::new(db, 3 * input.field(db))
    }
}

#[salsa::tracked]
impl<'db> TrackedTrait<'db> for MyOutput<'db> {
    type Output = Self;

    #[salsa::tracked]
    fn tracked_trait_fn(db: &'db dyn salsa::Database, input: MyInput) -> Self::Output {
        Self::new(db, 4 * input.field(db))
    }
}

// The self-type of a tracked impl doesn't have to be tracked itself:
struct UntrackedHelper;

#[salsa::tracked]
impl<'db> TrackedTrait<'db> for UntrackedHelper {
    type Output = MyOutput<'db>;

    #[salsa::tracked]
    fn tracked_trait_fn(db: &'db dyn salsa::Database, input: MyInput) -> Self::Output {
        MyOutput::tracked_trait_fn(db, input)
    }
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    let output = MyOutput::tracked_trait_fn(&db, input);
    let helper_output = UntrackedHelper::tracked_trait_fn(&db, input);

    // assert_eq!(object.tracked_fn(&db), 44);
    // assert_eq!(*object.tracked_fn_ref(&db), 66);
    assert_eq!(output.field(&db), 88);
    assert_eq!(helper_output.field(&db), 88);
}

#[test]
fn debug_name() {
    let mut db = common::ExecuteValidateLoggerDatabase::default();
    let input = MyInput::new(&db, 22);
    let output = MyOutput::tracked_trait_fn(&db, input);

    assert_eq!(output.field(&db), 88);
    db.assert_logs(expect![[r#"
        [
            "salsa_event(WillExecute { database_key: MyOutput < 'db >::tracked_trait_fn_(Id(0)) })",
        ]"#]]);
}
salsa-0.23.0/tests/tracked_fn_constant.rs000064400000000000000000000010521046102023000165300ustar 00000000000000
//! Test that a constant `tracked` fn (has no inputs)
//! compiles and executes successfully.
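//!
//! A constant tracked fn takes only the database as an argument, so it is
//! effectively keyed on the database alone: computed once, then reused until
//! a new revision invalidates it.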
#![allow(warnings)]

use crate::common::LogDatabase;
mod common;

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database) -> u32 {
    44
}

#[salsa::tracked]
fn tracked_custom_db(db: &dyn LogDatabase) -> u32 {
    44
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    assert_eq!(tracked_fn(&db), 44);
}

#[test]
fn execute_custom() {
    let mut db = common::LoggerDatabase::default();
    assert_eq!(tracked_custom_db(&db), 44);
}
salsa-0.23.0/tests/tracked_fn_high_durability_dependency.rs000064400000000000000000000013731046102023000222520ustar 00000000000000
#![allow(warnings)]

use salsa::plumbing::HasStorage;
use salsa::{Database, Durability, Setter};

mod common;

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> u32 {
    input.field(db) * 2
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::default();
    let input_high = MyInput::new(&mut db, 0);
    input_high
        .set_field(&mut db)
        .with_durability(Durability::HIGH)
        .to(2200);

    assert_eq!(tracked_fn(&db, input_high), 4400);

    // Changing the value should re-execute the query
    input_high
        .set_field(&mut db)
        .with_durability(Durability::HIGH)
        .to(2201);

    assert_eq!(tracked_fn(&db, input_high), 4402);
}
salsa-0.23.0/tests/tracked_fn_interned_lifetime.rs000064400000000000000000000004321046102023000203660ustar 00000000000000
#[salsa::interned]
struct Interned<'db> {
    field: i32,
}

#[salsa::tracked]
fn foo<'a>(_db: &'a dyn salsa::Database, _: Interned<'_>, _: Interned<'a>) {}

#[test]
fn the_test() {
    let db = salsa::DatabaseImpl::new();
    let i = Interned::new(&db, 123);
    foo(&db, i, i);
}
salsa-0.23.0/tests/tracked_fn_multiple_args.rs000064400000000000000000000011061046102023000175460ustar 00000000000000
//! Test that a `tracked` fn on multiple salsa struct args
//! compiles and executes successfully.

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::interned]
struct MyInterned<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInput, interned: MyInterned<'db>) -> u32 {
    input.field(db) + interned.field(db)
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    let interned = MyInterned::new(&db, 33);
    assert_eq!(tracked_fn(&db, input, interned), 55);
}
salsa-0.23.0/tests/tracked_fn_no_eq.rs000064400000000000000000000017511046102023000160060ustar 00000000000000
mod common;

use common::LogDatabase;
use expect_test::expect;
use salsa::Setter as _;

#[salsa::input]
struct Input {
    number: i16,
}

#[salsa::tracked(no_eq)]
fn abs_float(db: &dyn LogDatabase, input: Input) -> f32 {
    let number = input.number(db);
    db.push_log(format!("abs_float({number})"));
    number.abs() as f32
}

#[salsa::tracked]
fn derived(db: &dyn LogDatabase, input: Input) -> u32 {
    let x = abs_float(db, input);
    db.push_log("derived".to_string());
    x as u32
}

#[test]
fn invoke() {
    let mut db = common::LoggerDatabase::default();

    let input = Input::new(&db, 5);
    let x = derived(&db, input);
    assert_eq!(x, 5);

    input.set_number(&mut db).to(-5);

    // Derived should re-execute even if the result of `abs_float` is the same.
    let x = derived(&db, input);
    assert_eq!(x, 5);

    db.assert_logs(expect![[r#"
        [
            "abs_float(5)",
            "derived",
            "abs_float(-5)",
            "derived",
        ]"#]]);
}
salsa-0.23.0/tests/tracked_fn_on_input.rs000064400000000000000000000006511046102023000165360ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.
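//!
//! This is the smallest end-to-end case: one input struct, one tracked fn
//! reading one field, and an assertion on the memoized result.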
#![allow(warnings)]

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> u32 {
    input.field(db) * 2
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    assert_eq!(tracked_fn(&db, input), 44);
}
salsa-0.23.0/tests/tracked_fn_on_input_with_high_durability.rs000064400000000000000000000034401046102023000230170ustar 00000000000000
#![allow(warnings)]

use common::{EventLoggerDatabase, HasLogger, LogDatabase, Logger};
use expect_test::expect;
use salsa::plumbing::HasStorage;
use salsa::{Database, Durability, Event, EventKind, Setter};

mod common;

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> u32 {
    input.field(db) * 2
}

#[test]
fn execute() {
    let mut db = EventLoggerDatabase::default();

    let input_low = MyInput::new(&db, 22);
    let input_high = MyInput::builder(2200).durability(Durability::HIGH).new(&db);

    assert_eq!(tracked_fn(&db, input_low), 44);
    assert_eq!(tracked_fn(&db, input_high), 4400);

    db.assert_logs(expect![[r#"
        [
            "WillCheckCancellation",
            "WillExecute { database_key: tracked_fn(Id(0)) }",
            "WillCheckCancellation",
            "WillExecute { database_key: tracked_fn(Id(1)) }",
        ]"#]]);

    db.synthetic_write(Durability::LOW);

    assert_eq!(tracked_fn(&db, input_low), 44);
    assert_eq!(tracked_fn(&db, input_high), 4400);

    // FIXME: There's currently no good way to verify whether an input was validated using shallow or deep comparison.
    // All we can do for now is verify that the values were validated.
    // Note: It may be confusing why it validates `input_high` when the write has `Durability::LOW`.
    // This is because all values must be validated whenever a write occurs. It doesn't mean that it
    // executed the query.
    db.assert_logs(expect![[r#"
        [
            "DidSetCancellationFlag",
            "WillCheckCancellation",
            "DidValidateMemoizedValue { database_key: tracked_fn(Id(0)) }",
            "WillCheckCancellation",
            "DidValidateMemoizedValue { database_key: tracked_fn(Id(1)) }",
        ]"#]]);
}
salsa-0.23.0/tests/tracked_fn_on_interned.rs000064400000000000000000000006741046102023000172140ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::interned`
//! compiles and executes successfully.

#[salsa::interned]
struct Name<'db> {
    name: String,
}

#[salsa::tracked]
fn tracked_fn<'db>(db: &'db dyn salsa::Database, name: Name<'db>) -> String {
    name.name(db).clone()
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();
    let name = Name::new(&db, "Salsa".to_string());

    assert_eq!(tracked_fn(&db, name), "Salsa");
}
salsa-0.23.0/tests/tracked_fn_on_interned_enum.rs000064400000000000000000000047751046102023000202440ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::interned`
//! compiles and executes successfully.
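//!
//! Beyond plain interned structs, this file exercises `#[derive(salsa::Supertype)]`
//! enums whose variants are themselves salsa structs, including an enum nested
//! inside another enum and mixed with an input struct.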
#[salsa::interned(no_lifetime, debug)]
struct Name {
    name: String,
}

#[salsa::interned(debug)]
struct NameAndAge<'db> {
    name_and_age: String,
}

#[salsa::interned(no_lifetime, debug)]
struct Age {
    age: u32,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, salsa::Supertype)]
enum Enum<'db> {
    Name(Name),
    NameAndAge(NameAndAge<'db>),
    Age(Age),
}

#[salsa::input(debug)]
struct Input {
    value: String,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, salsa::Supertype)]
enum EnumOfEnum<'db> {
    Enum(Enum<'db>),
    Input(Input),
}

#[salsa::tracked]
fn tracked_fn<'db>(db: &'db dyn salsa::Database, enum_: Enum<'db>) -> String {
    match enum_ {
        Enum::Name(name) => name.name(db),
        Enum::NameAndAge(name_and_age) => name_and_age.name_and_age(db),
        Enum::Age(age) => age.age(db).to_string(),
    }
}

#[salsa::tracked]
fn tracked_fn2<'db>(db: &'db dyn salsa::Database, enum_: EnumOfEnum<'db>) -> String {
    match enum_ {
        EnumOfEnum::Enum(enum_) => tracked_fn(db, enum_),
        EnumOfEnum::Input(input) => input.value(db),
    }
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();
    let name = Name::new(&db, "Salsa".to_string());
    let name_and_age = NameAndAge::new(&db, "Salsa 3".to_string());
    let age = Age::new(&db, 123);

    assert_eq!(tracked_fn(&db, Enum::Name(name)), "Salsa");
    assert_eq!(tracked_fn(&db, Enum::NameAndAge(name_and_age)), "Salsa 3");
    assert_eq!(tracked_fn(&db, Enum::Age(age)), "123");
    assert_eq!(tracked_fn(&db, Enum::Name(name)), "Salsa");
    assert_eq!(tracked_fn(&db, Enum::NameAndAge(name_and_age)), "Salsa 3");
    assert_eq!(tracked_fn(&db, Enum::Age(age)), "123");

    assert_eq!(
        tracked_fn2(&db, EnumOfEnum::Enum(Enum::Name(name))),
        "Salsa"
    );
    assert_eq!(
        tracked_fn2(&db, EnumOfEnum::Enum(Enum::NameAndAge(name_and_age))),
        "Salsa 3"
    );
    assert_eq!(tracked_fn2(&db, EnumOfEnum::Enum(Enum::Age(age))), "123");
    assert_eq!(
        tracked_fn2(&db, EnumOfEnum::Enum(Enum::Name(name))),
        "Salsa"
    );
    assert_eq!(
        tracked_fn2(&db, EnumOfEnum::Enum(Enum::NameAndAge(name_and_age))),
        "Salsa 3"
    );
    assert_eq!(tracked_fn2(&db, EnumOfEnum::Enum(Enum::Age(age))), "123");
    assert_eq!(
        tracked_fn2(
            &db,
            EnumOfEnum::Input(Input::new(&db, "Hello world!".to_string()))
        ),
        "Hello world!"
    );
}
salsa-0.23.0/tests/tracked_fn_on_tracked.rs000064400000000000000000000007671046102023000170240ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> MyTracked<'_> {
    MyTracked::new(db, input.field(db) * 2)
}

#[test]
fn execute() {
    let db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    assert_eq!(tracked_fn(&db, input).field(&db), 44);
}
salsa-0.23.0/tests/tracked_fn_on_tracked_specify.rs000064400000000000000000000021351046102023000205350ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.
#![allow(warnings)]

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn<'db>(db: &'db dyn salsa::Database, input: MyInput) -> MyTracked<'db> {
    let t = MyTracked::new(db, input.field(db) * 2);
    if input.field(db) != 0 {
        tracked_fn_extra::specify(db, t, 2222);
    }
    t
}

#[salsa::tracked(specify)]
fn tracked_fn_extra<'db>(_db: &'db dyn salsa::Database, _input: MyTracked<'db>) -> u32 {
    0
}

#[test]
fn execute_when_specified() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    let tracked = tracked_fn(&db, input);
    assert_eq!(tracked.field(&db), 44);
    assert_eq!(tracked_fn_extra(&db, tracked), 2222);
}

#[test]
fn execute_when_not_specified() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 0);
    let tracked = tracked_fn(&db, input);
    assert_eq!(tracked.field(&db), 0);
    assert_eq!(tracked_fn_extra(&db, tracked), 0);
}
salsa-0.23.0/tests/tracked_fn_orphan_escape_hatch.rs000064400000000000000000000011061046102023000206550ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.
#![allow(warnings)]

use std::marker::PhantomData;

#[salsa::input]
struct MyInput {
    field: u32,
}

// Note: the generic argument of `PhantomData` was lost in extraction; it is
// reconstructed here as a lifetime-capturing function pointer, which matches
// the "not `Update`" purpose of this type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct NotUpdate<'a>(PhantomData<fn() -> &'a ()>);

#[salsa::tracked(unsafe(non_update_return_type))]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> NotUpdate<'_> {
    NotUpdate(PhantomData)
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 22);
    tracked_fn(&db, input);
}
salsa-0.23.0/tests/tracked_fn_read_own_entity.rs000064400000000000000000000050751046102023000201020ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.

use expect_test::expect;
mod common;
use common::LogDatabase;
use salsa::Setter;
use test_log::test;

#[salsa::input(debug)]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
fn final_result(db: &dyn LogDatabase, input: MyInput) -> u32 {
    db.push_log(format!("final_result({input:?})"));
    intermediate_result(db, input).field(db) * 2
}

#[salsa::tracked]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn intermediate_result(db: &dyn LogDatabase, input: MyInput) -> MyTracked<'_> {
    db.push_log(format!("intermediate_result({input:?})"));
    let tracked = MyTracked::new(db, input.field(db) / 2);
    let _ = tracked.field(db); // read the field of an entity we created
    tracked
}

#[test]
fn one_entity() {
    let mut db = common::LoggerDatabase::default();

    let input = MyInput::new(&db, 22);
    assert_eq!(final_result(&db, input), 22);
    db.assert_logs(expect![[r#"
        [
            "final_result(MyInput { [salsa id]: Id(0), field: 22 })",
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 22 })",
        ]"#]]);

    // Intermediate result is the same, so final result does
    // not need to be recomputed:
    input.set_field(&mut db).to(23);
    assert_eq!(final_result(&db, input), 22);
    db.assert_logs(expect![[r#"
        [
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 23 })",
        ]"#]]);

    input.set_field(&mut db).to(24);
    assert_eq!(final_result(&db, input), 24);
    db.assert_logs(expect![[r#"
        [
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 24 })",
            "final_result(MyInput { [salsa id]: Id(0), field: 24 })",
        ]"#]]);
}

/// Create and mutate a distinct input. No re-execution required.
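/// Mutating an unrelated input bumps the revision, so this checks that pure
/// validation (no re-execution) is enough to keep the existing memos alive.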
#[test]
fn red_herring() {
    let mut db = common::LoggerDatabase::default();

    let input = MyInput::new(&db, 22);
    assert_eq!(final_result(&db, input), 22);
    db.assert_logs(expect![[r#"
        [
            "final_result(MyInput { [salsa id]: Id(0), field: 22 })",
            "intermediate_result(MyInput { [salsa id]: Id(0), field: 22 })",
        ]"#]]);

    // Create a distinct input and mutate it.
    // This will trigger a new revision in the database
    // but shouldn't actually invalidate our existing ones.
    let input2 = MyInput::new(&db, 44);
    input2.set_field(&mut db).to(66);

    // Re-run the query on the original input. Nothing re-executes!
    assert_eq!(final_result(&db, input), 22);
    db.assert_logs(expect![[r#"
        []"#]]);
}
salsa-0.23.0/tests/tracked_fn_read_own_specify.rs000064400000000000000000000023461046102023000202260ustar 00000000000000
use expect_test::expect;
mod common;
use common::LogDatabase;
use salsa::Database;

#[salsa::input(debug)]
struct MyInput {
    field: u32,
}

#[salsa::tracked(debug)]
struct MyTracked<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn LogDatabase, input: MyInput) -> u32 {
    db.push_log(format!("tracked_fn({input:?})"));
    let t = MyTracked::new(db, input.field(db) * 2);
    tracked_fn_extra::specify(db, t, 2222);
    tracked_fn_extra(db, t)
}

#[salsa::tracked(specify)]
fn tracked_fn_extra<'db>(db: &'db dyn LogDatabase, input: MyTracked<'db>) -> u32 {
    db.push_log(format!("tracked_fn_extra({input:?})"));
    0
}

#[test]
fn execute() {
    let mut db = common::LoggerDatabase::default();

    let input = MyInput::new(&db, 22);
    assert_eq!(tracked_fn(&db, input), 2222);
    db.assert_logs(expect![[r#"
        [
            "tracked_fn(MyInput { [salsa id]: Id(0), field: 22 })",
        ]"#]]);

    // A "synthetic write" causes the system to act *as though* some
    // input of durability `durability` has changed.
    db.synthetic_write(salsa::Durability::LOW);

    // Re-run the query on the original input. Nothing re-executes!
    assert_eq!(tracked_fn(&db, input), 2222);
    db.assert_logs(expect!["[]"]);
}
salsa-0.23.0/tests/tracked_fn_return_ref.rs000064400000000000000000000011101046102023000170500ustar 00000000000000
use salsa::Database;

#[salsa::input]
struct Input {
    number: usize,
}

#[salsa::tracked(returns(ref))]
fn test(db: &dyn salsa::Database, input: Input) -> Vec<String> {
    (0..input.number(db)).map(|i| format!("test {i}")).collect()
}

#[test]
fn invoke() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, 3);
        let x: &Vec<String> = test(db, input);

        expect_test::expect![[r#"
            [
                "test 0",
                "test 1",
                "test 2",
            ]
        "#]]
        .assert_debug_eq(x);
    })
}
salsa-0.23.0/tests/tracked_method.rs000064400000000000000000000025431046102023000155020ustar 00000000000000
//! Test that a `tracked` fn on a `salsa::input`
//! compiles and executes successfully.
#![allow(warnings)]

use common::LogDatabase as _;
use expect_test::expect;

mod common;

trait TrackedTrait {
    fn tracked_trait_fn(self, db: &dyn salsa::Database) -> u32;
}

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
impl MyInput {
    #[salsa::tracked]
    fn tracked_fn(self, db: &dyn salsa::Database) -> u32 {
        self.field(db) * 2
    }

    #[salsa::tracked(returns(ref))]
    fn tracked_fn_ref(self, db: &dyn salsa::Database) -> u32 {
        self.field(db) * 3
    }
}

#[salsa::tracked]
impl TrackedTrait for MyInput {
    #[salsa::tracked]
    fn tracked_trait_fn(self, db: &dyn salsa::Database) -> u32 {
        self.field(db) * 4
    }
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let object = MyInput::new(&mut db, 22);
    // assert_eq!(object.tracked_fn(&db), 44);
    // assert_eq!(*object.tracked_fn_ref(&db), 66);
    assert_eq!(object.tracked_trait_fn(&db), 88);
}

#[test]
fn debug_name() {
    let mut db = common::ExecuteValidateLoggerDatabase::default();
    let object = MyInput::new(&mut db, 22);

    assert_eq!(object.tracked_trait_fn(&db), 88);
    db.assert_logs(expect![[r#"
        [
            "salsa_event(WillExecute { database_key: MyInput::tracked_trait_fn_(Id(0)) })",
        ]"#]]);
}
salsa-0.23.0/tests/tracked_method_inherent_return_deref.rs000064400000000000000000000011651046102023000221410ustar 00000000000000
use salsa::Database;

#[salsa::input]
struct Input {
    number: usize,
}

#[salsa::tracked]
impl Input {
    #[salsa::tracked(returns(deref))]
    fn test(self, db: &dyn salsa::Database) -> Vec<String> {
        (0..self.number(db)).map(|i| format!("test {i}")).collect()
    }
}

#[test]
fn invoke() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, 3);
        let x: &[String] = input.test(db);

        assert_eq!(
            x,
            &[
                "test 0".to_string(),
                "test 1".to_string(),
                "test 2".to_string()
            ]
        );
    })
}
salsa-0.23.0/tests/tracked_method_inherent_return_ref.rs000064400000000000000000000011571046102023000216310ustar 00000000000000
use salsa::Database;

#[salsa::input]
struct Input {
    number: usize,
}

#[salsa::tracked]
impl Input {
    #[salsa::tracked(returns(ref))]
    fn test(self, db: &dyn salsa::Database) -> Vec<String> {
        (0..self.number(db)).map(|i| format!("test {i}")).collect()
    }
}

#[test]
fn invoke() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, 3);
        let x: &Vec<String> = input.test(db);

        expect_test::expect![[r#"
            [
                "test 0",
                "test 1",
                "test 2",
            ]
        "#]]
        .assert_debug_eq(x);
    })
}
salsa-0.23.0/tests/tracked_method_on_tracked_struct.rs000064400000000000000000000027221046102023000212760ustar 00000000000000
use salsa::Database;

#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Item {}

#[salsa::input]
pub struct Input {
    name: String,
}

#[salsa::tracked]
impl Input {
    #[salsa::tracked]
    pub fn source_tree(self, db: &dyn Database) -> SourceTree<'_> {
        SourceTree::new(db, self.name(db).clone())
    }
}

#[salsa::tracked]
pub struct SourceTree<'db> {
    name: String,
}

#[salsa::tracked]
impl<'db1> SourceTree<'db1> {
    #[salsa::tracked(returns(ref))]
    pub fn inherent_item_name(self, db: &'db1 dyn Database) -> String {
        self.name(db)
    }
}

trait ItemName<'db1> {
    fn trait_item_name(self, db: &'db1 dyn Database) -> &'db1 String;
}

#[salsa::tracked]
impl<'db1> ItemName<'db1> for SourceTree<'db1> {
    #[salsa::tracked(returns(ref))]
    fn trait_item_name(self, db: &'db1 dyn Database) -> String {
        self.name(db)
    }
}

#[test]
fn test_inherent() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, "foo".to_string());
        let source_tree = input.source_tree(db);

        expect_test::expect![[r#"
            "foo"
        "#]]
        .assert_debug_eq(source_tree.inherent_item_name(db));
    })
}

#[test]
fn test_trait() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, "foo".to_string());
        let source_tree = input.source_tree(db);

        expect_test::expect![[r#"
            "foo"
        "#]]
        .assert_debug_eq(source_tree.trait_item_name(db));
    })
}
salsa-0.23.0/tests/tracked_method_trait_return_ref.rs000064400000000000000000000013071046102023000211350ustar 00000000000000
use salsa::Database;

#[salsa::input]
struct Input {
    number: usize,
}

trait Trait {
    fn test(self, db: &dyn salsa::Database) -> &Vec<String>;
}

#[salsa::tracked]
impl Trait for Input {
    #[salsa::tracked(returns(ref))]
    fn test(self, db: &dyn salsa::Database) -> Vec<String> {
        (0..self.number(db)).map(|i| format!("test {i}")).collect()
    }
}

#[test]
fn invoke() {
    salsa::DatabaseImpl::new().attach(|db| {
        let input = Input::new(db, 3);
        let x: &Vec<String> = input.test(db);

        expect_test::expect![[r#"
            [
                "test 0",
                "test 1",
                "test 2",
            ]
        "#]]
        .assert_debug_eq(x);
    })
}
salsa-0.23.0/tests/tracked_method_with_self_ty.rs000064400000000000000000000017551046102023000202660ustar 00000000000000
//! Test that a `tracked` fn with `Self` in its signature or body on a `salsa::input`
//! compiles and executes successfully.
#![allow(warnings)]

trait TrackedTrait {
    type Type;

    fn tracked_trait_fn(self, db: &dyn salsa::Database, ty: Self::Type) -> Self::Type;

    fn untracked_trait_fn();
}

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
impl MyInput {
    #[salsa::tracked]
    fn tracked_fn(self, db: &dyn salsa::Database, other: Self) -> u32 {
        self.field(db) + other.field(db)
    }
}

#[salsa::tracked]
impl TrackedTrait for MyInput {
    type Type = u32;

    #[salsa::tracked]
    fn tracked_trait_fn(self, db: &dyn salsa::Database, ty: Self::Type) -> Self::Type {
        Self::untracked_trait_fn();
        Self::tracked_fn(self, db, self) + ty
    }

    fn untracked_trait_fn() {}
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let object = MyInput::new(&mut db, 10);
    assert_eq!(object.tracked_trait_fn(&db, 1), 21);
}
salsa-0.23.0/tests/tracked_struct.rs000064400000000000000000000024201046102023000155400ustar 00000000000000
mod common;
use salsa::{Database, Setter};

#[salsa::tracked]
struct Tracked<'db> {
    untracked_1: usize,

    untracked_2: usize,
}

#[salsa::input]
struct MyInput {
    field1: usize,
    field2: usize,
}

#[salsa::tracked]
fn intermediate(db: &dyn salsa::Database, input: MyInput) -> Tracked<'_> {
    Tracked::new(db, input.field1(db), input.field2(db))
}

#[salsa::tracked]
fn accumulate(db: &dyn salsa::Database, input: MyInput) -> (usize, usize) {
    let tracked = intermediate(db, input);
    let one = read_tracked_1(db, tracked);
    let two = read_tracked_2(db, tracked);

    (one, two)
}

#[salsa::tracked]
fn read_tracked_1<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.untracked_1(db)
}

#[salsa::tracked]
fn read_tracked_2<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.untracked_2(db)
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::default();

    let input = MyInput::new(&db, 1, 1);
    assert_eq!(accumulate(&db, input), (1, 1));

    // Should only re-execute `read_tracked_1`.
    input.set_field1(&mut db).to(2);
    assert_eq!(accumulate(&db, input), (2, 1));

    // Should only re-execute `read_tracked_2`.
    input.set_field2(&mut db).to(2);
    assert_eq!(accumulate(&db, input), (2, 2));
}
salsa-0.23.0/tests/tracked_struct_db1_lt.rs000064400000000000000000000005231046102023000167870ustar 00000000000000
//! Test that tracked structs with lifetimes not named `'db`
//! compile successfully.
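//! The macro should not hard-code the `'db` name; any single lifetime
//! parameter on the struct ought to work.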
mod common;
use test_log::test;

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked]
struct MyTracked1<'db1> {
    field: MyTracked2<'db1>,
}

#[salsa::tracked]
struct MyTracked2<'db2> {
    field: u32,
}

#[test]
fn create_db() {}
salsa-0.23.0/tests/tracked_struct_disambiguates.rs000064400000000000000000000046761046102023000204560ustar 00000000000000
//! Test that disambiguation works, that is when we have a revision where we track multiple structs
//! that have the same hash, we can still differentiate between them.
#![allow(warnings)]

use std::hash::Hash;

use rayon::iter::Either;
use salsa::Setter;

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::input]
struct MyInputs {
    field: Vec<MyInput>,
}

#[salsa::tracked]
struct TrackedStruct<'db> {
    field: DumbHashable,
}

#[salsa::tracked]
struct TrackedStruct2<'db> {
    field: DumbHashable,
}

#[derive(Debug, Clone)]
pub struct DumbHashable {
    field: u32,
}

impl Eq for DumbHashable {}
impl PartialEq for DumbHashable {
    fn eq(&self, other: &Self) -> bool {
        self.field == other.field
    }
}

// Force collisions; note that this is still a correct implementation wrt. PartialEq / Eq above
// as it keeps the property that k1 == k2 -> hash(k1) == hash(k2)
impl Hash for DumbHashable {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        (self.field % 3).hash(state);
    }
}

fn alternate(
    db: &dyn salsa::Database,
    input: MyInput,
) -> Either<TrackedStruct<'_>, TrackedStruct2<'_>> {
    if input.field(db) % 2 == 0 {
        Either::Left(TrackedStruct::new(
            db,
            DumbHashable {
                field: input.field(db),
            },
        ))
    } else {
        Either::Right(TrackedStruct2::new(
            db,
            DumbHashable {
                field: input.field(db),
            },
        ))
    }
}

#[salsa::tracked]
fn batch(
    db: &dyn salsa::Database,
    inputs: MyInputs,
) -> Vec<Either<TrackedStruct<'_>, TrackedStruct2<'_>>> {
    inputs
        .field(db)
        .iter()
        .map(|input| alternate(db, input.clone()))
        .collect()
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let inputs = MyInputs::new(
        &db,
        (0..64).into_iter().map(|i| MyInput::new(&db, i)).collect(),
    );
    let trackeds = batch(&db, inputs);
    for (id, tracked) in trackeds.into_iter().enumerate() {
        assert_eq!(id % 2 == 0, tracked.is_left());
        assert_eq!(id % 2 != 0, tracked.is_right());
    }

    for input in inputs.field(&db) {
        let prev = input.field(&db);
        input.set_field(&mut db).to(prev);
    }

    let trackeds = batch(&db, inputs);
    for (id, tracked) in trackeds.into_iter().enumerate() {
        assert_eq!(id % 2 == 0, tracked.is_left());
        assert_eq!(id % 2 != 0, tracked.is_right());
    }
}
salsa-0.23.0/tests/tracked_struct_durability.rs000064400000000000000000000102361046102023000177740ustar 00000000000000
/// Test that high durabilities can't cause "access tracked struct from previous revision" panic.
///
/// The test models a situation where we have two File inputs (0, 1), where `File(0)` has LOW
/// durability and `File(1)` has HIGH durability. We can query an `index` for each file, and a
/// `definitions` from that index (just a sub-part of the index), and we can `infer` each file. The
/// `index` and `definitions` queries depend only on the `File` they operate on, but the `infer`
/// query has some other dependencies: `infer(0)` depends on `infer(1)`, and `infer(1)` also
/// depends directly on `File(0)`.
///
/// The panic occurs (in versions of Salsa without a fix) because `definitions(1)` is high
/// durability, and depends on `index(1)` which is also high durability. `index(1)` creates the
/// tracked struct `Definition(1)`, and `infer(1)` (which is low durability) depends on
/// `Definition.file(1)`.
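/// (Reading `Definition.file(1)` is a tracked-struct field access, which is only
/// valid if `Definition(1)` itself was created or re-validated in the current
/// revision -- that is the invariant the scenario below ends up violating.)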
///
/// After a change to `File(0)` (low durability), we only shallowly verify `definitions(1)` -- it
/// passes shallow verification due to durability. We take care to mark-validated the outputs of
/// `definitions(1)`, but we never verify `index(1)` at all (deeply or shallowly), which means we
/// never mark `Definition(1)` validated. So when we deep-verify `infer(1)`, we try to access its
/// dependency `Definition.file(1)`, and hit the panic because we are accessing a tracked struct
/// that has never been re-validated or re-recreated in R2.
use salsa::{Durability, Setter};

#[salsa::db]
trait Db: salsa::Database {
    fn file(&self, idx: usize) -> File;
}

#[salsa::input]
struct File {
    field: usize,
}

#[salsa::tracked]
struct Definition<'db> {
    #[tracked]
    file: File,
}

#[salsa::tracked]
struct Index<'db> {
    #[tracked]
    definitions: Definitions<'db>,
}

#[salsa::tracked]
struct Definitions<'db> {
    #[tracked]
    definition: Definition<'db>,
}

#[salsa::tracked]
struct Inference<'db> {
    #[tracked]
    definition: Definition<'db>,
}

#[salsa::tracked]
fn index<'db>(db: &'db dyn Db, file: File) -> Index<'db> {
    let _ = file.field(db);
    Index::new(db, Definitions::new(db, Definition::new(db, file)))
}

#[salsa::tracked]
fn definitions<'db>(db: &'db dyn Db, file: File) -> Definitions<'db> {
    index(db, file).definitions(db)
}

#[salsa::tracked]
fn infer<'db>(db: &'db dyn Db, definition: Definition<'db>) -> Inference<'db> {
    let file = definition.file(db);
    if file.field(db) < 1 {
        let dependent_file = db.file(1);
        infer(db, definitions(db, dependent_file).definition(db))
    } else {
        db.file(0).field(db);
        index(db, file);
        Inference::new(db, definition)
    }
}

#[salsa::tracked]
fn check<'db>(db: &'db dyn Db, file: File) -> Inference<'db> {
    let defs = definitions(db, file);
    infer(db, defs.definition(db))
}

#[test]
fn execute() {
    #[salsa::db]
    #[derive(Default, Clone)]
    struct Database {
        storage: salsa::Storage<Self>,
        files: Vec<File>,
    }

    #[salsa::db]
    impl salsa::Database for Database {}

    #[salsa::db]
    impl Db for Database {
        fn file(&self, idx: usize) -> File {
            self.files[idx]
        }
    }

    let mut db = Database::default();

    // Create a file 0 with low durability, and a file 1 with high durability.
    let file0 = File::new(&db, 0);
    db.files.push(file0);

    let file1 = File::new(&db, 1);
    file1
        .set_field(&mut db)
        .with_durability(Durability::HIGH)
        .to(1);
    db.files.push(file1);

    // check(0) -> infer(0) -> definitions(0) -> index(0)
    //                     \-> infer(1) -> definitions(1) -> index(1)
    assert_eq!(check(&db, file0).definition(&db).file(&db).field(&db), 1);

    // update the low durability file 0
    file0.set_field(&mut db).to(0);

    // Re-query check(0). definitions(1) is high durability so it short-circuits in shallow-verify,
    // meaning we never verify index(1) at all, but index(1) created the tracked struct
    // Definition(1), so we never validate Definition(1) in R2, so when we try to verify
    // Definition.file(1) (as an input of infer(1) ) we hit a panic for trying to use a struct that
    // isn't validated in R2.
    check(&db, file0);
}
salsa-0.23.0/tests/tracked_struct_manual_update.rs000064400000000000000000000033231046102023000204420ustar 00000000000000
mod common;

use std::sync::atomic::{AtomicBool, Ordering};

use salsa::{Database, Setter};

static MARK1: AtomicBool = AtomicBool::new(false);
static MARK2: AtomicBool = AtomicBool::new(false);

#[salsa::tracked]
struct Tracked<'db> {
    #[tracked]
    #[maybe_update(|dst, src| {
        *dst = src;
        MARK1.store(true, Ordering::Release);
        true
    })]
    tracked: usize,

    #[maybe_update(untracked_update)]
    untracked: usize,
}

unsafe fn untracked_update(dst: *mut usize, src: usize) -> bool {
    unsafe { *dst = src };
    MARK2.store(true, Ordering::Release);
    true
}

#[salsa::input]
struct MyInput {
    field1: usize,
    field2: usize,
}

#[salsa::tracked]
fn intermediate(db: &dyn salsa::Database, input: MyInput) -> Tracked<'_> {
    Tracked::new(db, input.field1(db), input.field2(db))
}

#[salsa::tracked]
fn accumulate(db: &dyn salsa::Database, input: MyInput) -> (usize, usize) {
    let tracked = intermediate(db, input);
    let one = read_tracked(db, tracked);
    let two = read_untracked(db, tracked);

    (one, two)
}

#[salsa::tracked]
fn read_tracked<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.tracked(db)
}

#[salsa::tracked]
fn read_untracked<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.untracked(db)
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::default();

    let input = MyInput::new(&db, 1, 1);
    assert_eq!(accumulate(&db, input), (1, 1));
    assert!(!MARK1.load(Ordering::Acquire));
    assert!(!MARK2.load(Ordering::Acquire));

    input.set_field1(&mut db).to(2);
    assert_eq!(accumulate(&db, input), (2, 1));
    assert!(MARK1.load(Ordering::Acquire));
    assert!(MARK2.load(Ordering::Acquire));
}
salsa-0.23.0/tests/tracked_struct_mixed_tracked_fields.rs000064400000000000000000000030361046102023000217550ustar 00000000000000
mod common;
use salsa::{Database, Setter};

// A tracked struct with mixed tracked and untracked fields to ensure
// the correct field indices are used when tracking dependencies.
#[salsa::tracked]
struct Tracked<'db> {
    untracked_1: usize,

    #[tracked]
    tracked_1: usize,

    untracked_2: usize,

    untracked_3: usize,

    #[tracked]
    tracked_2: usize,

    untracked_4: usize,
}

#[salsa::input]
struct MyInput {
    field1: usize,
    field2: usize,
}

#[salsa::tracked]
fn intermediate(db: &dyn salsa::Database, input: MyInput) -> Tracked<'_> {
    Tracked::new(db, 0, input.field1(db), 0, 0, input.field2(db), 0)
}

#[salsa::tracked]
fn accumulate(db: &dyn salsa::Database, input: MyInput) -> (usize, usize) {
    let tracked = intermediate(db, input);
    let one = read_tracked_1(db, tracked);
    let two = read_tracked_2(db, tracked);

    (one, two)
}

#[salsa::tracked]
fn read_tracked_1<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.tracked_1(db)
}

#[salsa::tracked]
fn read_tracked_2<'db>(db: &'db dyn Database, tracked: Tracked<'db>) -> usize {
    tracked.tracked_2(db)
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::default();

    let input = MyInput::new(&db, 1, 1);
    assert_eq!(accumulate(&db, input), (1, 1));

    // Should only re-execute `read_tracked_1`.
    input.set_field1(&mut db).to(2);
    assert_eq!(accumulate(&db, input), (2, 1));

    // Should only re-execute `read_tracked_2`.
    input.set_field2(&mut db).to(2);
    assert_eq!(accumulate(&db, input), (2, 2));
}
salsa-0.23.0/tests/tracked_struct_recreate_new_revision.rs000064400000000000000000000014431046102023000222050ustar 00000000000000
//! Test that re-creating a `tracked` struct after it was deleted in a previous
//! revision doesn't panic.
#![allow(warnings)]

use salsa::Setter;

#[salsa::input]
struct MyInput {
    field: u32,
}

#[salsa::tracked(debug)]
struct TrackedStruct<'db> {
    field: u32,
}

#[salsa::tracked]
fn tracked_fn(db: &dyn salsa::Database, input: MyInput) -> Option<TrackedStruct<'_>> {
    if input.field(db) == 1 {
        Some(TrackedStruct::new(db, 1))
    } else {
        None
    }
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::new();
    let input = MyInput::new(&db, 1);
    assert!(tracked_fn(&db, input).is_some());
    input.set_field(&mut db).to(0);
    assert_eq!(tracked_fn(&db, input), None);
    input.set_field(&mut db).to(1);
    assert!(tracked_fn(&db, input).is_some());
}
salsa-0.23.0/tests/tracked_struct_with_interned_query.rs000064400000000000000000000026461046102023000217200ustar 00000000000000
mod common;
use salsa::Setter;

#[salsa::input]
struct MyInput {
    value: usize,
}

#[salsa::tracked]
struct Tracked<'db> {
    value: String,
}

#[salsa::tracked]
fn query_tracked(db: &dyn salsa::Database, input: MyInput) -> Tracked<'_> {
    Tracked::new(db, format!("{value}", value = input.value(db)))
}

#[salsa::tracked]
fn join<'db>(db: &'db dyn salsa::Database, tracked: Tracked<'db>, with: String) -> String {
    format!("{}{}", tracked.value(db), with)
}

#[test]
fn execute() {
    let mut db = salsa::DatabaseImpl::default();

    let input = MyInput::new(&db, 1);
    let tracked = query_tracked(&db, input);
    let joined = join(&db, tracked, "world".to_string());
    assert_eq!(joined, "1world");

    // Create a new revision: This puts the tracked struct created in revision 0
    // into the free list.
    input.set_value(&mut db).to(2);
    let tracked = query_tracked(&db, input);
    let joined = join(&db, tracked, "world".to_string());
    assert_eq!(joined, "2world");

    // Create a new revision: The tracked struct created in revision 0 is now
    // reused, including its id. The argument to `join` will hash and compare
    // equal to the argument used in revision 0 but the return value should be
    // 3world and not 1world.
    input.set_value(&mut db).to(3);
    let tracked = query_tracked(&db, input);
    let joined = join(&db, tracked, "world".to_string());
    assert_eq!(joined, "3world");
}
salsa-0.23.0/tests/tracked_with_intern.rs000064400000000000000000000005401046102023000165470ustar 00000000000000
//! Test that setting a field on a `#[salsa::input]`
//! overwrites and returns the old value.

use test_log::test;

#[salsa::input]
struct MyInput {
    field: String,
}

#[salsa::tracked]
struct MyTracked<'db> {
    #[tracked]
    field: MyInterned<'db>,
}

#[salsa::interned]
struct MyInterned<'db> {
    field: String,
}

#[test]
fn execute() {}
salsa-0.23.0/tests/tracked_with_struct_db.rs000064400000000000000000000030201046102023000172350ustar 00000000000000
//! Test that setting a field on a `#[salsa::input]`
//! overwrites and returns the old value.
use salsa::{Database, DatabaseImpl, Update};
use test_log::test;

#[salsa::input(debug)]
struct MyInput {
    field: String,
}

#[salsa::tracked(debug)]
struct MyTracked<'db> {
    #[tracked]
    data: MyInput,

    #[tracked]
    next: MyList<'db>,
}

#[derive(PartialEq, Eq, Clone, Debug, Update)]
enum MyList<'db> {
    None,
    Next(MyTracked<'db>),
}

#[salsa::tracked]
fn create_tracked_list(db: &dyn Database, input: MyInput) -> MyTracked<'_> {
    let t0 = MyTracked::new(db, input, MyList::None);
    let t1 = MyTracked::new(db, input, MyList::Next(t0));
    t1
}

#[test]
fn execute() {
    DatabaseImpl::new().attach(|db| {
        let input = MyInput::new(db, "foo".to_string());
        let t0: MyTracked = create_tracked_list(db, input);
        let t1 = create_tracked_list(db, input);
        expect_test::expect![[r#"
            MyTracked {
                [salsa id]: Id(401),
                data: MyInput {
                    [salsa id]: Id(0),
                    field: "foo",
                },
                next: Next(
                    MyTracked {
                        [salsa id]: Id(400),
                        data: MyInput {
                            [salsa id]: Id(0),
                            field: "foo",
                        },
                        next: None,
                    },
                ),
            }
        "#]]
        .assert_debug_eq(&t0);
        assert_eq!(t0, t1);
    })
}
salsa-0.23.0/tests/tracked_with_struct_ord.rs000064400000000000000000000014531046102023000174440ustar 00000000000000
//! Test that `PartialOrd` and `Ord` can be derived for tracked structs

use salsa::{Database, DatabaseImpl};
use test_log::test;

#[salsa::input]
#[derive(PartialOrd, Ord)]
struct Input {
    value: usize,
}

#[salsa::tracked(debug)]
#[derive(Ord, PartialOrd)]
struct MyTracked<'db> {
    value: usize,
}

#[salsa::tracked]
fn create_tracked(db: &dyn Database, input: Input) -> MyTracked<'_> {
    MyTracked::new(db, input.value(db))
}

#[test]
fn execute() {
    DatabaseImpl::new().attach(|db| {
        let input1 = Input::new(db, 20);
        let input2 = Input::new(db, 10);
        // Compares by ID and not by value.
        assert!(input1 <= input2);

        let t0: MyTracked = create_tracked(db, input1);
        let t1: MyTracked = create_tracked(db, input2);
        assert!(t0 <= t1);
    })
}
salsa-0.23.0/tests/warnings/main.rs000064400000000000000000000002211046102023000152700ustar 00000000000000
//! Test that macros don't generate code with warnings
#![deny(warnings)]

mod needless_borrow;
mod needless_lifetimes;
mod unused_variable_db;
salsa-0.23.0/tests/warnings/needless_borrow.rs000064400000000000000000000002321046102023000175420ustar 00000000000000
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
enum Token {}

#[salsa::tracked]
struct TokenTree<'db> {
    #[returns(ref)]
    tokens: Vec<Token>,
}
salsa-0.23.0/tests/warnings/needless_lifetimes.rs000064400000000000000000000010161046102023000202120ustar 00000000000000
#[salsa::db]
pub trait Db: salsa::Database {}

#[derive(Debug, PartialEq, Eq, Hash)]
pub struct Item {}

#[salsa::tracked]
pub struct SourceTree<'db> {}

#[salsa::tracked]
impl<'db> SourceTree<'db> {
    #[salsa::tracked(returns(ref))]
    pub fn all_items(self, _db: &'db dyn Db) -> Vec<Item> {
        todo!()
    }
}

#[salsa::tracked(returns(ref))]
fn use_tree<'db>(_db: &'db dyn Db, _tree: SourceTree<'db>) {}

#[allow(unused)]
fn use_it(db: &dyn Db, tree: SourceTree) {
    tree.all_items(db);
    use_tree(db, tree);
}
salsa-0.23.0/tests/warnings/unused_variable_db.rs000064400000000000000000000000531046102023000201640ustar 00000000000000
#[salsa::interned]
struct Keywords<'db> {}