hyper-util-0.1.19/.cargo_vcs_info.json0000644000000001361046102023000132500ustar { "git": { "sha1": "d5740116a55cbf7af13d1142b365c56b1d684f3a" }, "path_in_vcs": "" }hyper-util-0.1.19/.github/workflows/CI.yml000064400000000000000000000061411046102023000164330ustar 00000000000000name: CI on: pull_request: push: branches: - master env: RUST_BACKTRACE: 1 jobs: ci-pass: name: CI is green runs-on: ubuntu-latest needs: - style - test - msrv - miri - features - semver - doc steps: - run: exit 0 style: name: Check Style runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - uses: dtolnay/rust-toolchain@stable with: components: rustfmt - run: cargo fmt --all --check test: name: Test ${{ matrix.rust }} on ${{ matrix.os }} needs: [style] strategy: matrix: rust: - stable - beta - nightly os: - ubuntu-latest - windows-latest - macos-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v5 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - run: cargo test --all-features msrv: name: Check MSRV (${{ matrix.rust }}) on ${{ matrix.os }} needs: [style] strategy: matrix: rust: [ 1.63 ] # keep in sync with 'rust-version' in Cargo.toml os: - ubuntu-latest - windows-latest - macos-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v5 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 - name: Resolve MSRV aware dependencies run: | cargo update cargo update system-configuration --precise 0.5.0 env: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - run: cargo check --features full miri: name: Test with Miri needs: [style] runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - uses: dtolnay/rust-toolchain@nightly with: components: miri - name: Test env: # Can't enable tcp feature since Miri does not support the tokio runtime MIRIFLAGS: 
"-Zmiri-disable-isolation" run: cargo miri test --all-features features: name: features needs: [style] runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack - run: cargo hack --no-dev-deps check --feature-powerset --depth 2 semver: name: semver runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - name: Check semver uses: obi1kenobi/cargo-semver-checks-action@v2 with: feature-group: only-explicit-features features: full release-type: minor doc: name: Build docs needs: [style, test] runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - uses: dtolnay/rust-toolchain@nightly - run: cargo rustdoc --features full -- --cfg docsrs -D rustdoc::broken_intra_doc_links hyper-util-0.1.19/.github/workflows/rustdoc-preview.yml000064400000000000000000000063671046102023000213140ustar 00000000000000name: Rustdoc PR Preview on: issue_comment: types: [created] pull_request: types: [closed] jobs: rustdoc-preview: # Only run on issue_comment, not on PR close if: github.event_name == 'issue_comment' && github.event.issue.pull_request && contains(github.event.comment.body, '/rustdoc-preview') runs-on: ubuntu-latest steps: - name: Check if commenter is a collaborator id: collaborator-check uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | const commenter = context.payload.comment.user.login; const owner = context.repo.owner; const repo = context.repo.repo; try { await github.rest.repos.checkCollaborator({ owner, repo, username: commenter }); return true; } catch (e) { return false; } # Only continue if the check passes - name: Fail if not collaborator if: steps.collaborator-check.outputs.result != 'true' run: | echo "Commenter is not a collaborator. Skipping preview build." 
exit 1 - name: Checkout PR branch uses: actions/checkout@v4 with: # Check out the PR's branch ref: refs/pull/${{ github.event.issue.number }}/head - name: Install Rust toolchain uses: dtolnay/rust-toolchain@nightly - name: Build rustdoc run: cargo rustdoc --features full -- --cfg docsrs - name: Deploy rustdoc to gh-pages/pr- uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./target/doc # Publish to pr- subdir destination_dir: pr-${{ github.event.issue.number }} keep_files: true - name: Comment preview link on PR uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | const pr_number = context.issue.number; const repo = context.repo.repo; const owner = context.repo.owner; const url = `https://${owner}.github.io/${repo}/pr-${pr_number}/hyper_util/`; github.rest.issues.createComment({ issue_number: pr_number, owner, repo, body: `📝 Rustdoc preview for this PR: [View docs](${url})` }); rustdoc-preview-cleanup: # Only run on PR close/merge if: github.event_name == 'pull_request' && github.event.action == 'closed' runs-on: ubuntu-latest steps: - name: Checkout gh-pages branch uses: actions/checkout@v4 with: ref: gh-pages persist-credentials: true - name: Remove PR preview directory run: | rm -rf pr-${{ github.event.pull_request.number }} - name: Commit and push removal run: | git config user.name "github-actions" git config user.email "github-actions@github.com" git add . 
git commit -m "Remove rustdoc preview for PR #${{ github.event.pull_request.number }}" || echo "Nothing to commit" git push hyper-util-0.1.19/.gitignore000064400000000000000000000005001046102023000140010ustar 00000000000000# Generated by Cargo # will have compiled files and executables /target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk hyper-util-0.1.19/CHANGELOG.md000064400000000000000000000123331046102023000136310ustar 00000000000000# 0.1.19 (2025-12-03) - Add `client::pool` module for composable pools. Enable with the `client-pool` feature. - Add `pool::singleton` for sharing a single cloneable connection. - Add `pool::cache` for caching a list of connections. - Add `pool::negotiate` for combining two pools with upgrade and fallback negotiation. - Add `pool::map` for customizable mapping of keys and connections. # 0.1.18 (2025-11-13) - Fix `rt::TokioTimer` to support Tokio's paused time. - Fix `client::proxy::match::Matcher` to parse auth without passwords. # 0.1.17 (2025-09-15) - Fix `legacy::Client` to allow absolute-form URIs when `Connected::proxy(true)` is passed and the scheme is `https`. # 0.1.16 (2025-07-22) - Add `impl Clone` for `proxy::Tunnel` service. - Fix `proxy::Matcher` to detect SOCKS4 schemes. - Fix `legacy::Client` pool idle checker to trigger less aggresively, saving CPU. # 0.1.15 (2025-07-07) - Add header casing options to `auto::Builder`. - Fix `proxy::Socksv5` to check for enough bytes before parsing ipv6 responses. - Fix including `client-proxy` in the `full` feature set. # 0.1.14 (2025-06-04) - Fix `HttpConnector` to defer address family order to resolver sort order. - Fix `proxy::Matcher` to find HTTPS system proxies on Windows. 
# 0.1.13 (2025-05-27) - Fix `HttpConnector` to always prefer IPv6 addresses first, if happy eyeballs is enabled. - Fix `legacy::Client` to return better errors if available on the connection. # 0.1.12 (2025-05-19) - Add `client::legacy::proxy::Tunnel` connector that wraps another connector with HTTP tunneling. - Add `client::legacy::proxy::{SocksV4, SocksV5}` connectors that wraps another connector with SOCKS. - Add `client::proxy::matcher::Matcher` type that can use environment variables to match proxy rules. - Add `server::graceful::Watcher` type that can be sent to watch a connection in another task. - Add `GracefulShutdown::count()` method to get number of currently watched connections. - Fix missing `must_use` attributes on `Connection` futures. - Fix tracing span in GAI resolver that can cause panics. # 0.1.11 (2025-03-31) - Add `tracing` crate feature with support in `TokioExecutor`. - Add `HttpConnector::interface()` support for macOS and Solarish systems. - Add `rt::WithHyperIo` and `rt::WithTokioIo` combinators. - Add `auto_date_header()` for auto server builder. - Add `max_local_error_reset_streams()` for auto server builder. - Add `ignore_invalid_headers()` for auto server builder. - Add methods to determine if auto server is configured for HTTP/1 or HTTP/2. - Implement `Connection` for `UnixStream` and `NamedPipeClient`. - Fix HTTP/2 websocket requests sent through `legacy::Client`. # 0.1.10 (2024-10-28) - Add `http2_max_header_list_size(num)` option to legacy client builder. - Add `set_tcp_user_timeout(dur)` option to legacy `HttpConnector`. # 0.1.9 (2024-09-24) - Add support for `client::legacy` DNS resolvers to set non-zero ports on returned addresses. - Fix `client::legacy` wrongly retrying pooled connections that were created successfully but failed immediately after, resulting in a retry loop. # 0.1.8 (2024-09-09) - Add `server::conn::auto::upgrade::downcast()` for use with auto connection upgrades. 
# 0.1.7 (2024-08-06) - Add `Connected::poison()` to `legacy` client, a port from hyper v0.14.x. - Add `Error::connect_info()` to `legacy` client, a port from hyper v0.14.x. # 0.1.6 (2024-07-01) - Add support for AIX operating system to `legacy` client. - Fix `legacy` client to better use dying pooled connections. # 0.1.5 (2024-05-28) - Add `server::graceful::GracefulShutdown` helper to coordinate over many connections. - Add `server::conn::auto::Connection::into_owned()` to unlink lifetime from `Builder`. - Allow `service` module to be available with only `service` feature enabled. # 0.1.4 (2024-05-24) - Add `initial_max_send_streams()` to `legacy` client builder - Add `max_pending_accept_reset_streams()` to `legacy` client builder - Add `max_headers(usize)` to `auto` server builder - Add `http1_onl()` and `http2_only()` to `auto` server builder - Add connection capturing API to `legacy` client - Add `impl Connection for TokioIo` - Fix graceful shutdown hanging on reading the HTTP version # 0.1.3 (2024-01-31) ### Added - Add `Error::is_connect()` which returns true if error came from client `Connect`. - Add timer support to `legacy` pool. - Add support to enable http1/http2 parts of `auto::Builder` individually. ### Fixed - Fix `auto` connection so it can handle requests shorter than the h2 preface. - Fix `legacy::Client` to no longer error when keep-alive is diabled. # 0.1.2 (2023-12-20) ### Added - Add `graceful_shutdown()` method to `auto` connections. - Add `rt::TokioTimer` type that implements `hyper::rt::Timer`. - Add `service::TowerToHyperService` adapter, allowing using `tower::Service`s as a `hyper::service::Service`. - Implement `Clone` for `auto::Builder`. - Exports `legacy::{Builder, ResponseFuture}`. ### Fixed - Enable HTTP/1 upgrades on the `legacy::Client`. - Prevent divide by zero if DNS returns 0 addresses. # 0.1.1 (2023-11-17) ### Added - Make `server-auto` enable the `server` feature. 
### Fixed - Reduce `Send` bounds requirements for `auto` connections. - Docs: enable all features when generating. # 0.1.0 (2023-11-16) Initial release. hyper-util-0.1.19/Cargo.lock0000644000000543311046102023000112310ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "aho-corasick" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "async-stream" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", "pin-project-lite", ] [[package]] name = "async-stream-impl" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bytes" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "core-foundation" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "env_logger" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", "log", "regex", "termcolor", ] [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures-channel" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-sink" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies 
= [ "futures-core", "futures-task", "pin-project-lite", "pin-utils", ] [[package]] name = "h2" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", "http", "indexmap", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" [[package]] name = "hermit-abi" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "http" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", ] [[package]] name = "http-body-util" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", "http", "http-body", "pin-project-lite", ] [[package]] name = "httparse" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" version = "2.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1744436df46f0bde35af3eda22aeaba453aada65d8f1c171cd8a5f59030bd69f" dependencies = [ "atomic-waker", "bytes", "futures-channel", "futures-core", "h2", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "pin-utils", "smallvec", "tokio", "want", ] [[package]] name = "hyper-util" version = "0.1.19" dependencies = [ "base64", "bytes", "futures-channel", "futures-core", "futures-util", "http", "http-body", "http-body-util", "hyper", "ipnet", "libc", "percent-encoding", "pin-project-lite", "pnet_datalink", "pretty_env_logger", "socket2", "system-configuration", "tokio", "tokio-test", "tower-layer", "tower-service", "tower-test", "tracing", "windows-registry", ] [[package]] name = "indexmap" version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "ipnet" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "ipnetwork" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e" dependencies = [ "serde", ] [[package]] name = "is-terminal" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", "windows-sys 0.61.2", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "libc" version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "log" version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mio" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", "wasi", "windows-sys 0.61.2", ] [[package]] name = "no-std-net" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "percent-encoding" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pin-project" version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "pin-project-lite" version = 
"0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pnet_base" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7" dependencies = [ "no-std-net", ] [[package]] name = "pnet_datalink" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7" dependencies = [ "ipnetwork", "libc", "pnet_base", "pnet_sys", "winapi", ] [[package]] name = "pnet_sys" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b" dependencies = [ "libc", "winapi", ] [[package]] name = "pretty_env_logger" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c" dependencies = [ "env_logger", "log", ] [[package]] name = "proc-macro2" version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "regex" version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", 
"regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "signal-hook-registry" version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" 
dependencies = [ "libc", "windows-sys 0.60.2", ] [[package]] name = "syn" version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "system-configuration" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "tokio" version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tokio-stream" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-test" version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", "bytes", "futures-core", "tokio", "tokio-stream", ] [[package]] name = "tokio-util" version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", ] [[package]] name = "tower-layer" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tower-test" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4546773ffeab9e4ea02b8872faa49bb616a80a7da66afc2f32688943f97efa7" dependencies = [ "futures-util", "pin-project", "tokio", "tokio-test", "tower-layer", "tower-service", ] [[package]] name = "tracing" version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", ] [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys 0.61.2", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-registry" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ "windows-link", "windows-result", "windows-strings", ] [[package]] name = "windows-result" version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ "windows-link", ] [[package]] name = "windows-targets" version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ "windows-link", "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" hyper-util-0.1.19/Cargo.toml0000644000000111131046102023000112430ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.63" name = "hyper-util" version = "0.1.19" authors = ["Sean McArthur "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "hyper utilities" homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper-util" readme = "README.md" keywords = [ "http", "hyper", "hyperium", ] categories = [ "network-programming", "web-programming::http-client", "web-programming::http-server", ] license = "MIT" repository = "https://github.com/hyperium/hyper-util" [package.metadata.docs.rs] features = ["full"] rustdoc-args = [ "--cfg", "docsrs", ] [features] __internal_happy_eyeballs_tests = [] client = [ "hyper/client", "tokio/net", "dep:tracing", "dep:futures-channel", "dep:tower-service", ] client-legacy = [ "client", "dep:socket2", "tokio/sync", "dep:libc", "dep:futures-util", ] client-pool = [ "client", "dep:futures-util", "dep:tower-layer", ] client-proxy = [ "client", "dep:base64", "dep:ipnet", "dep:percent-encoding", ] client-proxy-system = [ "dep:system-configuration", "dep:windows-registry", ] default = [] full = [ "client", "client-legacy", "client-pool", "client-proxy", "client-proxy-system", "server", "server-auto", "server-graceful", "service", "http1", "http2", "tokio", "tracing", ] http1 = ["hyper/http1"] http2 = ["hyper/http2"] server = ["hyper/server"] server-auto = [ "server", "http1", "http2", ] server-graceful = [ "server", "tokio/sync", ] service = ["dep:tower-service"] tokio = [ "dep:tokio", "tokio/rt", "tokio/time", ] tracing = ["dep:tracing"] [lib] name = "hyper_util" path = "src/lib.rs" [[example]] name = "client" path = "examples/client.rs" required-features = [ "client-legacy", "http1", "tokio", ] [[example]] name = "server" path = "examples/server.rs" required-features = [ "server", "http1", "tokio", ] [[example]] name = "server_graceful" path = "examples/server_graceful.rs" required-features = [ "tokio", "server-graceful", "server-auto", 
] [[test]] name = "legacy_client" path = "tests/legacy_client.rs" [[test]] name = "proxy" path = "tests/proxy.rs" [dependencies.base64] version = "0.22" optional = true [dependencies.bytes] version = "1.7.1" [dependencies.futures-channel] version = "0.3" optional = true [dependencies.futures-core] version = "0.3" [dependencies.futures-util] version = "0.3.16" optional = true default-features = false [dependencies.http] version = "1.0" [dependencies.http-body] version = "1.0.0" [dependencies.hyper] version = "1.8.0" [dependencies.ipnet] version = "2.9" optional = true [dependencies.libc] version = "0.2" optional = true [dependencies.percent-encoding] version = "2.3" optional = true [dependencies.pin-project-lite] version = "0.2.4" [dependencies.socket2] version = ">=0.5.9, <0.7" features = ["all"] optional = true [dependencies.tokio] version = "1" optional = true default-features = false [dependencies.tower-layer] version = "0.3" optional = true [dependencies.tower-service] version = "0.3" optional = true [dependencies.tracing] version = "0.1" features = ["std"] optional = true default-features = false [dev-dependencies.bytes] version = "1" [dev-dependencies.futures-util] version = "0.3.16" features = ["alloc"] default-features = false [dev-dependencies.http-body-util] version = "0.1.0" [dev-dependencies.hyper] version = "1.4.0" features = ["full"] [dev-dependencies.pretty_env_logger] version = "0.5" [dev-dependencies.tokio] version = "1" features = [ "macros", "test-util", "signal", ] [dev-dependencies.tokio-test] version = "0.4" [dev-dependencies.tower-test] version = "0.4" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink] version = "0.35.0" [target.'cfg(target_os = "macos")'.dependencies.system-configuration] version = ">=0.5, <0.7" optional = true [target."cfg(windows)".dependencies.windows-registry] version = ">=0.3, <0.7" optional = true 
hyper-util-0.1.19/Cargo.toml.orig000064400000000000000000000064101046102023000147060ustar 00000000000000[package] name = "hyper-util" version = "0.1.19" description = "hyper utilities" readme = "README.md" homepage = "https://hyper.rs" documentation = "https://docs.rs/hyper-util" repository = "https://github.com/hyperium/hyper-util" license = "MIT" authors = ["Sean McArthur "] keywords = ["http", "hyper", "hyperium"] categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] edition = "2021" rust-version = "1.63" [package.metadata.docs.rs] features = ["full"] rustdoc-args = ["--cfg", "docsrs"] [dependencies] base64 = { version = "0.22", optional = true } bytes = "1.7.1" futures-channel = { version = "0.3", optional = true } futures-core = { version = "0.3" } futures-util = { version = "0.3.16", default-features = false, optional = true } http = "1.0" http-body = "1.0.0" hyper = "1.8.0" ipnet = { version = "2.9", optional = true } libc = { version = "0.2", optional = true } percent-encoding = { version = "2.3", optional = true } pin-project-lite = "0.2.4" socket2 = { version = ">=0.5.9, <0.7", optional = true, features = ["all"] } tracing = { version = "0.1", default-features = false, features = ["std"], optional = true } tokio = { version = "1", optional = true, default-features = false } tower-layer = { version = "0.3", optional = true } tower-service = { version = "0.3", optional = true } [dev-dependencies] hyper = { version = "1.4.0", features = ["full"] } bytes = "1" futures-util = { version = "0.3.16", default-features = false, features = ["alloc"] } http-body-util = "0.1.0" tokio = { version = "1", features = ["macros", "test-util", "signal"] } tokio-test = "0.4" tower-test = "0.4" pretty_env_logger = "0.5" [target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies] pnet_datalink = "0.35.0" [target.'cfg(target_os = "macos")'.dependencies] system-configuration = { version = ">=0.5, <0.7", optional = 
true } [target.'cfg(windows)'.dependencies] windows-registry = { version = ">=0.3, <0.7", optional = true } [features] default = [] # Shorthand to enable everything full = [ "client", "client-legacy", "client-pool", "client-proxy", "client-proxy-system", "server", "server-auto", "server-graceful", "service", "http1", "http2", "tokio", "tracing", ] client = ["hyper/client", "tokio/net", "dep:tracing", "dep:futures-channel", "dep:tower-service"] client-legacy = ["client", "dep:socket2", "tokio/sync", "dep:libc", "dep:futures-util"] client-pool = ["client", "dep:futures-util", "dep:tower-layer"] client-proxy = ["client", "dep:base64", "dep:ipnet", "dep:percent-encoding"] client-proxy-system = ["dep:system-configuration", "dep:windows-registry"] server = ["hyper/server"] server-auto = ["server", "http1", "http2"] server-graceful = ["server", "tokio/sync"] service = ["dep:tower-service"] http1 = ["hyper/http1"] http2 = ["hyper/http2"] tokio = ["dep:tokio", "tokio/rt", "tokio/time"] tracing = ["dep:tracing"] # internal features used in CI __internal_happy_eyeballs_tests = [] [[example]] name = "client" required-features = ["client-legacy", "http1", "tokio"] [[example]] name = "server" required-features = ["server", "http1", "tokio"] [[example]] name = "server_graceful" required-features = ["tokio", "server-graceful", "server-auto"] hyper-util-0.1.19/LICENSE000064400000000000000000000020461046102023000130250ustar 00000000000000Copyright (c) 2023-2025 Sean McArthur Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all 
copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. hyper-util-0.1.19/README.md000064400000000000000000000006641046102023000133030ustar 00000000000000# hyper-util [![crates.io](https://img.shields.io/crates/v/hyper-util.svg)](https://crates.io/crates/hyper-util) [![Released API docs](https://docs.rs/hyper-util/badge.svg)](https://docs.rs/hyper-util) [![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE) A collection of utilities to do common things with [hyper](https://hyper.rs). ## License This project is licensed under the [MIT license](./LICENSE). hyper-util-0.1.19/examples/client.rs000064400000000000000000000020561046102023000154630ustar 00000000000000use std::env; use http_body_util::Empty; use hyper::Request; use hyper_util::client::legacy::{connect::HttpConnector, Client}; #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let url = match env::args().nth(1) { Some(url) => url, None => { eprintln!("Usage: client "); return Ok(()); } }; // HTTPS requires picking a TLS implementation, so give a better // warning if the user tries to request an 'https' URL. 
let url = url.parse::()?; if url.scheme_str() != Some("http") { eprintln!("This example only works with 'http' URLs."); return Ok(()); } let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(HttpConnector::new()); let req = Request::builder() .uri(url) .body(Empty::::new())?; let resp = client.request(req).await?; eprintln!("{:?} {:?}", resp.version(), resp.status()); eprintln!("{:#?}", resp.headers()); Ok(()) } hyper-util-0.1.19/examples/server.rs000064400000000000000000000053421046102023000155140ustar 00000000000000//! This example runs a server that responds to any request with "Hello, world!" use std::{convert::Infallible, error::Error}; use bytes::Bytes; use http::{header::CONTENT_TYPE, Request, Response}; use http_body_util::{combinators::BoxBody, BodyExt, Full}; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ rt::{TokioExecutor, TokioIo}, server::conn::auto::Builder, }; use tokio::{net::TcpListener, task::JoinSet}; /// Function from an incoming request to an outgoing response /// /// This function gets turned into a [`hyper::service::Service`] later via /// [`service_fn`]. Instead of doing this, you could also write a type that /// implements [`hyper::service::Service`] directly and pass that in place of /// writing a function like this and calling [`service_fn`]. /// /// This function could use [`Full`] as the body type directly since that's /// the only type that can be returned in this case, but this uses [`BoxBody`] /// anyway for demonstration purposes, since this is what's usually used when /// writing a more complex webserver library. 
async fn handle_request( _request: Request, ) -> Result>, Infallible> { let response = Response::builder() .header(CONTENT_TYPE, "text/plain") .body(Full::new(Bytes::from("Hello, world!\n")).boxed()) .expect("values provided to the builder should be valid"); Ok(response) } #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let listen_addr = "127.0.0.1:8000"; let tcp_listener = TcpListener::bind(listen_addr).await?; println!("listening on http://{listen_addr}"); let mut join_set = JoinSet::new(); loop { let (stream, addr) = match tcp_listener.accept().await { Ok(x) => x, Err(e) => { eprintln!("failed to accept connection: {e}"); continue; } }; let serve_connection = async move { println!("handling a request from {addr}"); let result = Builder::new(TokioExecutor::new()) .serve_connection(TokioIo::new(stream), service_fn(handle_request)) .await; if let Err(e) = result { eprintln!("error serving {addr}: {e}"); } println!("handled a request from {addr}"); }; join_set.spawn(serve_connection); } // If you add a method for breaking the above loop (i.e. graceful shutdown), // then you may also want to wait for all existing connections to finish // being served before terminating the program, which can be done like this: // // while let Some(_) = join_set.join_next().await {} } hyper-util-0.1.19/examples/server_graceful.rs000064400000000000000000000044141046102023000173630ustar 00000000000000use bytes::Bytes; use std::convert::Infallible; use std::pin::pin; use std::time::Duration; use tokio::net::TcpListener; #[tokio::main(flavor = "current_thread")] async fn main() -> Result<(), Box> { let listener = TcpListener::bind("127.0.0.1:8080").await?; let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new()); let graceful = hyper_util::server::graceful::GracefulShutdown::new(); let mut ctrl_c = pin!(tokio::signal::ctrl_c()); loop { tokio::select! 
{ conn = listener.accept() => { let (stream, peer_addr) = match conn { Ok(conn) => conn, Err(e) => { eprintln!("accept error: {}", e); tokio::time::sleep(Duration::from_secs(1)).await; continue; } }; eprintln!("incomming connection accepted: {}", peer_addr); let stream = hyper_util::rt::TokioIo::new(Box::pin(stream)); let conn = server.serve_connection_with_upgrades(stream, hyper::service::service_fn(|_| async move { tokio::time::sleep(Duration::from_secs(5)).await; // emulate slow request let body = http_body_util::Full::::from("Hello World!".to_owned()); Ok::<_, Infallible>(http::Response::new(body)) })); let conn = graceful.watch(conn.into_owned()); tokio::spawn(async move { if let Err(err) = conn.await { eprintln!("connection error: {}", err); } eprintln!("connection dropped: {}", peer_addr); }); }, _ = ctrl_c.as_mut() => { drop(listener); eprintln!("Ctrl-C received, starting shutdown"); break; } } } tokio::select! { _ = graceful.shutdown() => { eprintln!("Gracefully shutdown!"); }, _ = tokio::time::sleep(Duration::from_secs(10)) => { eprintln!("Waited 10 seconds for graceful shutdown, aborting..."); } } Ok(()) } hyper-util-0.1.19/src/client/legacy/client.rs000064400000000000000000001722561046102023000171700ustar 00000000000000//! The legacy HTTP Client from 0.14.x //! //! This `Client` will eventually be deconstructed into more composable parts. //! For now, to enable people to use hyper 1.0 quicker, this `Client` exists //! in much the same way it did in hyper 0.14. 
use std::error::Error as StdError; use std::fmt; use std::future::Future; use std::pin::Pin; use std::task::{self, Poll}; use std::time::Duration; use futures_util::future::{self, Either, FutureExt, TryFutureExt}; use http::uri::Scheme; use hyper::client::conn::TrySendError as ConnTrySendError; use hyper::header::{HeaderValue, HOST}; use hyper::rt::Timer; use hyper::{body::Body, Method, Request, Response, Uri, Version}; use tracing::{debug, trace, warn}; use super::connect::capture::CaptureConnectionExtension; #[cfg(feature = "tokio")] use super::connect::HttpConnector; use super::connect::{Alpn, Connect, Connected, Connection}; use super::pool::{self, Ver}; use crate::common::future::poll_fn; use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper}; type BoxSendFuture = Pin + Send>>; /// A Client to make outgoing HTTP requests. /// /// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The /// underlying connection pool will be reused. #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { config: Config, connector: C, exec: Exec, #[cfg(feature = "http1")] h1_builder: hyper::client::conn::http1::Builder, #[cfg(feature = "http2")] h2_builder: hyper::client::conn::http2::Builder, pool: pool::Pool, PoolKey>, } #[derive(Clone, Copy, Debug)] struct Config { retry_canceled_requests: bool, set_host: bool, ver: Ver, } /// Client errors pub struct Error { kind: ErrorKind, source: Option>, #[cfg(any(feature = "http1", feature = "http2"))] connect_info: Option, } #[derive(Debug)] enum ErrorKind { Canceled, ChannelClosed, Connect, UserUnsupportedRequestMethod, UserUnsupportedVersion, UserAbsoluteUriRequired, SendRequest, } macro_rules! e { ($kind:ident) => { Error { kind: ErrorKind::$kind, source: None, connect_info: None, } }; ($kind:ident, $src:expr) => { Error { kind: ErrorKind::$kind, source: Some($src.into()), connect_info: None, } }; } // We might change this... 
:shrug: type PoolKey = (http::uri::Scheme, http::uri::Authority); enum TrySendError { Retryable { error: Error, req: Request, connection_reused: bool, }, Nope(Error), } /// A `Future` that will resolve to an HTTP Response. /// /// This is returned by `Client::request` (and `Client::get`). #[must_use = "futures do nothing unless polled"] pub struct ResponseFuture { inner: SyncWrapper< Pin, Error>> + Send>>, >, } // ===== impl Client ===== impl Client<(), ()> { /// Create a builder to configure a new `Client`. /// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use std::time::Duration; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::{TokioExecutor, TokioTimer}; /// /// let client = Client::builder(TokioExecutor::new()) /// .pool_timer(TokioTimer::new()) /// .pool_idle_timeout(Duration::from_secs(30)) /// .http2_only(true) /// .build_http(); /// # let infer: Client<_, http_body_util::Full> = client; /// # drop(infer); /// # } /// # fn main() {} /// ``` pub fn builder(executor: E) -> Builder where E: hyper::rt::Executor + Send + Sync + Clone + 'static, { Builder::new(executor) } } impl Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { /// Send a `GET` request to the supplied `Uri`. /// /// # Note /// /// This requires that the `Body` type have a `Default` implementation. /// It *should* return an "empty" version of itself, such that /// `Body::is_end_stream` is `true`. 
/// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use hyper::Uri; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// use bytes::Bytes; /// use http_body_util::Full; /// /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); /// /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); /// # } /// # fn main() {} /// ``` pub fn get(&self, uri: Uri) -> ResponseFuture where B: Default, { let body = B::default(); if !body.is_end_stream() { warn!("default Body used for get() does not return true for is_end_stream"); } let mut req = Request::new(body); *req.uri_mut() = uri; self.request(req) } /// Send a constructed `Request` using this `Client`. /// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use hyper::{Method, Request}; /// use hyper_util::client::legacy::Client; /// use http_body_util::Full; /// use hyper_util::rt::TokioExecutor; /// use bytes::Bytes; /// /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); /// /// let req: Request> = Request::builder() /// .method(Method::POST) /// .uri("http://httpbin.org/post") /// .body(Full::from("Hallo!")) /// .expect("request builder"); /// /// let future = client.request(req); /// # } /// # fn main() {} /// ``` pub fn request(&self, mut req: Request) -> ResponseFuture { let is_http_connect = req.method() == Method::CONNECT; match req.version() { Version::HTTP_11 => (), Version::HTTP_10 => { if is_http_connect { warn!("CONNECT is not allowed for HTTP/1.0"); return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod))); } } Version::HTTP_2 => (), // completely unsupported HTTP version (like HTTP/0.9)! 
other => return ResponseFuture::error_version(other), }; let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { Ok(s) => s, Err(err) => { return ResponseFuture::new(future::err(err)); } }; ResponseFuture::new(self.clone().send_request(req, pool_key)) } async fn send_request( self, mut req: Request, pool_key: PoolKey, ) -> Result, Error> { let uri = req.uri().clone(); loop { req = match self.try_send_request(req, pool_key.clone()).await { Ok(resp) => return Ok(resp), Err(TrySendError::Nope(err)) => return Err(err), Err(TrySendError::Retryable { mut req, error, connection_reused, }) => { if !self.config.retry_canceled_requests || !connection_reused { // if client disabled, don't retry // a fresh connection means we definitely can't retry return Err(error); } trace!( "unstarted request canceled, trying again (reason={:?})", error ); *req.uri_mut() = uri.clone(); req } } } } async fn try_send_request( &self, mut req: Request, pool_key: PoolKey, ) -> Result, TrySendError> { let mut pooled = self .connection_for(pool_key) .await // `connection_for` already retries checkout errors, so if // it returns an error, there's not much else to retry .map_err(TrySendError::Nope)?; if let Some(conn) = req.extensions_mut().get_mut::() { conn.set(&pooled.conn_info); } if pooled.is_http1() { if req.version() == Version::HTTP_2 { warn!("Connection is HTTP/1, but request requires HTTP/2"); return Err(TrySendError::Nope( e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), )); } if self.config.set_host { let uri = req.uri().clone(); req.headers_mut().entry(HOST).or_insert_with(|| { let hostname = uri.host().expect("authority implies host"); if let Some(port) = get_non_default_port(&uri) { let s = format!("{hostname}:{port}"); HeaderValue::from_maybe_shared(bytes::Bytes::from(s)) } else { HeaderValue::from_str(hostname) } .expect("uri host is valid header value") }); } // CONNECT always sends authority-form, so check it first... 
if req.method() == Method::CONNECT { authority_form(req.uri_mut()); } else if pooled.conn_info.is_proxied { absolute_form(req.uri_mut()); } else { origin_form(req.uri_mut()); } } else if req.method() == Method::CONNECT && !pooled.is_http2() { authority_form(req.uri_mut()); } let mut res = match pooled.try_send_request(req).await { Ok(res) => res, Err(mut err) => { return if let Some(req) = err.take_message() { Err(TrySendError::Retryable { connection_reused: pooled.is_reused(), error: e!(Canceled, err.into_error()) .with_connect_info(pooled.conn_info.clone()), req, }) } else { Err(TrySendError::Nope( e!(SendRequest, err.into_error()) .with_connect_info(pooled.conn_info.clone()), )) } } }; // If the Connector included 'extra' info, add to Response... if let Some(extra) = &pooled.conn_info.extra { extra.set(res.extensions_mut()); } // If pooled is HTTP/2, we can toss this reference immediately. // // when pooled is dropped, it will try to insert back into the // pool. To delay that, spawn a future that completes once the // sender is ready again. // // This *should* only be once the related `Connection` has polled // for a new request to start. // // It won't be ready if there is a body to stream. 
if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { drop(pooled); } else { let on_idle = poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); self.exec.execute(on_idle); } Ok(res) } async fn connection_for( &self, pool_key: PoolKey, ) -> Result, PoolKey>, Error> { loop { match self.one_connection_for(pool_key.clone()).await { Ok(pooled) => return Ok(pooled), Err(ClientConnectError::Normal(err)) => return Err(err), Err(ClientConnectError::CheckoutIsClosed(reason)) => { if !self.config.retry_canceled_requests { return Err(e!(Connect, reason)); } trace!( "unstarted request canceled, trying again (reason={:?})", reason, ); continue; } }; } } async fn one_connection_for( &self, pool_key: PoolKey, ) -> Result, PoolKey>, ClientConnectError> { // Return a single connection if pooling is not enabled if !self.pool.is_enabled() { return self .connect_to(pool_key) .await .map_err(ClientConnectError::Normal); } // This actually races 2 different futures to try to get a ready // connection the fastest, and to reduce connection churn. // // - If the pool has an idle connection waiting, that's used // immediately. // - Otherwise, the Connector is asked to start connecting to // the destination Uri. // - Meanwhile, the pool Checkout is watching to see if any other // request finishes and tries to insert an idle connection. // - If a new connection is started, but the Checkout wins after // (an idle connection became available first), the started // connection future is spawned into the runtime to complete, // and then be inserted into the pool as an idle connection. let checkout = self.pool.checkout(pool_key.clone()); let connect = self.connect_to(pool_key); let is_ver_h2 = self.config.ver == Ver::Http2; // The order of the `select` is depended on below... match future::select(checkout, connect).await { // Checkout won, connect future may have been started or not. // // If it has, let it finish and insert back into the pool, // so as to not waste the socket... 
Either::Left((Ok(checked_out), connecting)) => { // This depends on the `select` above having the correct // order, such that if the checkout future were ready // immediately, the connect future will never have been // started. // // If it *wasn't* ready yet, then the connect future will // have been started... if connecting.started() { let bg = connecting .map_err(|err| { trace!("background connect error: {}", err); }) .map(|_pooled| { // dropping here should just place it in // the Pool for us... }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... self.exec.execute(bg); } Ok(checked_out) } // Connect won, checkout can just be dropped. Either::Right((Ok(connected), _checkout)) => Ok(connected), // Either checkout or connect could get canceled: // // 1. Connect is canceled if this is HTTP/2 and there is // an outstanding HTTP/2 connecting task. // 2. Checkout is canceled if the pool cannot deliver an // idle connection reliably. // // In both cases, we should just wait for the other future. 
Either::Left((Err(err), connecting)) => { if err.is_canceled() { connecting.await.map_err(ClientConnectError::Normal) } else { Err(ClientConnectError::Normal(e!(Connect, err))) } } Either::Right((Err(err), checkout)) => { if err.is_canceled() { checkout.await.map_err(move |err| { if is_ver_h2 && err.is_canceled() { ClientConnectError::CheckoutIsClosed(err) } else { ClientConnectError::Normal(e!(Connect, err)) } }) } else { Err(ClientConnectError::Normal(err)) } } } } #[cfg(any(feature = "http1", feature = "http2"))] fn connect_to( &self, pool_key: PoolKey, ) -> impl Lazy, PoolKey>, Error>> + Send + Unpin { let executor = self.exec.clone(); let pool = self.pool.clone(); #[cfg(feature = "http1")] let h1_builder = self.h1_builder.clone(); #[cfg(feature = "http2")] let h2_builder = self.h2_builder.clone(); let ver = self.config.ver; let is_ver_h2 = ver == Ver::Http2; let connector = self.connector.clone(); let dst = domain_as_uri(pool_key.clone()); hyper_lazy(move || { // Try to take a "connecting lock". // // If the pool_key is for HTTP/2, and there is already a // connection being established, then this can't take a // second lock. The "connect_to" future is Canceled. let connecting = match pool.connecting(&pool_key, ver) { Some(lock) => lock, None => { let canceled = e!(Canceled); // TODO //crate::Error::new_canceled().with("HTTP/2 connection in progress"); return Either::Right(future::err(canceled)); } }; Either::Left( connector .connect(super::connect::sealed::Internal, dst) .map_err(|src| e!(Connect, src)) .and_then(move |io| { let connected = io.connected(); // If ALPN is h2 and we aren't http2_only already, // then we need to convert our pool checkout into // a single HTTP2 one. let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { match connecting.alpn_h2(&pool) { Some(lock) => { trace!("ALPN negotiated h2, updating pool"); lock } None => { // Another connection has already upgraded, // the pool checkout should finish up for us. 
let canceled = e!(Canceled, "ALPN upgraded to HTTP/2"); return Either::Right(future::err(canceled)); } } } else { connecting }; #[cfg_attr(not(feature = "http2"), allow(unused))] let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; Either::Left(Box::pin(async move { let tx = if is_h2 { #[cfg(feature = "http2")] { let (mut tx, conn) = h2_builder.handshake(io).await.map_err(Error::tx)?; trace!( "http2 handshake complete, spawning background dispatcher task" ); executor.execute( conn.map_err(|e| debug!("client connection error: {}", e)) .map(|_| ()), ); // Wait for 'conn' to ready up before we // declare this tx as usable tx.ready().await.map_err(Error::tx)?; PoolTx::Http2(tx) } #[cfg(not(feature = "http2"))] panic!("http2 feature is not enabled"); } else { #[cfg(feature = "http1")] { // Perform the HTTP/1.1 handshake on the provided I/O stream. // Uses the h1_builder to establish a connection, returning a sender (tx) for requests // and a connection task (conn) that manages the connection lifecycle. let (mut tx, conn) = h1_builder.handshake(io).await.map_err(crate::client::legacy::client::Error::tx)?; // Log that the HTTP/1.1 handshake has completed successfully. // This indicates the connection is established and ready for request processing. trace!( "http1 handshake complete, spawning background dispatcher task" ); // Create a oneshot channel to communicate errors from the connection task. // err_tx sends errors from the connection task, and err_rx receives them // to correlate connection failures with request readiness errors. let (err_tx, err_rx) = tokio::sync::oneshot::channel(); // Spawn the connection task in the background using the executor. // The task manages the HTTP/1.1 connection, including upgrades (e.g., WebSocket). // Errors are sent via err_tx to ensure they can be checked if the sender (tx) fails. executor.execute( conn.with_upgrades() .map_err(|e| { // Log the connection error at debug level for diagnostic purposes. 
debug!("client connection error: {:?}", e); // Log that the error is being sent to the error channel. trace!("sending connection error to error channel"); // Send the error via the oneshot channel, ignoring send failures // (e.g., if the receiver is dropped, which is handled later). let _ =err_tx.send(e); }) .map(|_| ()), ); // Log that the client is waiting for the connection to be ready. // Readiness indicates the sender (tx) can accept a request without blocking. trace!("waiting for connection to be ready"); // Check if the sender is ready to accept a request. // This ensures the connection is fully established before proceeding. // aka: // Wait for 'conn' to ready up before we // declare this tx as usable match tx.ready().await { // If ready, the connection is usable for sending requests. Ok(_) => { // Log that the connection is ready for use. trace!("connection is ready"); // Drop the error receiver, as it’s no longer needed since the sender is ready. // This prevents waiting for errors that won’t occur in a successful case. drop(err_rx); // Wrap the sender in PoolTx::Http1 for use in the connection pool. PoolTx::Http1(tx) } // If the sender fails with a closed channel error, check for a specific connection error. // This distinguishes between a vague ChannelClosed error and an actual connection failure. Err(e) if e.is_closed() => { // Log that the channel is closed, indicating a potential connection issue. trace!("connection channel closed, checking for connection error"); // Check the oneshot channel for a specific error from the connection task. match err_rx.await { // If an error was received, it’s a specific connection failure. Ok(err) => { // Log the specific connection error for diagnostics. trace!("received connection error: {:?}", err); // Return the error wrapped in Error::tx to propagate it. return Err(crate::client::legacy::client::Error::tx(err)); } // If the error channel is closed, no specific error was sent. 
// Fall back to the vague ChannelClosed error. Err(_) => { // Log that the error channel is closed, indicating no specific error. trace!("error channel closed, returning the vague ChannelClosed error"); // Return the original error wrapped in Error::tx. return Err(crate::client::legacy::client::Error::tx(e)); } } } // For other errors (e.g., timeout, I/O issues), propagate them directly. // These are not ChannelClosed errors and don’t require error channel checks. Err(e) => { // Log the specific readiness failure for diagnostics. trace!("connection readiness failed: {:?}", e); // Return the error wrapped in Error::tx to propagate it. return Err(crate::client::legacy::client::Error::tx(e)); } } } #[cfg(not(feature = "http1"))] { panic!("http1 feature is not enabled"); } }; Ok(pool.pooled( connecting, PoolClient { conn_info: connected, tx, }, )) })) }), ) }) } } impl tower_service::Service> for Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { type Response = Response; type Error = Error; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { self.request(req) } } impl tower_service::Service> for &'_ Client where C: Connect + Clone + Send + Sync + 'static, B: Body + Send + 'static + Unpin, B::Data: Send, B::Error: Into>, { type Response = Response; type Error = Error; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request) -> Self::Future { self.request(req) } } impl Clone for Client { fn clone(&self) -> Client { Client { config: self.config, exec: self.exec.clone(), #[cfg(feature = "http1")] h1_builder: self.h1_builder.clone(), #[cfg(feature = "http2")] h2_builder: self.h2_builder.clone(), connector: self.connector.clone(), pool: self.pool.clone(), } } } impl fmt::Debug for Client { fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Client").finish() } } // ===== impl ResponseFuture ===== impl ResponseFuture { fn new(value: F) -> Self where F: Future, Error>> + Send + 'static, { Self { inner: SyncWrapper::new(Box::pin(value)), } } fn error_version(ver: Version) -> Self { warn!("Request has unsupported version \"{:?}\"", ver); ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion)))) } } impl fmt::Debug for ResponseFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Future") } } impl Future for ResponseFuture { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.inner.get_mut().as_mut().poll(cx) } } // ===== impl PoolClient ===== // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] struct PoolClient { conn_info: Connected, tx: PoolTx, } enum PoolTx { #[cfg(feature = "http1")] Http1(hyper::client::conn::http1::SendRequest), #[cfg(feature = "http2")] Http2(hyper::client::conn::http2::SendRequest), } impl PoolClient { fn poll_ready( &mut self, #[allow(unused_variables)] cx: &mut task::Context<'_>, ) -> Poll> { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), #[cfg(feature = "http2")] PoolTx::Http2(_) => Poll::Ready(Ok(())), } } fn is_http1(&self) -> bool { !self.is_http2() } fn is_http2(&self) -> bool { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(_) => false, #[cfg(feature = "http2")] PoolTx::Http2(_) => true, } } fn is_poisoned(&self) -> bool { self.conn_info.poisoned.poisoned() } fn is_ready(&self) -> bool { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref tx) => tx.is_ready(), #[cfg(feature = "http2")] PoolTx::Http2(ref tx) => tx.is_ready(), } } } impl PoolClient { fn try_send_request( &mut self, req: Request, ) -> impl Future, ConnTrySendError>>> where B: Send, { #[cfg(all(feature = 
"http1", feature = "http2"))] return match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), #[cfg(feature = "http2")] PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)), }; #[cfg(feature = "http1")] #[cfg(not(feature = "http2"))] return match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(ref mut tx) => tx.try_send_request(req), }; #[cfg(not(feature = "http1"))] #[cfg(feature = "http2")] return match self.tx { #[cfg(feature = "http2")] PoolTx::Http2(ref mut tx) => tx.try_send_request(req), }; } } impl pool::Poolable for PoolClient where B: Send + 'static, { fn is_open(&self) -> bool { !self.is_poisoned() && self.is_ready() } fn reserve(self) -> pool::Reservation { match self.tx { #[cfg(feature = "http1")] PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { conn_info: self.conn_info, tx: PoolTx::Http1(tx), }), #[cfg(feature = "http2")] PoolTx::Http2(tx) => { let b = PoolClient { conn_info: self.conn_info.clone(), tx: PoolTx::Http2(tx.clone()), }; let a = PoolClient { conn_info: self.conn_info, tx: PoolTx::Http2(tx), }; pool::Reservation::Shared(a, b) } } } fn can_share(&self) -> bool { self.is_http2() } } enum ClientConnectError { Normal(Error), CheckoutIsClosed(pool::Error), } fn origin_form(uri: &mut Uri) { let path = match uri.path_and_query() { Some(path) if path.as_str() != "/" => { let mut parts = ::http::uri::Parts::default(); parts.path_and_query = Some(path.clone()); Uri::from_parts(parts).expect("path is valid uri") } _none_or_just_slash => { debug_assert!(Uri::default() == "/"); Uri::default() } }; *uri = path } fn absolute_form(uri: &mut Uri) { debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); debug_assert!( uri.authority().is_some(), "absolute_form needs an authority" ); } fn authority_form(uri: &mut Uri) { if let Some(path) = uri.path_and_query() { // `https://hyper.rs` would parse with `/` path, don't // annoy people about that... 
if path != "/" { warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); } } *uri = match uri.authority() { Some(auth) => { let mut parts = ::http::uri::Parts::default(); parts.authority = Some(auth.clone()); Uri::from_parts(parts).expect("authority is valid") } None => { unreachable!("authority_form with relative uri"); } }; } fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result { let uri_clone = uri.clone(); match (uri_clone.scheme(), uri_clone.authority()) { (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), (None, Some(auth)) if is_http_connect => { let scheme = match auth.port_u16() { Some(443) => { set_scheme(uri, Scheme::HTTPS); Scheme::HTTPS } _ => { set_scheme(uri, Scheme::HTTP); Scheme::HTTP } }; Ok((scheme, auth.clone())) } _ => { debug!("Client requires absolute-form URIs, received: {:?}", uri); Err(e!(UserAbsoluteUriRequired)) } } } fn domain_as_uri((scheme, auth): PoolKey) -> Uri { http::uri::Builder::new() .scheme(scheme) .authority(auth) .path_and_query("/") .build() .expect("domain is valid Uri") } fn set_scheme(uri: &mut Uri, scheme: Scheme) { debug_assert!( uri.scheme().is_none(), "set_scheme expects no existing scheme" ); let old = std::mem::take(uri); let mut parts: ::http::uri::Parts = old.into(); parts.scheme = Some(scheme); parts.path_and_query = Some("/".parse().expect("slash is a valid path")); *uri = Uri::from_parts(parts).expect("scheme is valid"); } fn get_non_default_port(uri: &Uri) -> Option> { match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { (Some(443), true) => None, (Some(80), false) => None, _ => uri.port(), } } fn is_schema_secure(uri: &Uri) -> bool { uri.scheme_str() .map(|scheme_str| matches!(scheme_str, "wss" | "https")) .unwrap_or_default() } /// A builder to configure a new [`Client`](Client). 
/// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use std::time::Duration; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::TokioExecutor; /// /// let client = Client::builder(TokioExecutor::new()) /// .pool_idle_timeout(Duration::from_secs(30)) /// .http2_only(true) /// .build_http(); /// # let infer: Client<_, http_body_util::Full> = client; /// # drop(infer); /// # } /// # fn main() {} /// ``` #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] #[derive(Clone)] pub struct Builder { client_config: Config, exec: Exec, #[cfg(feature = "http1")] h1_builder: hyper::client::conn::http1::Builder, #[cfg(feature = "http2")] h2_builder: hyper::client::conn::http2::Builder, pool_config: pool::Config, pool_timer: Option, } impl Builder { /// Construct a new Builder. pub fn new(executor: E) -> Self where E: hyper::rt::Executor + Send + Sync + Clone + 'static, { let exec = Exec::new(executor); Self { client_config: Config { retry_canceled_requests: true, set_host: true, ver: Ver::Auto, }, exec: exec.clone(), #[cfg(feature = "http1")] h1_builder: hyper::client::conn::http1::Builder::new(), #[cfg(feature = "http2")] h2_builder: hyper::client::conn::http2::Builder::new(exec), pool_config: pool::Config { idle_timeout: Some(Duration::from_secs(90)), max_idle_per_host: usize::MAX, }, pool_timer: None, } } /// Set an optional timeout for idle sockets being kept-alive. /// A `Timer` is required for this to take effect. See `Builder::pool_timer` /// /// Pass `None` to disable timeout. /// /// Default is 90 seconds. 
/// /// # Example /// /// ``` /// # #[cfg(feature = "tokio")] /// # fn run () { /// use std::time::Duration; /// use hyper_util::client::legacy::Client; /// use hyper_util::rt::{TokioExecutor, TokioTimer}; /// /// let client = Client::builder(TokioExecutor::new()) /// .pool_idle_timeout(Duration::from_secs(30)) /// .pool_timer(TokioTimer::new()) /// .build_http(); /// /// # let infer: Client<_, http_body_util::Full> = client; /// # } /// # fn main() {} /// ``` pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self where D: Into>, { self.pool_config.idle_timeout = val.into(); self } #[doc(hidden)] #[deprecated(note = "renamed to `pool_max_idle_per_host`")] pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { self.pool_config.max_idle_per_host = max_idle; self } /// Sets the maximum idle connection per host allowed in the pool. /// /// Default is `usize::MAX` (no limit). pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { self.pool_config.max_idle_per_host = max_idle; self } // HTTP/1 options /// Sets the exact size of the read buffer to *always* use. /// /// Note that setting this option unsets the `http1_max_buf_size` option. /// /// Default is an adaptive read buffer. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { self.h1_builder.read_buf_exact_size(Some(sz)); self } /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. /// /// Note that setting this option unsets the `http1_read_exact_buf_size` option. /// /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. 
#[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { self.h1_builder.max_buf_size(max); self } /// Set whether HTTP/1 connections will accept spaces between header names /// and the colon that follow them in responses. /// /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when /// parsing. /// /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has /// to say about it: /// /// > No whitespace is allowed between the header field-name and colon. In /// > the past, differences in the handling of such whitespace have led to /// > security vulnerabilities in request routing and response handling. A /// > server MUST reject any received request message that contains /// > whitespace between a header field-name and colon with a response code /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a /// > response message before forwarding the message downstream. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { self.h1_builder .allow_spaces_after_header_name_in_responses(val); self } /// Set whether HTTP/1 connections will accept obsolete line folding for /// header values. /// /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has /// to say about it: /// /// > A server that receives an obs-fold in a request message that is not /// > within a message/http container MUST either reject the message by /// > sending a 400 (Bad Request), preferably with a representation /// > explaining that obsolete line folding is unacceptable, or replace /// > each received obs-fold with one or more SP octets prior to /// > interpreting the field value or forwarding the message downstream. /// /// > A proxy or gateway that receives an obs-fold in a response message /// > that is not within a message/http container MUST either discard the /// > message and replace it with a 502 (Bad Gateway) response, preferably /// > with a representation explaining that unacceptable line folding was /// > received, or replace each received obs-fold with one or more SP /// > octets prior to interpreting the field value or forwarding the /// > message downstream. /// /// > A user agent that receives an obs-fold in a response message that is /// > not within a message/http container MUST replace each received /// > obs-fold with one or more SP octets prior to interpreting the field /// > value. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. /// /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { self.h1_builder .allow_obsolete_multiline_headers_in_responses(val); self } /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. /// /// This mimics the behaviour of major browsers. You probably don't want this. 
/// You should only want this if you are implementing a proxy whose main /// purpose is to sit in front of browsers whose users access arbitrary content /// which may be malformed, and they expect everything that works without /// the proxy to keep working with the proxy. /// /// This option will prevent Hyper's client from returning an error encountered /// when parsing a header, except if the error was caused by the character NUL /// (ASCII code 0), as Chrome specifically always reject those. /// /// The ignorable errors are: /// * empty header names; /// * characters that are not allowed in header names, except for `\0` and `\r`; /// * when `allow_spaces_after_header_name_in_responses` is not enabled, /// spaces and tabs between the header name and the colon; /// * missing colon between header name and colon; /// * characters that are not allowed in header values except for `\0` and `\r`. /// /// If an ignorable error is encountered, the parser tries to find the next /// line in the input to resume parsing the rest of the headers. An error /// will be emitted nonetheless if it finds `\0` or a lone `\r` while /// looking for the next line. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder { self.h1_builder.ignore_invalid_headers_in_responses(val); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force hyper to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. 
In this mode hyper will try to guess which /// mode to use #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { self.h1_builder.writev(enabled); self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { self.h1_builder.title_case_headers(val); self } /// Set whether to support preserving original header cases. /// /// Currently, this will record the original cases received, and store them /// in a private extension on the `Response`. It will also look for and use /// such an extension in any provided `Request`. /// /// Since the relevant extension is still private, there is no way to /// interact with the original cases. The only effect this can have now is /// to forward the cases in a proxy-like fashion. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { self.h1_builder.preserve_header_case(val); self } /// Set the maximum number of headers. /// /// When a response is received, the parser will reserve a buffer to store headers for optimal /// performance. /// /// If client receives more headers than the buffer size, the error "message header too large" /// is returned. /// /// The headers is allocated on the stack by default, which has higher performance. After /// setting this value, headers will be allocated in heap memory, that is, heap memory /// allocation will occur for each response, and there will be a performance drop of about 5%. /// /// Note that this setting does not affect HTTP/2. /// /// Default is 100. 
#[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_max_headers(&mut self, val: usize) -> &mut Self { self.h1_builder.max_headers(val); self } /// Set whether HTTP/0.9 responses should be tolerated. /// /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http09_responses(&mut self, val: bool) -> &mut Self { self.h1_builder.http09_responses(val); self } /// Set whether the connection **must** use HTTP/2. /// /// The destination must either allow HTTP2 Prior Knowledge, or the /// `Connect` should be configured to do use ALPN to upgrade to `h2` /// as part of the connection process. This will not make the `Client` /// utilize ALPN by itself. /// /// Note that setting this to true prevents HTTP/1 from being allowed. /// /// Default is false. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_only(&mut self, val: bool) -> &mut Self { self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; self } /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. /// /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). /// As of v0.4.0, it is 20. /// /// See for more information. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_pending_accept_reset_streams( &mut self, max: impl Into>, ) -> &mut Self { self.h2_builder.max_pending_accept_reset_streams(max.into()); self } /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. 
/// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { self.h2_builder.initial_stream_window_size(sz.into()); self } /// Sets the max connection-level flow control for HTTP2 /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_connection_window_size( &mut self, sz: impl Into>, ) -> &mut Self { self.h2_builder.initial_connection_window_size(sz.into()); self } /// Sets the initial maximum of locally initiated (send) streams. /// /// This value will be overwritten by the value included in the initial /// SETTINGS frame received from the peer as part of a [connection preface]. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. /// /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_max_send_streams( &mut self, initial: impl Into>, ) -> &mut Self { self.h2_builder.initial_max_send_streams(initial); self } /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { self.h2_builder.adaptive_window(enabled); self } /// Sets the maximum frame size to use for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. 
#[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { self.h2_builder.max_frame_size(sz); self } /// Sets the max size of received header frames for HTTP2. /// /// Default is currently 16KB, but can change. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self { self.h2_builder.max_header_list_size(max); self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. #[cfg(feature = "tokio")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_interval( &mut self, interval: impl Into>, ) -> &mut Self { self.h2_builder.keep_alive_interval(interval); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. /// /// Default is 20 seconds. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. #[cfg(feature = "tokio")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.h2_builder.keep_alive_timeout(timeout); self } /// Sets whether HTTP2 keep-alive should apply while the connection is idle. /// /// If disabled, keep-alive pings are only sent while there are open /// request/responses streams. If enabled, pings are also sent when no /// streams are active. Does nothing if `http2_keep_alive_interval` is /// disabled. /// /// Default is `false`. /// /// # Cargo Feature /// /// Requires the `tokio` cargo feature to be enabled. 
#[cfg(feature = "tokio")] #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { self.h2_builder.keep_alive_while_idle(enabled); self } /// Sets the maximum number of HTTP2 concurrent locally reset streams. /// /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more /// details. /// /// The default value is determined by the `h2` crate. /// /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { self.h2_builder.max_concurrent_reset_streams(max); self } /// Provide a timer to be used for h2 /// /// See the documentation of [`h2::client::Builder::timer`] for more /// details. /// /// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { #[cfg(feature = "http2")] self.h2_builder.timer(timer); self } /// Provide a timer to be used for timeouts and intervals in connection pools. pub fn pool_timer(&mut self, timer: M) -> &mut Self where M: Timer + Clone + Send + Sync + 'static, { self.pool_timer = Some(timer::Timer::new(timer.clone())); self } /// Set the maximum write buffer size for each HTTP/2 stream. /// /// Default is currently 1MB, but may change. /// /// # Panics /// /// The value must be no larger than `u32::MAX`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { self.h2_builder.max_send_buf_size(max); self } /// Set whether to retry requests that get disrupted before ever starting /// to write. 
/// /// This means a request that is queued, and gets given an idle, reused /// connection, and then encounters an error immediately as the idle /// connection was found to be unusable. /// /// When this is set to `false`, the related `ResponseFuture` would instead /// resolve to an `Error::Cancel`. /// /// Default is `true`. #[inline] pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { self.client_config.retry_canceled_requests = val; self } /// Set whether to automatically add the `Host` header to requests. /// /// If true, and a request does not include a `Host` header, one will be /// added automatically, derived from the authority of the `Uri`. /// /// Default is `true`. #[inline] pub fn set_host(&mut self, val: bool) -> &mut Self { self.client_config.set_host = val; self } /// Build a client with this configuration and the default `HttpConnector`. #[cfg(feature = "tokio")] pub fn build_http(&self) -> Client where B: Body + Send, B::Data: Send, { let mut connector = HttpConnector::new(); if self.pool_config.is_enabled() { connector.set_keepalive(self.pool_config.idle_timeout); } self.build(connector) } /// Combine the configuration of this builder with a connector to create a `Client`. 
pub fn build(&self, connector: C) -> Client where C: Connect + Clone, B: Body + Send, B::Data: Send, { let exec = self.exec.clone(); let timer = self.pool_timer.clone(); Client { config: self.client_config, exec: exec.clone(), #[cfg(feature = "http1")] h1_builder: self.h1_builder.clone(), #[cfg(feature = "http2")] h2_builder: self.h2_builder.clone(), connector, pool: pool::Pool::new(self.pool_config, exec, timer), } } } impl fmt::Debug for Builder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Builder") .field("client_config", &self.client_config) .field("pool_config", &self.pool_config) .finish() } } // ==== impl Error ==== impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut f = f.debug_tuple("hyper_util::client::legacy::Error"); f.field(&self.kind); if let Some(ref cause) = self.source { f.field(cause); } f.finish() } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "client error ({:?})", self.kind) } } impl StdError for Error { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.source.as_ref().map(|e| &**e as _) } } impl Error { /// Returns true if this was an error from `Connect`. pub fn is_connect(&self) -> bool { matches!(self.kind, ErrorKind::Connect) } /// Returns the info of the client connection on which this error occurred. 
#[cfg(any(feature = "http1", feature = "http2"))] pub fn connect_info(&self) -> Option<&Connected> { self.connect_info.as_ref() } #[cfg(any(feature = "http1", feature = "http2"))] fn with_connect_info(self, connect_info: Connected) -> Self { Self { connect_info: Some(connect_info), ..self } } fn is_canceled(&self) -> bool { matches!(self.kind, ErrorKind::Canceled) } fn tx(src: hyper::Error) -> Self { e!(SendRequest, src) } fn closed(src: hyper::Error) -> Self { e!(ChannelClosed, src) } } hyper-util-0.1.19/src/client/legacy/connect/capture.rs000064400000000000000000000140411046102023000207710ustar 00000000000000use std::{ops::Deref, sync::Arc}; use http::Request; use tokio::sync::watch; use super::Connected; /// [`CaptureConnection`] allows callers to capture [`Connected`] information /// /// To capture a connection for a request, use [`capture_connection`]. #[derive(Debug, Clone)] pub struct CaptureConnection { rx: watch::Receiver>, } /// Capture the connection for a given request /// /// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. /// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon /// as the connection is established. /// /// [`Connection`]: crate::client::legacy::connect::Connection /// /// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. /// /// # Examples /// /// **Synchronous access**: /// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been /// established. This is ideal for situations where you are certain the connection has already /// been established (e.g. after the response future has already completed). 
/// ```rust
/// use hyper_util::client::legacy::connect::capture_connection;
/// let mut request = http::Request::builder()
///   .uri("http://foo.com")
///   .body(())
///   .unwrap();
///
/// let captured_connection = capture_connection(&mut request);
/// // some time later after the request has been sent...
/// let connection_info = captured_connection.connection_metadata();
/// println!("we are connected! {:?}", connection_info.as_ref());
/// ```
///
/// **Asynchronous access**:
/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future that resolves as soon as the
/// connection is available.
///
/// ```rust
/// # #[cfg(feature = "tokio")]
/// # async fn example() {
/// use hyper_util::client::legacy::connect::capture_connection;
/// use hyper_util::client::legacy::Client;
/// use hyper_util::rt::TokioExecutor;
/// use bytes::Bytes;
/// use http_body_util::Empty;
/// let mut request = http::Request::builder()
///   .uri("http://foo.com")
///   .body(Empty::<Bytes>::new())
///   .unwrap();
///
/// let mut captured = capture_connection(&mut request);
/// tokio::task::spawn(async move {
///     let connection_info = captured.wait_for_connection_metadata().await;
///     println!("we are connected! {:?}", connection_info.as_ref());
/// });
///
/// let client = Client::builder(TokioExecutor::new()).build_http();
/// client.request(request).await.expect("request failed");
/// # }
/// ```
pub fn capture_connection<B>(request: &mut Request<B>) -> CaptureConnection {
    let (tx, rx) = CaptureConnection::new();
    // The tx half rides along in the request's extensions; the connection
    // machinery fills it in once a connection is established.
    request.extensions_mut().insert(tx);
    rx
}

/// TxSide for [`CaptureConnection`]
///
/// This is inserted into `Extensions` to allow Hyper to back channel connection info
#[derive(Clone)]
pub(crate) struct CaptureConnectionExtension {
    // Arc so the extension stays `Clone` while all clones feed the same
    // watch channel.
    tx: Arc<watch::Sender<Option<Connected>>>,
}

impl CaptureConnectionExtension {
    /// Record the established connection, waking any task awaiting it.
    pub(crate) fn set(&self, connected: &Connected) {
        // `send_replace` stores the value and notifies all watch receivers.
        self.tx.send_replace(Some(connected.clone()));
    }
}

impl CaptureConnection {
    /// Internal API to create the tx and rx half of [`CaptureConnection`]
    pub(crate) fn new() -> (CaptureConnectionExtension, Self) {
        // Starts as `None` until `CaptureConnectionExtension::set` is called.
        let (tx, rx) = watch::channel(None);
        (
            CaptureConnectionExtension { tx: Arc::new(tx) },
            CaptureConnection { rx },
        )
    }

    /// Retrieve the connection metadata, if available
    pub fn connection_metadata(&self) -> impl Deref<Target = Option<Connected>> + '_ {
        self.rx.borrow()
    }

    /// Wait for the connection to be established
    ///
    /// If a connection was established, this will always return `Some(...)`. If the request never
    /// successfully connected (e.g. DNS resolution failure), this method will never return.
pub async fn wait_for_connection_metadata( &mut self, ) -> impl Deref> + '_ { if self.rx.borrow().is_some() { return self.rx.borrow(); } let _ = self.rx.changed().await; self.rx.borrow() } } #[cfg(test)] mod test { use super::*; #[test] fn test_sync_capture_connection() { let (tx, rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); tx.set(&Connected::new().proxy(true)); assert!(rx .connection_metadata() .as_ref() .expect("connected should be set") .is_proxied()); // ensure it can be called multiple times assert!(rx .connection_metadata() .as_ref() .expect("connected should be set") .is_proxied()); } #[tokio::test] async fn async_capture_connection() { let (tx, mut rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); let test_task = tokio::spawn(async move { assert!(rx .wait_for_connection_metadata() .await .as_ref() .expect("connection should be set") .is_proxied()); // can be awaited multiple times assert!( rx.wait_for_connection_metadata().await.is_some(), "should be awaitable multiple times" ); assert!(rx.connection_metadata().is_some()); }); // can't be finished, we haven't set the connection yet assert!(!test_task.is_finished()); tx.set(&Connected::new().proxy(true)); assert!(test_task.await.is_ok()); } #[tokio::test] async fn capture_connection_sender_side_dropped() { let (tx, mut rx) = CaptureConnection::new(); assert!( rx.connection_metadata().is_none(), "connection has not been set" ); drop(tx); assert!(rx.wait_for_connection_metadata().await.is_none()); } } hyper-util-0.1.19/src/client/legacy/connect/dns.rs000064400000000000000000000237541046102023000201250ustar 00000000000000//! DNS Resolution used by the `HttpConnector`. //! //! This module contains: //! //! - A [`GaiResolver`] that is the default resolver for the `HttpConnector`. //! - The `Name` type used as an argument to custom resolvers. //! //! # Resolvers are `Service`s //! //! 
A resolver is just a //! `Service>`. //! //! A simple resolver that ignores the name and always returns a specific //! address: //! //! ```rust,ignore //! use std::{convert::Infallible, iter, net::SocketAddr}; //! //! let resolver = tower::service_fn(|_name| async { //! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) //! }); //! ``` use std::error::Error; use std::future::Future; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; use std::pin::Pin; use std::str::FromStr; use std::task::{self, Poll}; use std::{fmt, io, vec}; use tokio::task::JoinHandle; use tower_service::Service; pub(super) use self::sealed::Resolve; /// A domain name to resolve into IP addresses. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Name { host: Box, } /// A resolver using blocking `getaddrinfo` calls in a threadpool. #[derive(Clone)] pub struct GaiResolver { _priv: (), } /// An iterator of IP addresses returned from `getaddrinfo`. pub struct GaiAddrs { inner: SocketAddrs, } /// A future to resolve a name returned by `GaiResolver`. pub struct GaiFuture { inner: JoinHandle>, } impl Name { pub(super) fn new(host: Box) -> Name { Name { host } } /// View the hostname as a string slice. pub fn as_str(&self) -> &str { &self.host } } impl fmt::Debug for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.host, f) } } impl fmt::Display for Name { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.host, f) } } impl FromStr for Name { type Err = InvalidNameError; fn from_str(host: &str) -> Result { // Possibly add validation later Ok(Name::new(host.into())) } } /// Error indicating a given string was not a valid domain name. 
#[derive(Debug)] pub struct InvalidNameError(()); impl fmt::Display for InvalidNameError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Not a valid domain name") } } impl Error for InvalidNameError {} impl GaiResolver { /// Construct a new `GaiResolver`. pub fn new() -> Self { GaiResolver { _priv: () } } } impl Service for GaiResolver { type Response = GaiAddrs; type Error = io::Error; type Future = GaiFuture; fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { Poll::Ready(Ok(())) } fn call(&mut self, name: Name) -> Self::Future { let blocking = tokio::task::spawn_blocking(move || { (&*name.host, 0) .to_socket_addrs() .map(|i| SocketAddrs { iter: i }) }); GaiFuture { inner: blocking } } } impl fmt::Debug for GaiResolver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiResolver") } } impl Future for GaiFuture { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { Pin::new(&mut self.inner).poll(cx).map(|res| match res { Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), Ok(Err(err)) => Err(err), Err(join_err) => { if join_err.is_cancelled() { Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) } else { panic!("gai background task failed: {join_err:?}") } } }) } } impl fmt::Debug for GaiFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiFuture") } } impl Drop for GaiFuture { fn drop(&mut self) { self.inner.abort(); } } impl Iterator for GaiAddrs { type Item = SocketAddr; fn next(&mut self) -> Option { self.inner.next() } } impl fmt::Debug for GaiAddrs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("GaiAddrs") } } pub(super) struct SocketAddrs { iter: vec::IntoIter, } impl SocketAddrs { pub(super) fn new(addrs: Vec) -> Self { SocketAddrs { iter: addrs.into_iter(), } } pub(super) fn try_parse(host: &str, port: u16) -> Option { if let Ok(addr) = host.parse::() { let addr = SocketAddrV4::new(addr, port); return 
Some(SocketAddrs { iter: vec![SocketAddr::V4(addr)].into_iter(), }); } if let Ok(addr) = host.parse::() { let addr = SocketAddrV6::new(addr, port, 0, 0); return Some(SocketAddrs { iter: vec![SocketAddr::V6(addr)].into_iter(), }); } None } #[inline] fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { SocketAddrs::new(self.iter.filter(predicate).collect()) } pub(super) fn split_by_preference( self, local_addr_ipv4: Option, local_addr_ipv6: Option, ) -> (SocketAddrs, SocketAddrs) { match (local_addr_ipv4, local_addr_ipv6) { (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), _ => { let preferring_v6 = self .iter .as_slice() .first() .map(SocketAddr::is_ipv6) .unwrap_or(false); let (preferred, fallback) = self .iter .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) } } } pub(super) fn is_empty(&self) -> bool { self.iter.as_slice().is_empty() } pub(super) fn len(&self) -> usize { self.iter.as_slice().len() } } impl Iterator for SocketAddrs { type Item = SocketAddr; #[inline] fn next(&mut self) -> Option { self.iter.next() } } mod sealed { use std::future::Future; use std::task::{self, Poll}; use super::{Name, SocketAddr}; use tower_service::Service; // "Trait alias" for `Service` pub trait Resolve { type Addrs: Iterator; type Error: Into>; type Future: Future>; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; fn resolve(&mut self, name: Name) -> Self::Future; } impl Resolve for S where S: Service, S::Response: Iterator, S::Error: Into>, { type Addrs = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { Service::poll_ready(self, cx) } fn resolve(&mut self, name: Name) -> Self::Future { Service::call(self, name) } } } pub(super) async fn resolve(resolver: &mut R, name: Name) -> Result 
where R: Resolve, { crate::common::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; resolver.resolve(name).await } #[cfg(test)] mod tests { use super::*; use std::net::{Ipv4Addr, Ipv6Addr}; #[test] fn test_ip_addrs_split_by_preference() { let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); let v4_addr = (ip_v4, 80).into(); let v6_addr = (ip_v6, 80).into(); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(None, None); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(Some(ip_v4), Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.next().unwrap().is_ipv6()); let (mut preferred, mut fallback) = SocketAddrs { iter: vec![v6_addr, v4_addr].into_iter(), } .split_by_preference(Some(ip_v4), Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.next().unwrap().is_ipv4()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(Some(ip_v4), None); assert!(preferred.next().unwrap().is_ipv4()); assert!(fallback.is_empty()); let (mut preferred, fallback) = SocketAddrs { iter: vec![v4_addr, v6_addr].into_iter(), } .split_by_preference(None, Some(ip_v6)); assert!(preferred.next().unwrap().is_ipv6()); assert!(fallback.is_empty()); } #[test] fn test_name_from_str() { const DOMAIN: &str = "test.example.com"; let name = Name::from_str(DOMAIN).expect("Should be a valid domain"); assert_eq!(name.as_str(), DOMAIN); assert_eq!(name.to_string(), DOMAIN); } } 
hyper-util-0.1.19/src/client/legacy/connect/http.rs000064400000000000000000001360201046102023000203070ustar 00000000000000use std::error::Error as StdError; use std::fmt; use std::future::Future; use std::io; use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::pin::Pin; use std::sync::Arc; use std::task::{self, Poll}; use std::time::Duration; use futures_core::ready; use futures_util::future::Either; use http::uri::{Scheme, Uri}; use pin_project_lite::pin_project; use socket2::TcpKeepalive; use tokio::net::{TcpSocket, TcpStream}; use tokio::time::Sleep; use tracing::{debug, trace, warn}; use super::dns::{self, resolve, GaiResolver, Resolve}; use super::{Connected, Connection}; use crate::rt::TokioIo; /// A connector for the `http` scheme. /// /// Performs DNS resolution in a thread pool, and then connects over TCP. /// /// # Note /// /// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes /// transport information such as the remote socket address used. #[derive(Clone)] pub struct HttpConnector { config: Arc, resolver: R, } /// Extra information about the transport when an HttpConnector is used. /// /// # Example /// /// ``` /// # fn doc(res: http::Response<()>) { /// use hyper_util::client::legacy::connect::HttpInfo; /// /// // res = http::Response /// res /// .extensions() /// .get::() /// .map(|info| { /// println!("remote addr = {}", info.remote_addr()); /// }); /// # } /// ``` /// /// # Note /// /// If a different connector is used besides [`HttpConnector`](HttpConnector), /// this value will not exist in the extensions. Consult that specific /// connector to see what "extra" information it might provide to responses. 
#[derive(Clone, Debug)] pub struct HttpInfo { remote_addr: SocketAddr, local_addr: SocketAddr, } #[derive(Clone)] struct Config { connect_timeout: Option, enforce_http: bool, happy_eyeballs_timeout: Option, tcp_keepalive_config: TcpKeepaliveConfig, local_address_ipv4: Option, local_address_ipv6: Option, nodelay: bool, reuse_address: bool, send_buffer_size: Option, recv_buffer_size: Option, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] interface: Option, #[cfg(any( target_os = "illumos", target_os = "ios", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] interface: Option, #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] tcp_user_timeout: Option, } #[derive(Default, Debug, Clone, Copy)] struct TcpKeepaliveConfig { time: Option, interval: Option, retries: Option, } impl TcpKeepaliveConfig { /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
    fn into_tcpkeepalive(self) -> Option<TcpKeepalive> {
        // Track whether any option was actually applied; socket2's builder
        // offers no "is empty" query, so we remember it ourselves.
        let mut dirty = false;
        let mut ka = TcpKeepalive::new();
        if let Some(time) = self.time {
            ka = ka.with_time(time);
            dirty = true
        }
        if let Some(interval) = self.interval {
            ka = Self::ka_with_interval(ka, interval, &mut dirty)
        };
        if let Some(retries) = self.retries {
            ka = Self::ka_with_retries(ka, retries, &mut dirty)
        };
        // Only return a config when at least one supported option was set,
        // so callers can skip the setsockopt calls entirely otherwise.
        if dirty {
            Some(ka)
        } else {
            None
        }
    }

    // Platform-gated helper: only marks `dirty` on targets where socket2
    // supports a keepalive interval.
    #[cfg(
        // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525
        any(
            target_os = "android",
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "fuchsia",
            target_os = "illumos",
            target_os = "ios",
            target_os = "visionos",
            target_os = "linux",
            target_os = "macos",
            target_os = "netbsd",
            target_os = "tvos",
            target_os = "watchos",
            target_os = "windows",
        )
    )]
    fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive {
        *dirty = true;
        ka.with_interval(interval)
    }

    #[cfg(not(
        // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525
        any(
            target_os = "android",
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "fuchsia",
            target_os = "illumos",
            target_os = "ios",
            target_os = "visionos",
            target_os = "linux",
            target_os = "macos",
            target_os = "netbsd",
            target_os = "tvos",
            target_os = "watchos",
            target_os = "windows",
        )
    ))]
    fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
        ka // no-op as keepalive interval is not supported on this platform
    }

    // Platform-gated helper: only marks `dirty` on targets where socket2
    // supports a keepalive retry count.
    #[cfg(
        // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570
        any(
            target_os = "android",
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "fuchsia",
            target_os = "illumos",
            target_os = "ios",
            target_os = "visionos",
            target_os = "linux",
            target_os = "macos",
            target_os = "netbsd",
            target_os = "tvos",
            target_os = "watchos",
        )
    )]
    fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive {
        *dirty = true;
        ka.with_retries(retries)
    }

    #[cfg(not(
        // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570
        any(
            target_os = "android",
            target_os = "dragonfly",
            target_os = "freebsd",
            target_os = "fuchsia",
            target_os = "illumos",
            target_os = "ios",
            target_os = "visionos",
            target_os = "linux",
            target_os = "macos",
            target_os = "netbsd",
            target_os = "tvos",
            target_os = "watchos",
        )
    ))]
    fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive {
        ka // no-op as keepalive retries is not supported on this platform
    }
}

// ===== impl HttpConnector =====

impl HttpConnector {
    /// Construct a new HttpConnector.
    pub fn new() -> HttpConnector {
        HttpConnector::new_with_resolver(GaiResolver::new())
    }
}

impl<R> HttpConnector<R> {
    /// Construct a new HttpConnector.
    ///
    /// Takes a [`Resolver`](crate::client::legacy::connect::dns#resolvers-are-services) to handle DNS lookups.
    pub fn new_with_resolver(resolver: R) -> HttpConnector<R> {
        HttpConnector {
            config: Arc::new(Config {
                connect_timeout: None,
                // Only the `http` scheme is accepted until relaxed via
                // `enforce_http(false)`.
                enforce_http: true,
                happy_eyeballs_timeout: Some(Duration::from_millis(300)),
                tcp_keepalive_config: TcpKeepaliveConfig::default(),
                local_address_ipv4: None,
                local_address_ipv6: None,
                nodelay: false,
                reuse_address: false,
                send_buffer_size: None,
                recv_buffer_size: None,
                #[cfg(any(
                    target_os = "android",
                    target_os = "fuchsia",
                    target_os = "illumos",
                    target_os = "ios",
                    target_os = "linux",
                    target_os = "macos",
                    target_os = "solaris",
                    target_os = "tvos",
                    target_os = "visionos",
                    target_os = "watchos",
                ))]
                interface: None,
                #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
                tcp_user_timeout: None,
            }),
            resolver,
        }
    }

    /// Option to enforce all `Uri`s have the `http` scheme.
    ///
    /// Enabled by default.
    #[inline]
    pub fn enforce_http(&mut self, is_enforced: bool) {
        self.config_mut().enforce_http = is_enforced;
    }

    /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration
    /// to remain idle before sending TCP keepalive probes.
    ///
    /// If `None`, keepalive is disabled.
    ///
    /// Default is `None`.
    #[inline]
    pub fn set_keepalive(&mut self, time: Option<Duration>) {
        self.config_mut().tcp_keepalive_config.time = time;
    }

    /// Set the duration between two successive TCP keepalive retransmissions,
    /// if acknowledgement to the previous keepalive transmission is not received.
    #[inline]
    pub fn set_keepalive_interval(&mut self, interval: Option<Duration>) {
        self.config_mut().tcp_keepalive_config.interval = interval;
    }

    /// Set the number of retransmissions to be carried out before declaring that remote end is not available.
    #[inline]
    pub fn set_keepalive_retries(&mut self, retries: Option<u32>) {
        self.config_mut().tcp_keepalive_config.retries = retries;
    }

    /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.
    ///
    /// Default is `false`.
    #[inline]
    pub fn set_nodelay(&mut self, nodelay: bool) {
        self.config_mut().nodelay = nodelay;
    }

    /// Sets the value of the SO_SNDBUF option on the socket.
    #[inline]
    pub fn set_send_buffer_size(&mut self, size: Option<usize>) {
        self.config_mut().send_buffer_size = size;
    }

    /// Sets the value of the SO_RCVBUF option on the socket.
    #[inline]
    pub fn set_recv_buffer_size(&mut self, size: Option<usize>) {
        self.config_mut().recv_buffer_size = size;
    }

    /// Set that all sockets are bound to the configured address before connection.
    ///
    /// If `None`, the sockets will not be bound.
    ///
    /// Default is `None`.
    #[inline]
    pub fn set_local_address(&mut self, addr: Option<IpAddr>) {
        // Split by family: only the matching family's field is populated, so
        // connecting to the other family proceeds unbound.
        let (v4, v6) = match addr {
            Some(IpAddr::V4(a)) => (Some(a), None),
            Some(IpAddr::V6(a)) => (None, Some(a)),
            _ => (None, None),
        };

        let cfg = self.config_mut();

        cfg.local_address_ipv4 = v4;
        cfg.local_address_ipv6 = v6;
    }

    /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's
    /// preferences) before connection.
    #[inline]
    pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) {
        let cfg = self.config_mut();

        cfg.local_address_ipv4 = Some(addr_ipv4);
        cfg.local_address_ipv6 = Some(addr_ipv6);
    }

    /// Set the connect timeout.
    ///
    /// If a domain resolves to multiple IP addresses, the timeout will be
    /// evenly divided across them.
    ///
    /// Default is `None`.
    #[inline]
    pub fn set_connect_timeout(&mut self, dur: Option<Duration>) {
        self.config_mut().connect_timeout = dur;
    }

    /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.
    ///
    /// If hostname resolves to both IPv4 and IPv6 addresses and connection
    /// cannot be established using preferred address family before timeout
    /// elapses, then connector will in parallel attempt connection using other
    /// address family.
    ///
    /// If `None`, parallel connection attempts are disabled.
    ///
    /// Default is 300 milliseconds.
    ///
    /// [RFC 6555]: https://tools.ietf.org/html/rfc6555
    #[inline]
    pub fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {
        self.config_mut().happy_eyeballs_timeout = dur;
    }

    /// Set that all sockets have `SO_REUSEADDR` set to the supplied value `reuse_address`.
    ///
    /// Default is `false`.
    #[inline]
    pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self {
        self.config_mut().reuse_address = reuse_address;
        self
    }

    /// Sets the name of the interface to bind sockets produced by this
    /// connector.
    ///
    /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see
    /// [`man 7 socket`] for details). On macOS (and macOS-derived systems like
    /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF`
    /// socket option (see [`man 7p ip`]).
    ///
    /// If a socket is bound to an interface, only packets received from that particular
    /// interface are processed by the socket. Note that this only works for some socket
    /// types, particularly `AF_INET` sockets.
    ///
    /// On Linux it can be used to specify a [VRF], but the binary needs
    /// to either have `CAP_NET_RAW` or to be run as root.
    ///
    /// This function is only available on the following operating systems:
    /// - Linux, including Android
    /// - Fuchsia
    /// - illumos and Solaris
    /// - macOS, iOS, visionOS, watchOS, and tvOS
    ///
    /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt
    /// [`man 7 socket`]: https://man7.org/linux/man-pages/man7/socket.7.html
    /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html
    #[cfg(any(
        target_os = "android",
        target_os = "fuchsia",
        target_os = "illumos",
        target_os = "ios",
        target_os = "linux",
        target_os = "macos",
        target_os = "solaris",
        target_os = "tvos",
        target_os = "visionos",
        target_os = "watchos",
    ))]
    #[inline]
    pub fn set_interface<S: Into<String>>(&mut self, interface: S) -> &mut Self {
        let interface = interface.into();
        // Linux-like targets store the name as a `String` (for
        // `SO_BINDTODEVICE`); the other targets need a `CString` because the
        // name is later passed to `if_nametoindex`.
        #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
        {
            self.config_mut().interface = Some(interface);
        }
        #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))]
        {
            let interface = std::ffi::CString::new(interface)
                .expect("interface name should not have nulls in it");
            self.config_mut().interface = Some(interface);
        }
        self
    }

    /// Sets the value of the TCP_USER_TIMEOUT option on the socket.
    #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
    #[inline]
    pub fn set_tcp_user_timeout(&mut self, time: Option<Duration>) {
        self.config_mut().tcp_user_timeout = time;
    }

    // private

    fn config_mut(&mut self) -> &mut Config {
        // If there are HttpConnector clones, this will clone the inner
        // config. So mutating the config won't ever affect previous
        // clones.
        Arc::make_mut(&mut self.config)
    }
}

static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";

// R: Debug required for now to allow adding it to debug output later...
impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Intentionally opaque: the config holds many (partly
        // platform-specific) fields, so only the type name is printed.
        f.debug_struct("HttpConnector").finish()
    }
}

impl<R> tower_service::Service<Uri> for HttpConnector<R>
where
    R: Resolve + Clone + Send + Sync + 'static,
    R::Future: Send,
{
    type Response = TokioIo<TcpStream>;
    type Error = ConnectError;
    type Future = HttpConnecting<R>;

    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated to the resolver; the TCP connect itself has
        // no backpressure to report here.
        ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, dst: Uri) -> Self::Future {
        // Clone `self` so the returned future is independent of `&mut self`;
        // the Arc'd config makes the clone cheap.
        let mut self_ = self.clone();
        HttpConnecting {
            fut: Box::pin(async move { self_.call_async(dst).await }),
            _marker: PhantomData,
        }
    }
}

/// Validate the destination URI per the connector's config and extract the
/// host plus the effective port (explicit, or 443/80 by scheme).
fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> {
    trace!(
        "Http::connect; scheme={:?}, host={:?}, port={:?}",
        dst.scheme(),
        dst.host(),
        dst.port(),
    );

    if config.enforce_http {
        if dst.scheme() != Some(&Scheme::HTTP) {
            return Err(ConnectError {
                msg: INVALID_NOT_HTTP,
                addr: None,
                cause: None,
            });
        }
    } else if dst.scheme().is_none() {
        return Err(ConnectError {
            msg: INVALID_MISSING_SCHEME,
            addr: None,
            cause: None,
        });
    }

    let host = match dst.host() {
        Some(s) => s,
        None => {
            return Err(ConnectError {
                msg: INVALID_MISSING_HOST,
                addr: None,
                cause: None,
            });
        }
    };
    let port = match dst.port() {
        Some(port) => port.as_u16(),
        None => {
            if dst.scheme() == Some(&Scheme::HTTPS) {
                443
            } else {
                80
            }
        }
    };

    Ok((host, port))
}

impl<R> HttpConnector<R>
where
    R: Resolve,
{
    async fn call_async(&mut self, dst: Uri) -> Result<TokioIo<TcpStream>, ConnectError> {
        let config = &self.config;

        let (host, port) = get_host_port(config, &dst)?;
        // Strip IPv6 literal brackets so the address parses below.
        let host = host.trim_start_matches('[').trim_end_matches(']');

        // If the host is already an IP addr (v4 or v6),
        // skip resolving the dns and start connecting right away.
let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { addrs } else { let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) .await .map_err(ConnectError::dns)?; let addrs = addrs .map(|mut addr| { set_port(&mut addr, port, dst.port().is_some()); addr }) .collect(); dns::SocketAddrs::new(addrs) }; let c = ConnectingTcp::new(addrs, config); let sock = c.connect().await?; if let Err(e) = sock.set_nodelay(config.nodelay) { warn!("tcp set_nodelay error: {}", e); } Ok(TokioIo::new(sock)) } } impl Connection for TcpStream { fn connected(&self) -> Connected { let connected = Connected::new(); if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { connected.extra(HttpInfo { remote_addr, local_addr, }) } else { connected } } } #[cfg(unix)] impl Connection for tokio::net::UnixStream { fn connected(&self) -> Connected { Connected::new() } } #[cfg(windows)] impl Connection for tokio::net::windows::named_pipe::NamedPipeClient { fn connected(&self) -> Connected { Connected::new() } } // Implement `Connection` for generic `TokioIo` so that external crates can // implement their own `HttpConnector` with `TokioIo`. impl Connection for TokioIo where T: Connection, { fn connected(&self) -> Connected { self.inner().connected() } } impl HttpInfo { /// Get the remote address of the transport used. pub fn remote_addr(&self) -> SocketAddr { self.remote_addr } /// Get the local address of the transport used. pub fn local_addr(&self) -> SocketAddr { self.local_addr } } pin_project! { // Not publicly exported (so missing_docs doesn't trigger). // // We return this `Future` instead of the `Pin>` directly // so that users don't rely on it fitting in a `Pin>` slot // (and thus we can change the type in the future). 
#[must_use = "futures do nothing unless polled"] #[allow(missing_debug_implementations)] pub struct HttpConnecting { #[pin] fut: BoxConnecting, _marker: PhantomData, } } type ConnectResult = Result, ConnectError>; type BoxConnecting = Pin + Send>>; impl Future for HttpConnecting { type Output = ConnectResult; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.project().fut.poll(cx) } } // Not publicly exported (so missing_docs doesn't trigger). pub struct ConnectError { msg: &'static str, addr: Option, cause: Option>, } impl ConnectError { fn new(msg: &'static str, cause: E) -> ConnectError where E: Into>, { ConnectError { msg, addr: None, cause: Some(cause.into()), } } fn dns(cause: E) -> ConnectError where E: Into>, { ConnectError::new("dns error", cause) } fn m(msg: &'static str) -> impl FnOnce(E) -> ConnectError where E: Into>, { move |cause| ConnectError::new(msg, cause) } } impl fmt::Debug for ConnectError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut b = f.debug_tuple("ConnectError"); b.field(&self.msg); if let Some(ref addr) = self.addr { b.field(addr); } if let Some(ref cause) = self.cause { b.field(cause); } b.finish() } } impl fmt::Display for ConnectError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.msg) } } impl StdError for ConnectError { fn source(&self) -> Option<&(dyn StdError + 'static)> { self.cause.as_ref().map(|e| &**e as _) } } struct ConnectingTcp<'a> { preferred: ConnectingTcpRemote, fallback: Option, config: &'a Config, } impl<'a> ConnectingTcp<'a> { fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self { if let Some(fallback_timeout) = config.happy_eyeballs_timeout { let (preferred_addrs, fallback_addrs) = remote_addrs .split_by_preference(config.local_address_ipv4, config.local_address_ipv6); if fallback_addrs.is_empty() { return ConnectingTcp { preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), fallback: None, config, }; 
} ConnectingTcp { preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), fallback: Some(ConnectingTcpFallback { delay: tokio::time::sleep(fallback_timeout), remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout), }), config, } } else { ConnectingTcp { preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout), fallback: None, config, } } } } struct ConnectingTcpFallback { delay: Sleep, remote: ConnectingTcpRemote, } struct ConnectingTcpRemote { addrs: dns::SocketAddrs, connect_timeout: Option, } impl ConnectingTcpRemote { fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32)); Self { addrs, connect_timeout, } } } impl ConnectingTcpRemote { async fn connect(&mut self, config: &Config) -> Result { let mut err = None; for addr in &mut self.addrs { debug!("connecting to {}", addr); match connect(&addr, config, self.connect_timeout)?.await { Ok(tcp) => { debug!("connected to {}", addr); return Ok(tcp); } Err(mut e) => { trace!("connect error for {}: {:?}", addr, e); e.addr = Some(addr); // only return the first error, we assume it's the most relevant if err.is_none() { err = Some(e); } } } } match err { Some(e) => Err(e), None => Err(ConnectError::new( "tcp connect error", std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), )), } } } fn bind_local_address( socket: &socket2::Socket, dst_addr: &SocketAddr, local_addr_ipv4: &Option, local_addr_ipv6: &Option, ) -> io::Result<()> { match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { (SocketAddr::V4(_), Some(addr), _) => { socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; } (SocketAddr::V6(_), _, Some(addr)) => { socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; } _ => { if cfg!(windows) { // Windows requires a socket be bound before calling connect let any: SocketAddr = match *dst_addr { SocketAddr::V4(_) => ([0, 0, 0, 0], 
0).into(), SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), }; socket.bind(&any.into())?; } } } Ok(()) } fn connect( addr: &SocketAddr, config: &Config, connect_timeout: Option, ) -> Result>, ConnectError> { // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the // keepalive timeout, it would be nice to use that instead of socket2, // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... use socket2::{Domain, Protocol, Socket, Type}; let domain = Domain::for_address(*addr); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) .map_err(ConnectError::m("tcp open error"))?; // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is // responsible for ensuring O_NONBLOCK is set. socket .set_nonblocking(true) .map_err(ConnectError::m("tcp set_nonblocking error"))?; if let Some(tcp_keepalive) = &config.tcp_keepalive_config.into_tcpkeepalive() { if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) { warn!("tcp set_keepalive error: {}", e); } } // That this only works for some socket types, particularly AF_INET sockets. #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "illumos", target_os = "ios", target_os = "linux", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] if let Some(interface) = &config.interface { // On Linux-like systems, set the interface to bind using // `SO_BINDTODEVICE`. #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] socket .bind_device(Some(interface.as_bytes())) .map_err(ConnectError::m("tcp bind interface error"))?; // On macOS-like and Solaris-like systems, we instead use `IP_BOUND_IF`. // This socket option desires an integer index for the interface, so we // must first determine the index of the requested interface name using // `if_nametoindex`. 
#[cfg(any( target_os = "illumos", target_os = "ios", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] { let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) }; let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| { // If the index is 0, check errno and return an I/O error. ConnectError::new( "error converting interface name to index", io::Error::last_os_error(), ) })?; // Different setsockopt calls are necessary depending on whether the // address is IPv4 or IPv6. match addr { SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)), SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)), } .map_err(ConnectError::m("tcp bind interface error"))?; } } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] if let Some(tcp_user_timeout) = &config.tcp_user_timeout { if let Err(e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) { warn!("tcp set_tcp_user_timeout error: {}", e); } } bind_local_address( &socket, addr, &config.local_address_ipv4, &config.local_address_ipv6, ) .map_err(ConnectError::m("tcp bind local error"))?; // Convert the `Socket` to a Tokio `TcpSocket`. 
let socket = TcpSocket::from_std_stream(socket.into()); if config.reuse_address { if let Err(e) = socket.set_reuseaddr(true) { warn!("tcp set_reuse_address error: {}", e); } } if let Some(size) = config.send_buffer_size { if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(u32::MAX)) { warn!("tcp set_buffer_size error: {}", e); } } if let Some(size) = config.recv_buffer_size { if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(u32::MAX)) { warn!("tcp set_recv_buffer_size error: {}", e); } } let connect = socket.connect(*addr); Ok(async move { match connect_timeout { Some(dur) => match tokio::time::timeout(dur, connect).await { Ok(Ok(s)) => Ok(s), Ok(Err(e)) => Err(e), Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)), }, None => connect.await, } .map_err(ConnectError::m("tcp connect error")) }) } impl ConnectingTcp<'_> { async fn connect(mut self) -> Result { match self.fallback { None => self.preferred.connect(self.config).await, Some(mut fallback) => { let preferred_fut = self.preferred.connect(self.config); futures_util::pin_mut!(preferred_fut); let fallback_fut = fallback.remote.connect(self.config); futures_util::pin_mut!(fallback_fut); let fallback_delay = fallback.delay; futures_util::pin_mut!(fallback_delay); let (result, future) = match futures_util::future::select(preferred_fut, fallback_delay).await { Either::Left((result, _fallback_delay)) => { (result, Either::Right(fallback_fut)) } Either::Right(((), preferred_fut)) => { // Delay is done, start polling both the preferred and the fallback futures_util::future::select(preferred_fut, fallback_fut) .await .factor_first() } }; if result.is_err() { // Fallback to the remaining future (could be preferred or fallback) // if we get an error future.await } else { result } } } } } /// Respect explicit ports in the URI, if none, either /// keep non `0` ports resolved from a custom dns resolver, /// or use the default port for the scheme. 
fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) { if explicit || addr.port() == 0 { addr.set_port(host_port) }; } #[cfg(test)] mod tests { use std::io; use std::net::SocketAddr; use ::http::Uri; use crate::client::legacy::connect::http::TcpKeepaliveConfig; use super::super::sealed::{Connect, ConnectSvc}; use super::{Config, ConnectError, HttpConnector}; use super::set_port; async fn connect( connector: C, dst: Uri, ) -> Result<::Connection, ::Error> where C: Connect, { connector.connect(super::super::sealed::Internal, dst).await } #[tokio::test] async fn test_errors_enforce_http() { let dst = "https://example.domain/foo/bar?baz".parse().unwrap(); let connector = HttpConnector::new(); let err = connect(connector, dst).await.unwrap_err(); assert_eq!(&*err.msg, super::INVALID_NOT_HTTP); } #[cfg(any(target_os = "linux", target_os = "macos"))] fn get_local_ips() -> (Option, Option) { use std::net::{IpAddr, TcpListener}; let mut ip_v4 = None; let mut ip_v6 = None; let ips = pnet_datalink::interfaces() .into_iter() .flat_map(|i| i.ips.into_iter().map(|n| n.ip())); for ip in ips { match ip { IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip), IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip), _ => (), } if ip_v4.is_some() && ip_v6.is_some() { break; } } (ip_v4, ip_v6) } #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] fn default_interface() -> Option { pnet_datalink::interfaces() .iter() .find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty()) .map(|e| e.name.clone()) } #[tokio::test] async fn test_errors_missing_scheme() { let dst = "example.domain".parse().unwrap(); let mut connector = HttpConnector::new(); connector.enforce_http(false); let err = connect(connector, dst).await.unwrap_err(); assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME); } // NOTE: pnet crate that we use in this test doesn't compile on Windows #[cfg(any(target_os = "linux", target_os = "macos"))] 
#[cfg_attr(miri, ignore)] #[tokio::test] async fn local_address() { use std::net::{IpAddr, TcpListener}; let (bind_ip_v4, bind_ip_v6) = get_local_ips(); let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let port = server4.local_addr().unwrap().port(); let server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap(); let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { let mut connector = HttpConnector::new(); match (bind_ip_v4, bind_ip_v6) { (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), (Some(v4), None) => connector.set_local_address(Some(v4.into())), (None, Some(v6)) => connector.set_local_address(Some(v6.into())), _ => unreachable!(), } connect(connector, dst.parse().unwrap()).await.unwrap(); let (_, client_addr) = server.accept().unwrap(); assert_eq!(client_addr.ip(), expected_ip); }; if let Some(ip) = bind_ip_v4 { assert_client_ip(format!("http://127.0.0.1:{port}"), server4, ip.into()).await; } if let Some(ip) = bind_ip_v6 { assert_client_ip(format!("http://[::1]:{port}"), server6, ip.into()).await; } } // NOTE: pnet crate that we use in this test doesn't compile on Windows #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] #[tokio::test] #[ignore = "setting `SO_BINDTODEVICE` requires the `CAP_NET_RAW` capability (works when running as root)"] async fn interface() { use socket2::{Domain, Protocol, Socket, Type}; use std::net::TcpListener; let interface: Option = default_interface(); let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let port = server4.local_addr().unwrap().port(); let server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap(); let assert_interface_name = |dst: String, server: TcpListener, bind_iface: Option, expected_interface: Option| async move { let mut connector = HttpConnector::new(); if let Some(iface) = bind_iface { connector.set_interface(iface); } connect(connector, dst.parse().unwrap()).await.unwrap(); let domain = 
Domain::for_address(server.local_addr().unwrap()); let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap(); assert_eq!( socket.device().unwrap().as_deref(), expected_interface.as_deref().map(|val| val.as_bytes()) ); }; assert_interface_name( format!("http://127.0.0.1:{port}"), server4, interface.clone(), interface.clone(), ) .await; assert_interface_name( format!("http://[::1]:{port}"), server6, interface.clone(), interface.clone(), ) .await; } #[test] #[ignore] // TODO #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] fn client_happy_eyeballs() { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; use std::time::{Duration, Instant}; use super::dns; use super::ConnectingTcp; let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server4.local_addr().unwrap(); let _server6 = TcpListener::bind(format!("[::1]:{}", addr.port())).unwrap(); let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build() .unwrap(); let local_timeout = Duration::default(); let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1; let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) + Duration::from_millis(250); let scenarios = &[ // Fast primary, without fallback. (&[local_ipv4_addr()][..], 4, local_timeout, false), (&[local_ipv6_addr()][..], 6, local_timeout, false), // Fast primary, with (unused) fallback. ( &[local_ipv4_addr(), local_ipv6_addr()][..], 4, local_timeout, false, ), ( &[local_ipv6_addr(), local_ipv4_addr()][..], 6, local_timeout, false, ), // Unreachable + fast primary, without fallback. ( &[unreachable_ipv4_addr(), local_ipv4_addr()][..], 4, unreachable_v4_timeout, false, ), ( &[unreachable_ipv6_addr(), local_ipv6_addr()][..], 6, unreachable_v6_timeout, false, ), // Unreachable + fast primary, with (unused) fallback. 
( &[ unreachable_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr(), ][..], 4, unreachable_v4_timeout, false, ), ( &[ unreachable_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr(), ][..], 6, unreachable_v6_timeout, true, ), // Slow primary, with (used) fallback. ( &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], 6, fallback_timeout, false, ), ( &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], 4, fallback_timeout, true, ), // Slow primary, with (used) unreachable + fast fallback. ( &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], 6, fallback_timeout + unreachable_v6_timeout, false, ), ( &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], 4, fallback_timeout + unreachable_v4_timeout, true, ), ]; // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. // Otherwise, connection to "slow" IPv6 address will error-out immediately. let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; for &(hosts, family, timeout, needs_ipv6_access) in scenarios { if needs_ipv6_access && !ipv6_accessible { continue; } let (start, stream) = rt .block_on(async move { let addrs = hosts .iter() .map(|host| (*host, addr.port()).into()) .collect(); let cfg = Config { local_address_ipv4: None, local_address_ipv6: None, connect_timeout: None, tcp_keepalive_config: TcpKeepaliveConfig::default(), happy_eyeballs_timeout: Some(fallback_timeout), nodelay: false, reuse_address: false, enforce_http: false, send_buffer_size: None, recv_buffer_size: None, #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "linux" ))] interface: None, #[cfg(any( target_os = "illumos", target_os = "ios", target_os = "macos", target_os = "solaris", target_os = "tvos", target_os = "visionos", target_os = "watchos", ))] interface: None, #[cfg(any( target_os = "android", target_os = "fuchsia", target_os = "linux" ))] tcp_user_timeout: None, }; let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); 
let start = Instant::now(); Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) }) .unwrap(); let res = if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 }; let duration = start.elapsed(); // Allow actual duration to be +/- 150ms off. let min_duration = if timeout >= Duration::from_millis(150) { timeout - Duration::from_millis(150) } else { Duration::default() }; let max_duration = timeout + Duration::from_millis(150); assert_eq!(res, family); assert!(duration >= min_duration); assert!(duration <= max_duration); } fn local_ipv4_addr() -> IpAddr { Ipv4Addr::new(127, 0, 0, 1).into() } fn local_ipv6_addr() -> IpAddr { Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() } fn unreachable_ipv4_addr() -> IpAddr { Ipv4Addr::new(127, 0, 0, 2).into() } fn unreachable_ipv6_addr() -> IpAddr { Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() } fn slow_ipv4_addr() -> IpAddr { // RFC 6890 reserved IPv4 address. Ipv4Addr::new(198, 18, 0, 25).into() } fn slow_ipv6_addr() -> IpAddr { // RFC 6890 reserved IPv6 address. 
Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() } fn measure_connect(addr: IpAddr) -> (bool, Duration) { let start = Instant::now(); let result = std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1)); let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; let duration = start.elapsed(); (reachable, duration) } } use std::time::Duration; #[test] fn no_tcp_keepalive_config() { assert!(TcpKeepaliveConfig::default().into_tcpkeepalive().is_none()); } #[test] fn tcp_keepalive_time_config() { let kac = TcpKeepaliveConfig { time: Some(Duration::from_secs(60)), ..Default::default() }; if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); } else { panic!("test failed"); } } #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))] #[test] fn tcp_keepalive_interval_config() { let kac = TcpKeepaliveConfig { interval: Some(Duration::from_secs(1)), ..Default::default() }; if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); } else { panic!("test failed"); } } #[cfg(not(any( target_os = "openbsd", target_os = "redox", target_os = "solaris", target_os = "windows" )))] #[test] fn tcp_keepalive_retries_config() { let kac = TcpKeepaliveConfig { retries: Some(3), ..Default::default() }; if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); } else { panic!("test failed"); } } #[test] fn test_set_port() { // Respect explicit ports no matter what the resolved port is. let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); set_port(&mut addr, 42, true); assert_eq!(addr.port(), 42); // Ignore default host port, and use the socket port instead. 
let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); set_port(&mut addr, 443, false); assert_eq!(addr.port(), 6881); // Use the default port if the resolved port is `0`. let mut addr = SocketAddr::from(([0, 0, 0, 0], 0)); set_port(&mut addr, 443, false); assert_eq!(addr.port(), 443); } } hyper-util-0.1.19/src/client/legacy/connect/mod.rs000064400000000000000000000301771046102023000201150ustar 00000000000000//! Connectors used by the `Client`. //! //! This module contains: //! //! - A default [`HttpConnector`][] that does DNS resolution and establishes //! connections over TCP. //! - Types to build custom connectors. //! //! # Connectors //! //! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and //! its `Response` is some type implementing [`Read`][], [`Write`][], //! and [`Connection`][]. //! //! ## Custom Connectors //! //! A simple connector that ignores the `Uri` destination and always returns //! a TCP connection to the same address could be written like this: //! //! ```rust,ignore //! let connector = tower::service_fn(|_dst| async { //! tokio::net::TcpStream::connect("127.0.0.1:1337") //! }) //! ``` //! //! Or, fully written out: //! //! ``` //! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; //! use http::Uri; //! use tokio::net::TcpStream; //! use tower_service::Service; //! //! #[derive(Clone)] //! struct LocalConnector; //! //! impl Service for LocalConnector { //! type Response = TcpStream; //! type Error = std::io::Error; //! // We can't "name" an `async` generated future. //! type Future = Pin> + Send //! >>; //! //! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { //! // This connector is always ready, but others might not be. //! Poll::Ready(Ok(())) //! } //! //! fn call(&mut self, _: Uri) -> Self::Future { //! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) //! } //! } //! ``` //! //! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a //! 
better starting place to extend from. //! //! [`HttpConnector`]: HttpConnector //! [`Service`]: tower_service::Service //! [`Uri`]: ::http::Uri //! [`Read`]: hyper::rt::Read //! [`Write`]: hyper::rt::Write //! [`Connection`]: Connection use std::{ fmt::{self, Formatter}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; use ::http::Extensions; #[cfg(feature = "tokio")] pub use self::http::{HttpConnector, HttpInfo}; #[cfg(feature = "tokio")] pub mod dns; #[cfg(feature = "tokio")] mod http; pub mod proxy; pub(crate) mod capture; pub use capture::{capture_connection, CaptureConnection}; pub use self::sealed::Connect; /// Describes a type returned by a connector. pub trait Connection { /// Return metadata describing the connection. fn connected(&self) -> Connected; } /// Extra information about the connected transport. /// /// This can be used to inform recipients about things like if ALPN /// was used, or if connected to an HTTP proxy. #[derive(Debug)] pub struct Connected { pub(super) alpn: Alpn, pub(super) is_proxied: bool, pub(super) extra: Option, pub(super) poisoned: PoisonPill, } #[derive(Clone)] pub(crate) struct PoisonPill { poisoned: Arc, } impl fmt::Debug for PoisonPill { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { // print the address of the pill—this makes debugging issues much easier write!( f, "PoisonPill@{:p} {{ poisoned: {} }}", self.poisoned, self.poisoned.load(Ordering::Relaxed) ) } } impl PoisonPill { pub(crate) fn healthy() -> Self { Self { poisoned: Arc::new(AtomicBool::new(false)), } } pub(crate) fn poison(&self) { self.poisoned.store(true, Ordering::Relaxed) } pub(crate) fn poisoned(&self) -> bool { self.poisoned.load(Ordering::Relaxed) } } pub(super) struct Extra(Box); #[derive(Clone, Copy, Debug, PartialEq)] pub(super) enum Alpn { H2, None, } impl Connected { /// Create new `Connected` type with empty metadata. 
pub fn new() -> Connected { Connected { alpn: Alpn::None, is_proxied: false, extra: None, poisoned: PoisonPill::healthy(), } } /// Set whether the connected transport is to an HTTP proxy. /// /// This setting will affect if HTTP/1 requests written on the transport /// will have the request-target in absolute-form or origin-form: /// /// - When `proxy(false)`: /// /// ```http /// GET /guide HTTP/1.1 /// ``` /// /// - When `proxy(true)`: /// /// ```http /// GET http://hyper.rs/guide HTTP/1.1 /// ``` /// /// Default is `false`. pub fn proxy(mut self, is_proxied: bool) -> Connected { self.is_proxied = is_proxied; self } /// Determines if the connected transport is to an HTTP proxy. pub fn is_proxied(&self) -> bool { self.is_proxied } /// Set extra connection information to be set in the extensions of every `Response`. pub fn extra(mut self, extra: T) -> Connected { if let Some(prev) = self.extra { self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); } else { self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); } self } /// Copies the extra connection information into an `Extensions` map. pub fn get_extras(&self, extensions: &mut Extensions) { if let Some(extra) = &self.extra { extra.set(extensions); } } /// Set that the connected transport negotiated HTTP/2 as its next protocol. pub fn negotiated_h2(mut self) -> Connected { self.alpn = Alpn::H2; self } /// Determines if the connected transport negotiated HTTP/2 as its next protocol. pub fn is_negotiated_h2(&self) -> bool { self.alpn == Alpn::H2 } /// Poison this connection /// /// A poisoned connection will not be reused for subsequent requests by the pool pub fn poison(&self) { self.poisoned.poison(); tracing::debug!( poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests" ); } // Don't public expose that `Connected` is `Clone`, unsure if we want to // keep that contract... 
pub(super) fn clone(&self) -> Connected { Connected { alpn: self.alpn, is_proxied: self.is_proxied, extra: self.extra.clone(), poisoned: self.poisoned.clone(), } } } // ===== impl Extra ===== impl Extra { pub(super) fn set(&self, res: &mut Extensions) { self.0.set(res); } } impl Clone for Extra { fn clone(&self) -> Extra { Extra(self.0.clone_box()) } } impl fmt::Debug for Extra { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Extra").finish() } } trait ExtraInner: Send + Sync { fn clone_box(&self) -> Box; fn set(&self, res: &mut Extensions); } // This indirection allows the `Connected` to have a type-erased "extra" value, // while that type still knows its inner extra type. This allows the correct // TypeId to be used when inserting into `res.extensions_mut()`. #[derive(Clone)] struct ExtraEnvelope(T); impl ExtraInner for ExtraEnvelope where T: Clone + Send + Sync + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { res.insert(self.0.clone()); } } struct ExtraChain(Box, T); impl Clone for ExtraChain { fn clone(&self) -> Self { ExtraChain(self.0.clone_box(), self.1.clone()) } } impl ExtraInner for ExtraChain where T: Clone + Send + Sync + 'static, { fn clone_box(&self) -> Box { Box::new(self.clone()) } fn set(&self, res: &mut Extensions) { self.0.set(res); res.insert(self.1.clone()); } } pub(super) mod sealed { use std::error::Error as StdError; use std::future::Future; use ::http::Uri; use hyper::rt::{Read, Write}; use super::Connection; /// Connect to a destination, returning an IO transport. /// /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the /// ready connection. /// /// # Trait Alias /// /// This is really just an *alias* for the `tower::Service` trait, with /// additional bounds set for convenience *inside* hyper. You don't actually /// implement this trait, but `tower::Service` instead. 
// The `Sized` bound is to prevent creating `dyn Connect`, since they cannot // fit the `Connect` bounds because of the blanket impl for `Service`. pub trait Connect: Sealed + Sized { #[doc(hidden)] type _Svc: ConnectSvc; #[doc(hidden)] fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; } pub trait ConnectSvc { type Connection: Read + Write + Connection + Unpin + Send + 'static; type Error: Into>; type Future: Future> + Unpin + Send + 'static; fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; } impl Connect for S where S: tower_service::Service + Send + 'static, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { type _Svc = S; fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot { crate::service::Oneshot::new(self, dst) } } impl ConnectSvc for S where S: tower_service::Service + Send + 'static, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { type Connection = T; type Error = S::Error; type Future = crate::service::Oneshot; fn connect(self, _: Internal, dst: Uri) -> Self::Future { crate::service::Oneshot::new(self, dst) } } impl Sealed for S where S: tower_service::Service + Send, S::Error: Into>, S::Future: Unpin + Send, T: Read + Write + Connection + Unpin + Send + 'static, { } pub trait Sealed {} #[allow(missing_debug_implementations)] pub struct Internal; } #[cfg(test)] mod tests { use super::Connected; #[derive(Clone, Debug, PartialEq)] struct Ex1(usize); #[derive(Clone, Debug, PartialEq)] struct Ex2(&'static str); #[derive(Clone, Debug, PartialEq)] struct Ex3(&'static str); #[test] fn test_connected_extra() { let c1 = Connected::new().extra(Ex1(41)); let mut ex = ::http::Extensions::new(); assert_eq!(ex.get::(), None); c1.extra.as_ref().expect("c1 extra").set(&mut ex); assert_eq!(ex.get::(), Some(&Ex1(41))); } #[test] fn test_connected_extra_chain() { // If a user composes connectors and at each stage, there's 
"extra" // info to attach, it shouldn't override the previous extras. let c1 = Connected::new() .extra(Ex1(45)) .extra(Ex2("zoom")) .extra(Ex3("pew pew")); let mut ex1 = ::http::Extensions::new(); assert_eq!(ex1.get::(), None); assert_eq!(ex1.get::(), None); assert_eq!(ex1.get::(), None); c1.extra.as_ref().expect("c1 extra").set(&mut ex1); assert_eq!(ex1.get::(), Some(&Ex1(45))); assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); // Just like extensions, inserting the same type overrides previous type. let c2 = Connected::new() .extra(Ex1(33)) .extra(Ex2("hiccup")) .extra(Ex1(99)); let mut ex2 = ::http::Extensions::new(); c2.extra.as_ref().expect("c2 extra").set(&mut ex2); assert_eq!(ex2.get::(), Some(&Ex1(99))); assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); } } hyper-util-0.1.19/src/client/legacy/connect/proxy/mod.rs000064400000000000000000000001611046102023000212640ustar 00000000000000//! Proxy helpers mod socks; mod tunnel; pub use self::socks::{SocksV4, SocksV5}; pub use self::tunnel::Tunnel; hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/mod.rs000064400000000000000000000077431046102023000224230ustar 00000000000000mod v5; pub use v5::{SocksV5, SocksV5Error}; mod v4; pub use v4::{SocksV4, SocksV4Error}; use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use bytes::BytesMut; use hyper::rt::Read; #[derive(Debug)] pub enum SocksError { Inner(C), Io(std::io::Error), DnsFailure, MissingHost, MissingPort, V4(SocksV4Error), V5(SocksV5Error), Parsing(ParsingError), Serialize(SerializeError), } #[derive(Debug)] pub enum ParsingError { Incomplete, WouldOverflow, Other, } #[derive(Debug)] pub enum SerializeError { WouldOverflow, } async fn read_message(mut conn: &mut T, buf: &mut BytesMut) -> Result> where T: Read + Unpin, M: for<'a> TryFrom<&'a mut BytesMut, Error = ParsingError>, { let mut tmp = [0; 513]; loop { let n = crate::rt::read(&mut conn, &mut 
tmp).await?; buf.extend_from_slice(&tmp[..n]); match M::try_from(buf) { Err(ParsingError::Incomplete) => { if n == 0 { if buf.spare_capacity_mut().is_empty() { return Err(SocksError::Parsing(ParsingError::WouldOverflow)); } else { return Err(std::io::Error::new( std::io::ErrorKind::UnexpectedEof, "unexpected eof", ) .into()); } } } Err(err) => return Err(err.into()), Ok(res) => return Ok(res), } } } impl std::fmt::Display for SocksError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("SOCKS error: ")?; match self { Self::Inner(_) => f.write_str("failed to create underlying connection"), Self::Io(_) => f.write_str("io error during SOCKS handshake"), Self::DnsFailure => f.write_str("could not resolve to acceptable address type"), Self::MissingHost => f.write_str("missing destination host"), Self::MissingPort => f.write_str("missing destination port"), Self::Parsing(_) => f.write_str("failed parsing server response"), Self::Serialize(_) => f.write_str("failed serialize request"), Self::V4(e) => e.fmt(f), Self::V5(e) => e.fmt(f), } } } impl std::error::Error for SocksError {} impl From for SocksError { fn from(err: std::io::Error) -> Self { Self::Io(err) } } impl From for SocksError { fn from(err: ParsingError) -> Self { Self::Parsing(err) } } impl From for SocksError { fn from(err: SerializeError) -> Self { Self::Serialize(err) } } impl From for SocksError { fn from(err: SocksV4Error) -> Self { Self::V4(err) } } impl From for SocksError { fn from(err: SocksV5Error) -> Self { Self::V5(err) } } pin_project! { // Not publicly exported (so missing_docs doesn't trigger). // // We return this `Future` instead of the `Pin>` directly // so that users don't rely on it fitting in a `Pin>` slot // (and thus we can change the type in the future). 
#[must_use = "futures do nothing unless polled"] #[allow(missing_debug_implementations)] pub struct Handshaking { #[pin] fut: BoxHandshaking, _marker: std::marker::PhantomData } } type BoxHandshaking = Pin>> + Send>>; impl Future for Handshaking where F: Future>, { type Output = Result>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().fut.poll(cx) } } hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v4/errors.rs000064400000000000000000000007311046102023000234770ustar 00000000000000use super::Status; #[derive(Debug)] pub enum SocksV4Error { IpV6, Command(Status), } impl From for SocksV4Error { fn from(err: Status) -> Self { Self::Command(err) } } impl std::fmt::Display for SocksV4Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::IpV6 => f.write_str("IPV6 is not supported"), Self::Command(status) => status.fmt(f), } } } hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v4/messages.rs000064400000000000000000000077571046102023000240110ustar 00000000000000use super::super::{ParsingError, SerializeError}; use bytes::{Buf, BufMut, BytesMut}; use std::net::SocketAddrV4; /// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+ /// | VN | CD | DSTPORT | DSTIP | USERID | NULL | DOMAIN | NULL | /// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+ /// | 1 | 1 | 2 | 4 | Variable | 1 | Variable | 1 | /// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+ /// ^^^^^^^^^^^^^^^^^^^^^ /// optional: only do IP is 0.0.0.X #[derive(Debug)] pub struct Request<'a>(pub &'a Address); /// +-----+-----+----+----+----+----+----+----+ /// | VN | CD | DSTPORT | DSTIP | /// +-----+-----+----+----+----+----+----+----+ /// | 1 | 1 | 2 | 4 | /// +-----+-----+----+----+----+----+----+----+ /// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /// ignore: only for SOCKSv4 BIND #[derive(Debug)] pub struct Response(pub Status); 
#[derive(Debug)] pub enum Address { Socket(SocketAddrV4), Domain(String, u16), } #[derive(Debug, PartialEq)] pub enum Status { Success = 90, Failed = 91, IdentFailure = 92, IdentMismatch = 93, } impl Request<'_> { pub fn write_to_buf(&self, mut buf: B) -> Result { match self.0 { Address::Socket(socket) => { if buf.remaining_mut() < 10 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x04); // Version buf.put_u8(0x01); // CONNECT buf.put_u16(socket.port()); // Port buf.put_slice(&socket.ip().octets()); // IP buf.put_u8(0x00); // USERID buf.put_u8(0x00); // NULL Ok(10) } Address::Domain(domain, port) => { if buf.remaining_mut() < 10 + domain.len() + 1 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x04); // Version buf.put_u8(0x01); // CONNECT buf.put_u16(*port); // IP buf.put_slice(&[0x00, 0x00, 0x00, 0xFF]); // Invalid IP buf.put_u8(0x00); // USERID buf.put_u8(0x00); // NULL buf.put_slice(domain.as_bytes()); // Domain buf.put_u8(0x00); // NULL Ok(10 + domain.len() + 1) } } } } impl TryFrom<&mut BytesMut> for Response { type Error = ParsingError; fn try_from(buf: &mut BytesMut) -> Result { if buf.remaining() < 8 { return Err(ParsingError::Incomplete); } if buf.get_u8() != 0x00 { return Err(ParsingError::Other); } let status = buf.get_u8().try_into()?; let _addr = { let port = buf.get_u16(); let mut ip = [0; 4]; buf.copy_to_slice(&mut ip); SocketAddrV4::new(ip.into(), port) }; Ok(Self(status)) } } impl TryFrom for Status { type Error = ParsingError; fn try_from(byte: u8) -> Result { Ok(match byte { 90 => Self::Success, 91 => Self::Failed, 92 => Self::IdentFailure, 93 => Self::IdentMismatch, _ => return Err(ParsingError::Other), }) } } impl std::fmt::Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Self::Success => "success", Self::Failed => "server failed to execute command", Self::IdentFailure => "server ident service failed", Self::IdentMismatch => "server ident service did not 
recognise client identifier", }) } } hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v4/mod.rs000064400000000000000000000103031046102023000227360ustar 00000000000000mod errors; pub use errors::*; mod messages; use messages::*; use std::net::{IpAddr, SocketAddr, SocketAddrV4, ToSocketAddrs}; use std::task::{Context, Poll}; use http::Uri; use hyper::rt::{Read, Write}; use tower_service::Service; use bytes::BytesMut; use super::{Handshaking, SocksError}; /// Tunnel Proxy via SOCKSv4 /// /// This is a connector that can be used by the `legacy::Client`. It wraps /// another connector, and after getting an underlying connection, it establishes /// a TCP tunnel over it using SOCKSv4. #[derive(Debug, Clone)] pub struct SocksV4 { inner: C, config: SocksConfig, } #[derive(Debug, Clone)] struct SocksConfig { proxy: Uri, local_dns: bool, } impl SocksV4 { /// Create a new SOCKSv4 handshake service /// /// Wraps an underlying connector and stores the address of a tunneling /// proxying server. /// /// A `SocksV4` can then be called with any destination. The `dst` passed to /// `call` will not be used to create the underlying connection, but will /// be used in a SOCKS handshake with the proxy destination. pub fn new(proxy_dst: Uri, connector: C) -> Self { Self { inner: connector, config: SocksConfig::new(proxy_dst), } } /// Resolve domain names locally on the client, rather than on the proxy server. /// /// Disabled by default as local resolution of domain names can be detected as a /// DNS leak. 
pub fn local_dns(mut self, local_dns: bool) -> Self { self.config.local_dns = local_dns; self } } impl SocksConfig { pub fn new(proxy: Uri) -> Self { Self { proxy, local_dns: false, } } async fn execute(self, mut conn: T, host: String, port: u16) -> Result> where T: Read + Write + Unpin, { let address = match host.parse::() { Ok(IpAddr::V6(_)) => return Err(SocksV4Error::IpV6.into()), Ok(IpAddr::V4(ip)) => Address::Socket(SocketAddrV4::new(ip, port)), Err(_) => { if self.local_dns { (host, port) .to_socket_addrs()? .find_map(|s| { if let SocketAddr::V4(v4) = s { Some(Address::Socket(v4)) } else { None } }) .ok_or(SocksError::DnsFailure)? } else { Address::Domain(host, port) } } }; let mut send_buf = BytesMut::with_capacity(1024); let mut recv_buf = BytesMut::with_capacity(1024); // Send Request let req = Request(&address); let n = req.write_to_buf(&mut send_buf)?; crate::rt::write_all(&mut conn, &send_buf[..n]).await?; // Read Response let res: Response = super::read_message(&mut conn, &mut recv_buf).await?; if res.0 == Status::Success { Ok(conn) } else { Err(SocksV4Error::Command(res.0).into()) } } } impl Service for SocksV4 where C: Service, C::Future: Send + 'static, C::Response: Read + Write + Unpin + Send + 'static, C::Error: Send + 'static, { type Response = C::Response; type Error = SocksError; type Future = Handshaking; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx).map_err(SocksError::Inner) } fn call(&mut self, dst: Uri) -> Self::Future { let config = self.config.clone(); let connecting = self.inner.call(config.proxy.clone()); let fut = async move { let port = dst.port().map(|p| p.as_u16()).unwrap_or(443); let host = dst.host().ok_or(SocksError::MissingHost)?.to_string(); let conn = connecting.await.map_err(SocksError::Inner)?; config.execute(conn, host, port).await }; Handshaking { fut: Box::pin(fut), _marker: Default::default(), } } } 
hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v5/errors.rs000064400000000000000000000021731046102023000235020ustar 00000000000000use super::Status; #[derive(Debug)] pub enum SocksV5Error { HostTooLong, Auth(AuthError), Command(Status), } #[derive(Debug)] pub enum AuthError { Unsupported, MethodMismatch, Failed, } impl From for SocksV5Error { fn from(err: Status) -> Self { Self::Command(err) } } impl From for SocksV5Error { fn from(err: AuthError) -> Self { Self::Auth(err) } } impl std::fmt::Display for SocksV5Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::HostTooLong => f.write_str("host address is more than 255 characters"), Self::Command(e) => e.fmt(f), Self::Auth(e) => e.fmt(f), } } } impl std::fmt::Display for AuthError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Self::Unsupported => "server does not support user/pass authentication", Self::MethodMismatch => "server implements authentication incorrectly", Self::Failed => "credentials not accepted", }) } } hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v5/messages.rs000064400000000000000000000234111046102023000237730ustar 00000000000000use super::super::{ParsingError, SerializeError}; use bytes::{Buf, BufMut, BytesMut}; use std::net::SocketAddr; /// +----+----------+----------+ /// |VER | NMETHODS | METHODS | /// +----+----------+----------+ /// | 1 | 1 | 1 to 255 | /// +----+----------+----------+ #[derive(Debug)] pub struct NegotiationReq<'a>(pub &'a AuthMethod); /// +----+--------+ /// |VER | METHOD | /// +----+--------+ /// | 1 | 1 | /// +----+--------+ #[derive(Debug)] pub struct NegotiationRes(pub AuthMethod); /// +----+------+----------+------+----------+ /// |VER | ULEN | UNAME | PLEN | PASSWD | /// +----+------+----------+------+----------+ /// | 1 | 1 | 1 to 255 | 1 | 1 to 255 | /// +----+------+----------+------+----------+ #[derive(Debug)] pub struct AuthenticationReq<'a>(pub &'a 
str, pub &'a str); /// +----+--------+ /// |VER | STATUS | /// +----+--------+ /// | 1 | 1 | /// +----+--------+ #[derive(Debug)] pub struct AuthenticationRes(pub bool); /// +----+-----+-------+------+----------+----------+ /// |VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT | /// +----+-----+-------+------+----------+----------+ /// | 1 | 1 | X'00' | 1 | Variable | 2 | /// +----+-----+-------+------+----------+----------+ #[derive(Debug)] pub struct ProxyReq<'a>(pub &'a Address); /// +----+-----+-------+------+----------+----------+ /// |VER | REP | RSV | ATYP | BND.ADDR | BND.PORT | /// +----+-----+-------+------+----------+----------+ /// | 1 | 1 | X'00' | 1 | Variable | 2 | /// +----+-----+-------+------+----------+----------+ #[derive(Debug)] pub struct ProxyRes(pub Status); #[repr(u8)] #[derive(Debug, Copy, Clone, PartialEq)] pub enum AuthMethod { NoAuth = 0x00, UserPass = 0x02, NoneAcceptable = 0xFF, } #[derive(Debug)] pub enum Address { Socket(SocketAddr), Domain(String, u16), } #[derive(Debug, Copy, Clone, PartialEq)] pub enum Status { Success, GeneralServerFailure, ConnectionNotAllowed, NetworkUnreachable, HostUnreachable, ConnectionRefused, TtlExpired, CommandNotSupported, AddressTypeNotSupported, } impl NegotiationReq<'_> { pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result { if buf.capacity() - buf.len() < 3 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x05); // Version buf.put_u8(0x01); // Number of authentication methods buf.put_u8(*self.0 as u8); // Authentication method Ok(3) } } impl TryFrom<&mut BytesMut> for NegotiationRes { type Error = ParsingError; fn try_from(buf: &mut BytesMut) -> Result { if buf.remaining() < 2 { return Err(ParsingError::Incomplete); } if buf.get_u8() != 0x05 { return Err(ParsingError::Other); } let method = buf.get_u8().try_into()?; Ok(Self(method)) } } impl AuthenticationReq<'_> { pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result { if buf.capacity() - buf.len() < 3 + self.0.len() + self.1.len() { 
return Err(SerializeError::WouldOverflow); } buf.put_u8(0x01); // Version buf.put_u8(self.0.len() as u8); // Username length (guarenteed to be 255 or less) buf.put_slice(self.0.as_bytes()); // Username buf.put_u8(self.1.len() as u8); // Password length (guarenteed to be 255 or less) buf.put_slice(self.1.as_bytes()); // Password Ok(3 + self.0.len() + self.1.len()) } } impl TryFrom<&mut BytesMut> for AuthenticationRes { type Error = ParsingError; fn try_from(buf: &mut BytesMut) -> Result { if buf.remaining() < 2 { return Err(ParsingError::Incomplete); } if buf.get_u8() != 0x01 { return Err(ParsingError::Other); } if buf.get_u8() == 0 { Ok(Self(true)) } else { Ok(Self(false)) } } } impl ProxyReq<'_> { pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result { let addr_len = match self.0 { Address::Socket(SocketAddr::V4(_)) => 1 + 4 + 2, Address::Socket(SocketAddr::V6(_)) => 1 + 16 + 2, Address::Domain(ref domain, _) => 1 + 1 + domain.len() + 2, }; if buf.capacity() - buf.len() < 3 + addr_len { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x05); // Version buf.put_u8(0x01); // TCP tunneling command buf.put_u8(0x00); // Reserved let _ = self.0.write_to_buf(buf); // Address Ok(3 + addr_len) } } impl TryFrom<&mut BytesMut> for ProxyRes { type Error = ParsingError; fn try_from(buf: &mut BytesMut) -> Result { if buf.remaining() < 3 { return Err(ParsingError::Incomplete); } // VER if buf.get_u8() != 0x05 { return Err(ParsingError::Other); } // REP let status = buf.get_u8().try_into()?; // RSV if buf.get_u8() != 0x00 { return Err(ParsingError::Other); } // ATYP + ADDR Address::try_from(buf)?; Ok(Self(status)) } } impl Address { pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result { match self { Self::Socket(SocketAddr::V4(v4)) => { if buf.capacity() - buf.len() < 1 + 4 + 2 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x01); buf.put_slice(&v4.ip().octets()); buf.put_u16(v4.port()); // Network Order/BigEndian for port Ok(7) } 
Self::Socket(SocketAddr::V6(v6)) => { if buf.capacity() - buf.len() < 1 + 16 + 2 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x04); buf.put_slice(&v6.ip().octets()); buf.put_u16(v6.port()); // Network Order/BigEndian for port Ok(19) } Self::Domain(domain, port) => { if buf.capacity() - buf.len() < 1 + 1 + domain.len() + 2 { return Err(SerializeError::WouldOverflow); } buf.put_u8(0x03); buf.put_u8(domain.len() as u8); // Guarenteed to be less than 255 buf.put_slice(domain.as_bytes()); buf.put_u16(*port); Ok(4 + domain.len()) } } } } impl TryFrom<&mut BytesMut> for Address { type Error = ParsingError; fn try_from(buf: &mut BytesMut) -> Result { if buf.remaining() < 2 { return Err(ParsingError::Incomplete); } Ok(match buf.get_u8() { // IPv4 0x01 => { let mut ip = [0; 4]; if buf.remaining() < 6 { return Err(ParsingError::Incomplete); } buf.copy_to_slice(&mut ip); let port = buf.get_u16(); Self::Socket(SocketAddr::new(ip.into(), port)) } // Domain 0x03 => { let len = buf.get_u8(); if len == 0 { return Err(ParsingError::Other); } else if buf.remaining() < (len as usize) + 2 { return Err(ParsingError::Incomplete); } let domain = std::str::from_utf8(&buf[..len as usize]) .map_err(|_| ParsingError::Other)? 
.to_string(); let port = buf.get_u16(); Self::Domain(domain, port) } // IPv6 0x04 => { let mut ip = [0; 16]; if buf.remaining() < 18 { return Err(ParsingError::Incomplete); } buf.copy_to_slice(&mut ip); let port = buf.get_u16(); Self::Socket(SocketAddr::new(ip.into(), port)) } _ => return Err(ParsingError::Other), }) } } impl TryFrom for Status { type Error = ParsingError; fn try_from(byte: u8) -> Result { Ok(match byte { 0x00 => Self::Success, 0x01 => Self::GeneralServerFailure, 0x02 => Self::ConnectionNotAllowed, 0x03 => Self::NetworkUnreachable, 0x04 => Self::HostUnreachable, 0x05 => Self::ConnectionRefused, 0x06 => Self::TtlExpired, 0x07 => Self::CommandNotSupported, 0x08 => Self::AddressTypeNotSupported, _ => return Err(ParsingError::Other), }) } } impl TryFrom for AuthMethod { type Error = ParsingError; fn try_from(byte: u8) -> Result { Ok(match byte { 0x00 => Self::NoAuth, 0x02 => Self::UserPass, 0xFF => Self::NoneAcceptable, _ => return Err(ParsingError::Other), }) } } impl std::fmt::Display for Status { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(match self { Self::Success => "success", Self::GeneralServerFailure => "general server failure", Self::ConnectionNotAllowed => "connection not allowed", Self::NetworkUnreachable => "network unreachable", Self::HostUnreachable => "host unreachable", Self::ConnectionRefused => "connection refused", Self::TtlExpired => "ttl expired", Self::CommandNotSupported => "command not supported", Self::AddressTypeNotSupported => "address type not supported", }) } } hyper-util-0.1.19/src/client/legacy/connect/proxy/socks/v5/mod.rs000064400000000000000000000217571046102023000227560ustar 00000000000000mod errors; pub use errors::*; mod messages; use messages::*; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::task::{Context, Poll}; use http::Uri; use hyper::rt::{Read, Write}; use tower_service::Service; use bytes::BytesMut; use super::{Handshaking, SocksError}; /// Tunnel Proxy via 
SOCKSv5 /// /// This is a connector that can be used by the `legacy::Client`. It wraps /// another connector, and after getting an underlying connection, it establishes /// a TCP tunnel over it using SOCKSv5. #[derive(Debug, Clone)] pub struct SocksV5 { inner: C, config: SocksConfig, } #[derive(Debug, Clone)] pub struct SocksConfig { proxy: Uri, proxy_auth: Option<(String, String)>, local_dns: bool, optimistic: bool, } #[derive(Debug)] enum State { SendingNegReq, ReadingNegRes, SendingAuthReq, ReadingAuthRes, SendingProxyReq, ReadingProxyRes, } impl SocksV5 { /// Create a new SOCKSv5 handshake service. /// /// Wraps an underlying connector and stores the address of a tunneling /// proxying server. /// /// A `SocksV5` can then be called with any destination. The `dst` passed to /// `call` will not be used to create the underlying connection, but will /// be used in a SOCKS handshake with the proxy destination. pub fn new(proxy_dst: Uri, connector: C) -> Self { Self { inner: connector, config: SocksConfig::new(proxy_dst), } } /// Use User/Pass authentication method during handshake. /// /// Username and Password must be maximum of 255 characters each. /// 0 length strings are allowed despite RFC prohibiting it. This is done for /// compatablity with server implementations that use empty credentials /// to allow returning error codes during IP authentication. pub fn with_auth(mut self, user: String, pass: String) -> Self { self.config.proxy_auth = Some((user, pass)); self } /// Resolve domain names locally on the client, rather than on the proxy server. /// /// Disabled by default as local resolution of domain names can be detected as a /// DNS leak. pub fn local_dns(mut self, local_dns: bool) -> Self { self.config.local_dns = local_dns; self } /// Send all messages of the handshake optmistically (without waiting for server response). 
/// /// A typical SOCKS handshake with user/pass authentication takes 3 round trips Optimistic sending /// can reduce round trip times and dramatically increase speed of handshake at the cost of /// reduced portability; many server implementations do not support optimistic sending as it /// is not defined in the RFC. /// /// Recommended to ensure connector works correctly without optimistic sending before trying /// with optimistic sending. pub fn send_optimistically(mut self, optimistic: bool) -> Self { self.config.optimistic = optimistic; self } } impl SocksConfig { fn new(proxy: Uri) -> Self { Self { proxy, proxy_auth: None, local_dns: false, optimistic: false, } } async fn execute(self, mut conn: T, host: String, port: u16) -> Result> where T: Read + Write + Unpin, { let address = match host.parse::() { Ok(ip) => Address::Socket(SocketAddr::new(ip, port)), Err(_) if host.len() <= 255 => { if self.local_dns { let socket = (host, port) .to_socket_addrs()? .next() .ok_or(SocksError::DnsFailure)?; Address::Socket(socket) } else { Address::Domain(host, port) } } Err(_) => return Err(SocksV5Error::HostTooLong.into()), }; let method = if self.proxy_auth.is_some() { AuthMethod::UserPass } else { AuthMethod::NoAuth }; let mut recv_buf = BytesMut::with_capacity(513); // Max length of valid recievable message is 513 from Auth Request let mut send_buf = BytesMut::with_capacity(262); // Max length of valid sendable message is 262 from Auth Response let mut state = State::SendingNegReq; loop { match state { State::SendingNegReq => { let req = NegotiationReq(&method); let start = send_buf.len(); req.write_to_buf(&mut send_buf)?; crate::rt::write_all(&mut conn, &send_buf[start..]).await?; if self.optimistic { if method == AuthMethod::UserPass { state = State::SendingAuthReq; } else { state = State::SendingProxyReq; } } else { state = State::ReadingNegRes; } } State::ReadingNegRes => { let res: NegotiationRes = super::read_message(&mut conn, &mut recv_buf).await?; if res.0 == 
AuthMethod::NoneAcceptable { return Err(SocksV5Error::Auth(AuthError::Unsupported).into()); } if res.0 != method { return Err(SocksV5Error::Auth(AuthError::MethodMismatch).into()); } if self.optimistic { if res.0 == AuthMethod::UserPass { state = State::ReadingAuthRes; } else { state = State::ReadingProxyRes; } } else if res.0 == AuthMethod::UserPass { state = State::SendingAuthReq; } else { state = State::SendingProxyReq; } } State::SendingAuthReq => { let (user, pass) = self.proxy_auth.as_ref().unwrap(); let req = AuthenticationReq(user, pass); let start = send_buf.len(); req.write_to_buf(&mut send_buf)?; crate::rt::write_all(&mut conn, &send_buf[start..]).await?; if self.optimistic { state = State::SendingProxyReq; } else { state = State::ReadingAuthRes; } } State::ReadingAuthRes => { let res: AuthenticationRes = super::read_message(&mut conn, &mut recv_buf).await?; if !res.0 { return Err(SocksV5Error::Auth(AuthError::Failed).into()); } if self.optimistic { state = State::ReadingProxyRes; } else { state = State::SendingProxyReq; } } State::SendingProxyReq => { let req = ProxyReq(&address); let start = send_buf.len(); req.write_to_buf(&mut send_buf)?; crate::rt::write_all(&mut conn, &send_buf[start..]).await?; if self.optimistic { state = State::ReadingNegRes; } else { state = State::ReadingProxyRes; } } State::ReadingProxyRes => { let res: ProxyRes = super::read_message(&mut conn, &mut recv_buf).await?; if res.0 == Status::Success { return Ok(conn); } else { return Err(SocksV5Error::Command(res.0).into()); } } } } } } impl Service for SocksV5 where C: Service, C::Future: Send + 'static, C::Response: Read + Write + Unpin + Send + 'static, C::Error: Send + 'static, { type Response = C::Response; type Error = SocksError; type Future = Handshaking; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx).map_err(SocksError::Inner) } fn call(&mut self, dst: Uri) -> Self::Future { let config = self.config.clone(); let connecting = 
self.inner.call(config.proxy.clone()); let fut = async move { let port = dst.port().map(|p| p.as_u16()).unwrap_or(443); let host = dst.host().ok_or(SocksError::MissingHost)?.to_string(); let conn = connecting.await.map_err(SocksError::Inner)?; config.execute(conn, host, port).await }; Handshaking { fut: Box::pin(fut), _marker: Default::default(), } } } hyper-util-0.1.19/src/client/legacy/connect/proxy/tunnel.rs000064400000000000000000000171711046102023000220230ustar 00000000000000use std::error::Error as StdError; use std::future::Future; use std::marker::{PhantomData, Unpin}; use std::pin::Pin; use std::task::{self, Poll}; use futures_core::ready; use http::{HeaderMap, HeaderValue, Uri}; use hyper::rt::{Read, Write}; use pin_project_lite::pin_project; use tower_service::Service; /// Tunnel Proxy via HTTP CONNECT /// /// This is a connector that can be used by the `legacy::Client`. It wraps /// another connector, and after getting an underlying connection, it creates /// an HTTP CONNECT tunnel over it. #[derive(Debug, Clone)] pub struct Tunnel { headers: Headers, inner: C, proxy_dst: Uri, } #[derive(Clone, Debug)] enum Headers { Empty, Auth(HeaderValue), Extra(HeaderMap), } #[derive(Debug)] pub enum TunnelError { ConnectFailed(Box), Io(std::io::Error), MissingHost, ProxyAuthRequired, ProxyHeadersTooLong, TunnelUnexpectedEof, TunnelUnsuccessful, } pin_project! { // Not publicly exported (so missing_docs doesn't trigger). // // We return this `Future` instead of the `Pin>` directly // so that users don't rely on it fitting in a `Pin>` slot // (and thus we can change the type in the future). #[must_use = "futures do nothing unless polled"] #[allow(missing_debug_implementations)] pub struct Tunneling { #[pin] fut: BoxTunneling, _marker: PhantomData, } } type BoxTunneling = Pin> + Send>>; impl Tunnel { /// Create a new Tunnel service. /// /// This wraps an underlying connector, and stores the address of a /// tunneling proxy server. 
/// /// A `Tunnel` can then be called with any destination. The `dst` passed to /// `call` will not be used to create the underlying connection, but will /// be used in an HTTP CONNECT request sent to the proxy destination. pub fn new(proxy_dst: Uri, connector: C) -> Self { Self { headers: Headers::Empty, inner: connector, proxy_dst, } } /// Add `proxy-authorization` header value to the CONNECT request. pub fn with_auth(mut self, mut auth: HeaderValue) -> Self { // just in case the user forgot auth.set_sensitive(true); match self.headers { Headers::Empty => { self.headers = Headers::Auth(auth); } Headers::Auth(ref mut existing) => { *existing = auth; } Headers::Extra(ref mut extra) => { extra.insert(http::header::PROXY_AUTHORIZATION, auth); } } self } /// Add extra headers to be sent with the CONNECT request. /// /// If existing headers have been set, these will be merged. pub fn with_headers(mut self, mut headers: HeaderMap) -> Self { match self.headers { Headers::Empty => { self.headers = Headers::Extra(headers); } Headers::Auth(auth) => { headers .entry(http::header::PROXY_AUTHORIZATION) .or_insert(auth); self.headers = Headers::Extra(headers); } Headers::Extra(ref mut extra) => { extra.extend(headers); } } self } } impl Service for Tunnel where C: Service, C::Future: Send + 'static, C::Response: Read + Write + Unpin + Send + 'static, C::Error: Into>, { type Response = C::Response; type Error = TunnelError; type Future = Tunneling; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { ready!(self.inner.poll_ready(cx)).map_err(|e| TunnelError::ConnectFailed(e.into()))?; Poll::Ready(Ok(())) } fn call(&mut self, dst: Uri) -> Self::Future { let connecting = self.inner.call(self.proxy_dst.clone()); let headers = self.headers.clone(); Tunneling { fut: Box::pin(async move { let conn = connecting .await .map_err(|e| TunnelError::ConnectFailed(e.into()))?; tunnel( conn, dst.host().ok_or(TunnelError::MissingHost)?, dst.port().map(|p| p.as_u16()).unwrap_or(443), 
&headers, ) .await }), _marker: PhantomData, } } } impl Future for Tunneling where F: Future>, { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { self.project().fut.poll(cx) } } async fn tunnel(mut conn: T, host: &str, port: u16, headers: &Headers) -> Result where T: Read + Write + Unpin, { let mut buf = format!( "\ CONNECT {host}:{port} HTTP/1.1\r\n\ Host: {host}:{port}\r\n\ " ) .into_bytes(); match headers { Headers::Auth(auth) => { buf.extend_from_slice(b"Proxy-Authorization: "); buf.extend_from_slice(auth.as_bytes()); buf.extend_from_slice(b"\r\n"); } Headers::Extra(extra) => { for (name, value) in extra { buf.extend_from_slice(name.as_str().as_bytes()); buf.extend_from_slice(b": "); buf.extend_from_slice(value.as_bytes()); buf.extend_from_slice(b"\r\n"); } } Headers::Empty => (), } // headers end buf.extend_from_slice(b"\r\n"); crate::rt::write_all(&mut conn, &buf) .await .map_err(TunnelError::Io)?; let mut buf = [0; 8192]; let mut pos = 0; loop { let n = crate::rt::read(&mut conn, &mut buf[pos..]) .await .map_err(TunnelError::Io)?; if n == 0 { return Err(TunnelError::TunnelUnexpectedEof); } pos += n; let recvd = &buf[..pos]; if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200") { if recvd.ends_with(b"\r\n\r\n") { return Ok(conn); } if pos == buf.len() { return Err(TunnelError::ProxyHeadersTooLong); } // else read more } else if recvd.starts_with(b"HTTP/1.1 407") { return Err(TunnelError::ProxyAuthRequired); } else { return Err(TunnelError::TunnelUnsuccessful); } } } impl std::fmt::Display for TunnelError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("tunnel error: ")?; f.write_str(match self { TunnelError::MissingHost => "missing destination host", TunnelError::ProxyAuthRequired => "proxy authorization required", TunnelError::ProxyHeadersTooLong => "proxy response headers too long", TunnelError::TunnelUnexpectedEof => "unexpected end of file", 
TunnelError::TunnelUnsuccessful => "unsuccessful", TunnelError::ConnectFailed(_) => "failed to create underlying connection", TunnelError::Io(_) => "io error establishing tunnel", }) } } impl std::error::Error for TunnelError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { TunnelError::Io(ref e) => Some(e), TunnelError::ConnectFailed(ref e) => Some(&**e), _ => None, } } } hyper-util-0.1.19/src/client/legacy/mod.rs000064400000000000000000000004611046102023000164550ustar 00000000000000#[cfg(any(feature = "http1", feature = "http2"))] mod client; #[cfg(any(feature = "http1", feature = "http2"))] pub use client::{Builder, Client, Error, ResponseFuture}; pub mod connect; #[doc(hidden)] // Publicly available, but just for legacy purposes. A better pool will be // designed. pub mod pool; hyper-util-0.1.19/src/client/legacy/pool.rs000064400000000000000000001055101046102023000166500ustar 00000000000000#![allow(dead_code)] use std::collections::{HashMap, HashSet, VecDeque}; use std::convert::Infallible; use std::error::Error as StdError; use std::fmt::{self, Debug}; use std::future::Future; use std::hash::Hash; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::sync::{Arc, Mutex, Weak}; use std::task::{self, Poll}; use std::time::{Duration, Instant}; use futures_channel::oneshot; use futures_core::ready; use tracing::{debug, trace}; use hyper::rt::Timer as _; use crate::common::{exec, exec::Exec, timer::Timer}; // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Pool { // If the pool is disabled, this is None. inner: Option>>>, } // Before using a pooled connection, make sure the sender is not dead. // // This is a trait to allow the `client::pool::tests` to work for `i32`. // // See https://github.com/hyperium/hyper/issues/1429 pub trait Poolable: Unpin + Send + Sized + 'static { fn is_open(&self) -> bool; /// Reserve this connection. 
/// /// Allows for HTTP/2 to return a shared reservation. fn reserve(self) -> Reservation; fn can_share(&self) -> bool; } pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} impl Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} /// A marker to identify what version a pooled connection is. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[allow(dead_code)] pub enum Ver { Auto, Http2, } /// When checking out a pooled connection, it might be that the connection /// only supports a single reservation, or it might be usable for many. /// /// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be /// used for multiple requests. // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub enum Reservation { /// This connection could be used multiple times, the first one will be /// reinserted into the `idle` pool, and the second will be given to /// the `Checkout`. #[cfg(feature = "http2")] Shared(T, T), /// This connection requires unique access. It will be returned after /// use is complete. Unique(T), } /// Simple type alias in case the key type needs to be adjusted. // pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; struct PoolInner { // A flag that a connection is being established, and the connection // should be shared. This prevents making multiple HTTP/2 connections // to the same host. connecting: HashSet, // These are internal Conns sitting in the event loop in the KeepAlive // state, waiting to receive a new Request to send on the socket. idle: HashMap>>, max_idle_per_host: usize, // These are outstanding Checkouts that are waiting for a socket to be // able to send a Request one. This is used when "racing" for a new // connection. // // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait // for the Pool to receive an idle Conn. 
When a Conn becomes idle, // this list is checked for any parked Checkouts, and tries to notify // them that the Conn could be used instead of waiting for a brand new // connection. waiters: HashMap>>, // A oneshot channel is used to allow the interval to be notified when // the Pool completely drops. That way, the interval can cancel immediately. idle_interval_ref: Option>, exec: Exec, timer: Option, timeout: Option, } // This is because `Weak::new()` *allocates* space for `T`, even if it // doesn't need it! struct WeakOpt(Option>); #[derive(Clone, Copy, Debug)] pub struct Config { pub idle_timeout: Option, pub max_idle_per_host: usize, } impl Config { pub fn is_enabled(&self) -> bool { self.max_idle_per_host > 0 } } impl Pool { pub fn new(config: Config, executor: E, timer: Option) -> Pool where E: hyper::rt::Executor + Send + Sync + Clone + 'static, M: hyper::rt::Timer + Send + Sync + Clone + 'static, { let exec = Exec::new(executor); let timer = timer.map(|t| Timer::new(t)); let inner = if config.is_enabled() { Some(Arc::new(Mutex::new(PoolInner { connecting: HashSet::new(), idle: HashMap::new(), idle_interval_ref: None, max_idle_per_host: config.max_idle_per_host, waiters: HashMap::new(), exec, timer, timeout: config.idle_timeout, }))) } else { None }; Pool { inner } } pub(crate) fn is_enabled(&self) -> bool { self.inner.is_some() } #[cfg(test)] pub(super) fn no_timer(&self) { // Prevent an actual interval from being created for this pool... { let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); let (tx, _) = oneshot::channel(); inner.idle_interval_ref = Some(tx); } } } impl Pool { /// Returns a `Checkout` which is a future that resolves if an idle /// connection becomes available. pub fn checkout(&self, key: K) -> Checkout { Checkout { key, pool: self.clone(), waiter: None, } } /// Ensure that there is only ever 1 connecting task for HTTP/2 /// connections. 
This does nothing for HTTP/1. pub fn connecting(&self, key: &K, ver: Ver) -> Option> { if ver == Ver::Http2 { if let Some(ref enabled) = self.inner { let mut inner = enabled.lock().unwrap(); return if inner.connecting.insert(key.clone()) { let connecting = Connecting { key: key.clone(), pool: WeakOpt::downgrade(enabled), }; Some(connecting) } else { trace!("HTTP/2 connecting already in progress for {:?}", key); None }; } } // else Some(Connecting { key: key.clone(), // in HTTP/1's case, there is never a lock, so we don't // need to do anything in Drop. pool: WeakOpt::none(), }) } #[cfg(test)] fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { self.inner.as_ref().expect("enabled").lock().expect("lock") } /* Used in client/tests.rs... #[cfg(test)] pub(super) fn h1_key(&self, s: &str) -> Key { Arc::new(s.to_string()) } #[cfg(test)] pub(super) fn idle_count(&self, key: &Key) -> usize { self .locked() .idle .get(key) .map(|list| list.len()) .unwrap_or(0) } */ pub fn pooled( &self, #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, value: T, ) -> Pooled { let (value, pool_ref) = if let Some(ref enabled) = self.inner { match value.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_insert, to_return) => { let mut inner = enabled.lock().unwrap(); inner.put(connecting.key.clone(), to_insert, enabled); // Do this here instead of Drop for Connecting because we // already have a lock, no need to lock the mutex twice. inner.connected(&connecting.key); // prevent the Drop of Connecting from repeating inner.connected() connecting.pool = WeakOpt::none(); // Shared reservations don't need a reference to the pool, // since the pool always keeps a copy. (to_return, WeakOpt::none()) } Reservation::Unique(value) => { // Unique reservations must take a reference to the pool // since they hope to reinsert once the reservation is // completed (value, WeakOpt::downgrade(enabled)) } } } else { // If pool is not enabled, skip all the things... 
// The Connecting should have had no pool ref debug_assert!(connecting.pool.upgrade().is_none()); (value, WeakOpt::none()) }; Pooled { key: connecting.key.clone(), is_reused: false, pool: pool_ref, value: Some(value), } } fn reuse(&self, key: &K, value: T) -> Pooled { debug!("reuse idle connection for {:?}", key); // TODO: unhack this // In Pool::pooled(), which is used for inserting brand new connections, // there's some code that adjusts the pool reference taken depending // on if the Reservation can be shared or is unique. By the time // reuse() is called, the reservation has already been made, and // we just have the final value, without knowledge of if this is // unique or shared. So, the hack is to just assume Ver::Http2 means // shared... :( let mut pool_ref = WeakOpt::none(); if !value.can_share() { if let Some(ref enabled) = self.inner { pool_ref = WeakOpt::downgrade(enabled); } } Pooled { is_reused: true, key: key.clone(), pool: pool_ref, value: Some(value), } } } /// Pop off this list, looking for a usable connection that hasn't expired. struct IdlePopper<'a, T, K> { key: &'a K, list: &'a mut Vec>, } impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { fn pop(self, expiration: &Expiration, now: Instant) -> Option> { while let Some(entry) = self.list.pop() { // If the connection has been closed, or is older than our idle // timeout, simply drop it and keep looking... if !entry.value.is_open() { trace!("removing closed connection for {:?}", self.key); continue; } // TODO: Actually, since the `idle` list is pushed to the end always, // that would imply that if *this* entry is expired, then anything // "earlier" in the list would *have* to be expired also... Right? // // In that case, we could just break out of the loop and drop the // whole list... 
if expiration.expires(entry.idle_at, now) { trace!("removing expired connection for {:?}", self.key); continue; } let value = match entry.value.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_reinsert, to_checkout) => { self.list.push(Idle { idle_at: now, value: to_reinsert, }); to_checkout } Reservation::Unique(unique) => unique, }; return Some(Idle { idle_at: entry.idle_at, value, }); } None } } impl PoolInner { fn now(&self) -> Instant { self.timer .as_ref() .map_or_else(|| Instant::now(), |t| t.now()) } fn put(&mut self, key: K, value: T, __pool_ref: &Arc>>) { if value.can_share() && self.idle.contains_key(&key) { trace!("put; existing idle HTTP/2 connection for {:?}", key); return; } trace!("put; add idle connection for {:?}", key); let mut remove_waiters = false; let mut value = Some(value); if let Some(waiters) = self.waiters.get_mut(&key) { while let Some(tx) = waiters.pop_front() { if !tx.is_canceled() { let reserved = value.take().expect("value already sent"); let reserved = match reserved.reserve() { #[cfg(feature = "http2")] Reservation::Shared(to_keep, to_send) => { value = Some(to_keep); to_send } Reservation::Unique(uniq) => uniq, }; match tx.send(reserved) { Ok(()) => { if value.is_none() { break; } else { continue; } } Err(e) => { value = Some(e); } } } trace!("put; removing canceled waiter for {:?}", key); } remove_waiters = waiters.is_empty(); } if remove_waiters { self.waiters.remove(&key); } match value { Some(value) => { // borrow-check scope... { let now = self.now(); let idle_list = self.idle.entry(key.clone()).or_default(); if self.max_idle_per_host <= idle_list.len() { trace!("max idle per host for {:?}, dropping connection", key); return; } debug!("pooling idle connection for {:?}", key); idle_list.push(Idle { value, idle_at: now, }); } self.spawn_idle_interval(__pool_ref); } None => trace!("put; found waiter for {:?}", key), } } /// A `Connecting` task is complete. 
Not necessarily successfully, /// but the lock is going away, so clean up. fn connected(&mut self, key: &K) { let existed = self.connecting.remove(key); debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); // cancel any waiters. if there are any, it's because // this Connecting task didn't complete successfully. // those waiters would never receive a connection. self.waiters.remove(key); } fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { if self.idle_interval_ref.is_some() { return; } let dur = if let Some(dur) = self.timeout { dur } else { return; }; if dur == Duration::ZERO { return; } let timer = if let Some(timer) = self.timer.clone() { timer } else { return; }; // While someone might want a shorter duration, and it will be respected // at checkout time, there's no need to wake up and proactively evict // faster than this. const MIN_CHECK: Duration = Duration::from_millis(90); let dur = dur.max(MIN_CHECK); let (tx, rx) = oneshot::channel(); self.idle_interval_ref = Some(tx); let interval = IdleTask { timer: timer.clone(), duration: dur, pool: WeakOpt::downgrade(pool_ref), pool_drop_notifier: rx, }; self.exec.execute(interval.run()); } } impl PoolInner { /// Any `FutureResponse`s that were created will have made a `Checkout`, /// and possibly inserted into the pool that it is waiting for an idle /// connection. If a user ever dropped that future, we need to clean out /// those parked senders. 
fn clean_waiters(&mut self, key: &K) { let mut remove_waiters = false; if let Some(waiters) = self.waiters.get_mut(key) { waiters.retain(|tx| !tx.is_canceled()); remove_waiters = waiters.is_empty(); } if remove_waiters { self.waiters.remove(key); } } } impl PoolInner { /// This should *only* be called by the IdleTask fn clear_expired(&mut self) { let dur = self.timeout.expect("interval assumes timeout"); let now = self.now(); //self.last_idle_check_at = now; self.idle.retain(|key, values| { values.retain(|entry| { if !entry.value.is_open() { trace!("idle interval evicting closed for {:?}", key); return false; } // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. if now.saturating_duration_since(entry.idle_at) > dur { trace!("idle interval evicting expired for {:?}", key); return false; } // Otherwise, keep this value... true }); // returning false evicts this key/val !values.is_empty() }); } } impl Clone for Pool { fn clone(&self) -> Pool { Pool { inner: self.inner.clone(), } } } /// A wrapped poolable value that tries to reinsert to the Pool on Drop. // Note: The bounds `T: Poolable` is needed for the Drop impl. pub struct Pooled { value: Option, is_reused: bool, key: K, pool: WeakOpt>>, } impl Pooled { pub fn is_reused(&self) -> bool { self.is_reused } pub fn is_pool_enabled(&self) -> bool { self.pool.0.is_some() } fn as_ref(&self) -> &T { self.value.as_ref().expect("not dropped") } fn as_mut(&mut self) -> &mut T { self.value.as_mut().expect("not dropped") } } impl Deref for Pooled { type Target = T; fn deref(&self) -> &T { self.as_ref() } } impl DerefMut for Pooled { fn deref_mut(&mut self) -> &mut T { self.as_mut() } } impl Drop for Pooled { fn drop(&mut self) { if let Some(value) = self.value.take() { if !value.is_open() { // If we *already* know the connection is done here, // it shouldn't be re-inserted back into the pool. 
return; } if let Some(pool) = self.pool.upgrade() { if let Ok(mut inner) = pool.lock() { inner.put(self.key.clone(), value, &pool); } } else if !value.can_share() { trace!("pool dropped, dropping pooled ({:?})", self.key); } // Ver::Http2 is already in the Pool (or dead), so we wouldn't // have an actual reference to the Pool. } } } impl fmt::Debug for Pooled { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Pooled").field("key", &self.key).finish() } } struct Idle { idle_at: Instant, value: T, } // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Checkout { key: K, pool: Pool, waiter: Option>, } #[derive(Debug)] #[non_exhaustive] pub enum Error { PoolDisabled, CheckoutNoLongerWanted, CheckedOutClosedValue, } impl Error { pub(super) fn is_canceled(&self) -> bool { matches!(self, Error::CheckedOutClosedValue) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match self { Error::PoolDisabled => "pool is disabled", Error::CheckedOutClosedValue => "checked out connection was closed", Error::CheckoutNoLongerWanted => "request was canceled", }) } } impl StdError for Error {} impl Checkout { fn poll_waiter( &mut self, cx: &mut task::Context<'_>, ) -> Poll, Error>>> { if let Some(mut rx) = self.waiter.take() { match Pin::new(&mut rx).poll(cx) { Poll::Ready(Ok(value)) => { if value.is_open() { Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) } else { Poll::Ready(Some(Err(Error::CheckedOutClosedValue))) } } Poll::Pending => { self.waiter = Some(rx); Poll::Pending } Poll::Ready(Err(_canceled)) => { Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted))) } } } else { Poll::Ready(None) } } fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { let entry = { let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); let expiration = Expiration::new(inner.timeout); let now = inner.now(); let maybe_entry = 
inner.idle.get_mut(&self.key).and_then(|list| { trace!("take? {:?}: expiration = {:?}", self.key, expiration.0); // A block to end the mutable borrow on list, // so the map below can check is_empty() { let popper = IdlePopper { key: &self.key, list, }; popper.pop(&expiration, now) } .map(|e| (e, list.is_empty())) }); let (entry, empty) = if let Some((e, empty)) = maybe_entry { (Some(e), empty) } else { // No entry found means nuke the list for sure. (None, true) }; if empty { //TODO: This could be done with the HashMap::entry API instead. inner.idle.remove(&self.key); } if entry.is_none() && self.waiter.is_none() { let (tx, mut rx) = oneshot::channel(); trace!("checkout waiting for idle connection: {:?}", self.key); inner .waiters .entry(self.key.clone()) .or_insert_with(VecDeque::new) .push_back(tx); // register the waker with this oneshot assert!(Pin::new(&mut rx).poll(cx).is_pending()); self.waiter = Some(rx); } entry }; entry.map(|e| self.pool.reuse(&self.key, e.value)) } } impl Future for Checkout { type Output = Result, Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ return Poll::Ready(Ok(pooled)); } if let Some(pooled) = self.checkout(cx) { Poll::Ready(Ok(pooled)) } else if !self.pool.is_enabled() { Poll::Ready(Err(Error::PoolDisabled)) } else { // There's a new waiter, already registered in self.checkout() debug_assert!(self.waiter.is_some()); Poll::Pending } } } impl Drop for Checkout { fn drop(&mut self) { if self.waiter.take().is_some() { trace!("checkout dropped for {:?}", self.key); if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { inner.clean_waiters(&self.key); } } } } // FIXME: allow() required due to `impl Trait` leaking types to this lint #[allow(missing_debug_implementations)] pub struct Connecting { key: K, pool: WeakOpt>>, } impl Connecting { pub fn alpn_h2(self, pool: &Pool) -> Option { debug_assert!( self.pool.0.is_none(), "Connecting::alpn_h2 but already Http2" ); pool.connecting(&self.key, Ver::Http2) } } impl Drop for Connecting { fn drop(&mut self) { if let Some(pool) = self.pool.upgrade() { // No need to panic on drop, that could abort! if let Ok(mut inner) = pool.lock() { inner.connected(&self.key); } } } } struct Expiration(Option); impl Expiration { fn new(dur: Option) -> Expiration { Expiration(dur) } fn expires(&self, instant: Instant, now: Instant) -> bool { match self.0 { // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. Some(timeout) => now.saturating_duration_since(instant) > timeout, None => false, } } } struct IdleTask { timer: Timer, duration: Duration, pool: WeakOpt>>, // This allows the IdleTask to be notified as soon as the entire // Pool is fully dropped, and shutdown. This channel is never sent on, // but Err(Canceled) will be received when the Pool is dropped. 
pool_drop_notifier: oneshot::Receiver, } impl IdleTask { async fn run(self) { use futures_util::future; let mut sleep = self.timer.sleep_until(self.timer.now() + self.duration); let mut on_pool_drop = self.pool_drop_notifier; loop { match future::select(&mut on_pool_drop, &mut sleep).await { future::Either::Left(_) => { // pool dropped, bah-bye break; } future::Either::Right(((), _)) => { if let Some(inner) = self.pool.upgrade() { if let Ok(mut inner) = inner.lock() { trace!("idle interval checking for expired"); inner.clear_expired(); } } let deadline = self.timer.now() + self.duration; self.timer.reset(&mut sleep, deadline); } } } trace!("pool closed, canceling idle interval"); return; } } impl WeakOpt { fn none() -> Self { WeakOpt(None) } fn downgrade(arc: &Arc) -> Self { WeakOpt(Some(Arc::downgrade(arc))) } fn upgrade(&self) -> Option> { self.0.as_ref().and_then(Weak::upgrade) } } #[cfg(test)] mod tests { use std::fmt::Debug; use std::future::Future; use std::hash::Hash; use std::pin::Pin; use std::task::{self, Poll}; use std::time::Duration; use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; use crate::rt::{TokioExecutor, TokioTimer}; use crate::common::timer; #[derive(Clone, Debug, PartialEq, Eq, Hash)] struct KeyImpl(http::uri::Scheme, http::uri::Authority); type KeyTuple = (http::uri::Scheme, http::uri::Authority); /// Test unique reservations. 
#[derive(Debug, PartialEq, Eq)] struct Uniq(T); impl Poolable for Uniq { fn is_open(&self) -> bool { true } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } fn c(key: K) -> Connecting { Connecting { key, pool: WeakOpt::none(), } } fn host_key(s: &str) -> KeyImpl { KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) } fn pool_no_timer() -> Pool { pool_max_idle_no_timer(usize::MAX) } fn pool_max_idle_no_timer(max_idle: usize) -> Pool { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(100)), max_idle_per_host: max_idle, }, TokioExecutor::new(), Option::::None, ); pool.no_timer(); pool } #[tokio::test] async fn test_pool_checkout_smoke() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); match pool.checkout(key).await { Ok(pooled) => assert_eq!(*pooled, Uniq(41)), Err(_) => panic!("not ready"), }; } /// Helper to check if the future is ready after polling once. 
struct PollOnce<'a, F>(&'a mut F); impl Future for PollOnce<'_, F> where F: Future> + Unpin, { type Output = Option<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match Pin::new(&mut self.0).poll(cx) { Poll::Ready(Ok(_)) => Poll::Ready(Some(())), Poll::Ready(Err(_)) => Poll::Ready(Some(())), Poll::Pending => Poll::Ready(None), } } } #[tokio::test] async fn test_pool_checkout_returns_none_if_expired() { let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); drop(pooled); tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key); let poll_once = PollOnce(&mut checkout); let is_not_ready = poll_once.await.is_none(); assert!(is_not_ready); } #[tokio::test] async fn test_pool_checkout_removes_expired() { let pool = pool_no_timer(); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); tokio::time::sleep(pool.locked().timeout.unwrap()).await; let mut checkout = pool.checkout(key.clone()); let poll_once = PollOnce(&mut checkout); // checkout.await should clean out the expired poll_once.await; assert!(!pool.locked().idle.contains_key(&key)); } #[test] fn test_pool_max_idle_per_host() { let pool = pool_max_idle_no_timer(2); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); // pooled and dropped 3, max_idle should only allow 2 assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(2) ); } #[tokio::test] async fn test_pool_timer_removes_expired_realtime() { test_pool_timer_removes_expired_inner().await } #[tokio::test(start_paused = true)] async fn test_pool_timer_removes_expired_faketime() { test_pool_timer_removes_expired_inner().await } async fn 
test_pool_timer_removes_expired_inner() { let pool = Pool::new( super::Config { idle_timeout: Some(Duration::from_millis(10)), max_idle_per_host: usize::MAX, }, TokioExecutor::new(), Some(TokioTimer::new()), ); let key = host_key("foo"); pool.pooled(c(key.clone()), Uniq(41)); pool.pooled(c(key.clone()), Uniq(5)); pool.pooled(c(key.clone()), Uniq(99)); assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); // Let the timer tick passed the expiration... tokio::time::sleep(Duration::from_millis(30)).await; // But minimum interval is higher, so nothing should have been reaped assert_eq!( pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3) ); // Now wait passed the minimum interval more tokio::time::sleep(Duration::from_millis(70)).await; // Yield in case other task hasn't been able to run :shrug: tokio::task::yield_now().await; assert!(!pool.locked().idle.contains_key(&key)); } #[tokio::test] async fn test_pool_checkout_task_unparked() { use futures_util::future::join; use futures_util::FutureExt; let pool = pool_no_timer(); let key = host_key("foo"); let pooled = pool.pooled(c(key.clone()), Uniq(41)); let checkout = join(pool.checkout(key), async { // the checkout future will park first, // and then this lazy future will be polled, which will insert // the pooled back into the pool // // this test makes sure that doing so will unpark the checkout drop(pooled); }) .map(|(entry, _)| entry); assert_eq!(*checkout.await.unwrap(), Uniq(41)); } #[tokio::test] async fn test_pool_checkout_drop_cleans_up_waiters() { let pool = pool_no_timer::, KeyImpl>(); let key = host_key("foo"); let mut checkout1 = pool.checkout(key.clone()); let mut checkout2 = pool.checkout(key.clone()); let poll_once1 = PollOnce(&mut checkout1); let poll_once2 = PollOnce(&mut checkout2); // first poll needed to get into Pool's parked poll_once1.await; assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); poll_once2.await; 
assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); // on drop, clean up Pool drop(checkout1); assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); drop(checkout2); assert!(!pool.locked().waiters.contains_key(&key)); } #[derive(Debug)] struct CanClose { #[allow(unused)] val: i32, closed: bool, } impl Poolable for CanClose { fn is_open(&self) -> bool { !self.closed } fn reserve(self) -> Reservation { Reservation::Unique(self) } fn can_share(&self) -> bool { false } } #[test] fn pooled_drop_if_closed_doesnt_reinsert() { let pool = pool_no_timer(); let key = host_key("foo"); pool.pooled( c(key.clone()), CanClose { val: 57, closed: true, }, ); assert!(!pool.locked().idle.contains_key(&key)); } } hyper-util-0.1.19/src/client/mod.rs000064400000000000000000000003511046102023000152070ustar 00000000000000//! HTTP client utilities /// Legacy implementations of `connect` module and `Client` #[cfg(feature = "client-legacy")] pub mod legacy; #[cfg(feature = "client-pool")] pub mod pool; #[cfg(feature = "client-proxy")] pub mod proxy; hyper-util-0.1.19/src/client/pool/cache.rs000064400000000000000000000355121046102023000164530ustar 00000000000000//! A cache of services //! //! The cache is a single list of cached services, bundled with a `MakeService`. //! Calling the cache returns either an existing service, or makes a new one. //! The returned `impl Service` can be used to send requests, and when dropped, //! it will try to be returned back to the cache. pub use self::internal::builder; #[cfg(docsrs)] pub use self::internal::Builder; #[cfg(docsrs)] pub use self::internal::Cache; #[cfg(docsrs)] pub use self::internal::Cached; // For now, nothing else in this module is nameable. We can always make things // more public, but we can't change type shapes (generics) once things are // public. 
mod internal { use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Mutex, Weak}; use std::task::{self, Poll}; use futures_core::ready; use futures_util::future; use tokio::sync::oneshot; use tower_service::Service; use super::events; /// Start a builder to construct a `Cache` pool. pub fn builder() -> Builder { Builder { events: events::Ignore, } } /// A cache pool of services from the inner make service. /// /// Created with [`builder()`]. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Debug)] pub struct Cache where M: Service, { connector: M, shared: Arc>>, events: Ev, } /// A builder to configure a `Cache`. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Debug)] pub struct Builder { events: Ev, } /// A cached service returned from a [`Cache`]. /// /// Implements `Service` by delegating to the inner service. Once dropped, /// tries to reinsert into the `Cache`. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. pub struct Cached { is_closed: bool, inner: Option, shared: Weak>>, // todo: on_idle } pub enum CacheFuture where M: Service, { Racing { shared: Arc>>, select: future::Select, M::Future>, events: Ev, }, Connecting { // TODO: could be Weak even here... shared: Arc>>, future: M::Future, }, Cached { svc: Option>, }, } // shouldn't be pub #[derive(Debug)] pub struct Shared { services: Vec, waiters: Vec>, } // impl Builder impl Builder { /// Provide a `Future` executor to be used by the `Cache`. 
/// /// The executor is used handle some optional background tasks that /// can improve the behavior of the cache, such as reducing connection /// thrashing when a race is won. If not configured with an executor, /// the default behavior is to ignore any of these optional background /// tasks. /// /// The executor should implmenent [`hyper::rt::Executor`]. /// /// # Example /// /// ```rust /// # #[cfg(feature = "tokio")] /// # fn run() { /// let builder = hyper_util::client::pool::cache::builder() /// .executor(hyper_util::rt::TokioExecutor::new()); /// # } /// ``` pub fn executor(self, exec: E) -> Builder> { Builder { events: events::WithExecutor(exec), } } /// Build a `Cache` pool around the `connector`. pub fn build(self, connector: M) -> Cache where M: Service, { Cache { connector, events: self.events, shared: Arc::new(Mutex::new(Shared { services: Vec::new(), waiters: Vec::new(), })), } } } // impl Cache impl Cache where M: Service, { /// Retain all cached services indicated by the predicate. pub fn retain(&mut self, predicate: F) where F: FnMut(&mut M::Response) -> bool, { self.shared.lock().unwrap().services.retain_mut(predicate); } /// Check whether this cache has no cached services. pub fn is_empty(&self) -> bool { self.shared.lock().unwrap().services.is_empty() } } impl Service for Cache where M: Service, M::Future: Unpin, M::Response: Unpin, Ev: events::Events> + Clone + Unpin, { type Response = Cached; type Error = M::Error; type Future = CacheFuture; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { if !self.shared.lock().unwrap().services.is_empty() { Poll::Ready(Ok(())) } else { self.connector.poll_ready(cx) } } fn call(&mut self, target: Dst) -> Self::Future { // 1. If already cached, easy! 
let waiter = { let mut locked = self.shared.lock().unwrap(); if let Some(found) = locked.take() { return CacheFuture::Cached { svc: Some(Cached::new(found, Arc::downgrade(&self.shared))), }; } let (tx, rx) = oneshot::channel(); locked.waiters.push(tx); rx }; // 2. Otherwise, we start a new connect, and also listen for // any newly idle. CacheFuture::Racing { shared: self.shared.clone(), select: future::select(waiter, self.connector.call(target)), events: self.events.clone(), } } } impl Clone for Cache where M: Service + Clone, Ev: Clone, { fn clone(&self) -> Self { Self { connector: self.connector.clone(), events: self.events.clone(), shared: self.shared.clone(), } } } impl Future for CacheFuture where M: Service, M::Future: Unpin, M::Response: Unpin, Ev: events::Events> + Unpin, { type Output = Result, M::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { loop { match &mut *self.as_mut() { CacheFuture::Racing { shared, select, events, } => { match ready!(Pin::new(select).poll(cx)) { future::Either::Left((Err(_pool_closed), connecting)) => { // pool was dropped, so we'll never get it from a waiter, // but if this future still exists, then the user still // wants a connection. 
just wait for the connecting *self = CacheFuture::Connecting { shared: shared.clone(), future: connecting, }; } future::Either::Left((Ok(pool_got), connecting)) => { events.on_race_lost(BackgroundConnect { future: connecting, shared: Arc::downgrade(&shared), }); return Poll::Ready(Ok(Cached::new( pool_got, Arc::downgrade(&shared), ))); } future::Either::Right((connected, _waiter)) => { let inner = connected?; return Poll::Ready(Ok(Cached::new( inner, Arc::downgrade(&shared), ))); } } } CacheFuture::Connecting { shared, future } => { let inner = ready!(Pin::new(future).poll(cx))?; return Poll::Ready(Ok(Cached::new(inner, Arc::downgrade(&shared)))); } CacheFuture::Cached { svc } => { return Poll::Ready(Ok(svc.take().unwrap())); } } } } } // impl Cached impl Cached { fn new(inner: S, shared: Weak>>) -> Self { Cached { is_closed: false, inner: Some(inner), shared, } } // TODO: inner()? looks like `tower` likes `get_ref()` and `get_mut()`. /// Get a reference to the inner service. pub fn inner(&self) -> &S { self.inner.as_ref().expect("inner only taken in drop") } /// Get a mutable reference to the inner service. 
pub fn inner_mut(&mut self) -> &mut S { self.inner.as_mut().expect("inner only taken in drop") } } impl Service for Cached where S: Service, { type Response = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.inner.as_mut().unwrap().poll_ready(cx).map_err(|err| { self.is_closed = true; err }) } fn call(&mut self, req: Req) -> Self::Future { self.inner.as_mut().unwrap().call(req) } } impl Drop for Cached { fn drop(&mut self) { if self.is_closed { return; } if let Some(value) = self.inner.take() { if let Some(shared) = self.shared.upgrade() { if let Ok(mut shared) = shared.lock() { shared.put(value); } } } } } impl fmt::Debug for Cached { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Cached") .field(self.inner.as_ref().unwrap()) .finish() } } // impl Shared impl Shared { fn put(&mut self, val: V) { let mut val = Some(val); while let Some(tx) = self.waiters.pop() { if !tx.is_closed() { match tx.send(val.take().unwrap()) { Ok(()) => break, Err(v) => { val = Some(v); } } } } if let Some(val) = val { self.services.push(val); } } fn take(&mut self) -> Option { // TODO: take in a loop self.services.pop() } } pub struct BackgroundConnect { future: CF, shared: Weak>>, } impl Future for BackgroundConnect where CF: Future> + Unpin, { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match ready!(Pin::new(&mut self.future).poll(cx)) { Ok(svc) => { if let Some(shared) = self.shared.upgrade() { if let Ok(mut locked) = shared.lock() { locked.put(svc); } } Poll::Ready(()) } Err(_e) => Poll::Ready(()), } } } } mod events { #[derive(Clone, Debug)] #[non_exhaustive] pub struct Ignore; #[derive(Clone, Debug)] pub struct WithExecutor(pub(super) E); pub trait Events { fn on_race_lost(&self, fut: CF); } impl Events for Ignore { fn on_race_lost(&self, _fut: CF) {} } impl Events for WithExecutor where E: hyper::rt::Executor, { fn 
on_race_lost(&self, fut: CF) { self.0.execute(fut); } } } #[cfg(test)] mod tests { use futures_util::future; use tower_service::Service; use tower_test::assert_request_eq; #[tokio::test] async fn test_makes_svc_when_empty() { let (mock, mut handle) = tower_test::mock::pair(); let mut cache = super::builder().build(mock); handle.allow(1); crate::common::future::poll_fn(|cx| cache.poll_ready(cx)) .await .unwrap(); let f = cache.call(1); future::join(f, async move { assert_request_eq!(handle, 1).send_response("one"); }) .await .0 .expect("call"); } #[tokio::test] async fn test_reuses_after_idle() { let (mock, mut handle) = tower_test::mock::pair(); let mut cache = super::builder().build(mock); // only 1 connection should ever be made handle.allow(1); crate::common::future::poll_fn(|cx| cache.poll_ready(cx)) .await .unwrap(); let f = cache.call(1); let cached = future::join(f, async { assert_request_eq!(handle, 1).send_response("one"); }) .await .0 .expect("call"); drop(cached); crate::common::future::poll_fn(|cx| cache.poll_ready(cx)) .await .unwrap(); let f = cache.call(1); let cached = f.await.expect("call"); drop(cached); } } hyper-util-0.1.19/src/client/pool/map.rs000064400000000000000000000130651046102023000161640ustar 00000000000000//! Map pool utilities //! //! The map isn't a typical `Service`, but rather stand-alone type that can map //! requests to a key and service factory. This is because the service is more //! of a router, and cannot determine which inner service to check for //! backpressure since it's not know until the request is made. //! //! The map implementation allows customization of extracting a key, and how to //! construct a MakeService for that key. //! //! # Example //! //! ```rust,ignore //! # async fn run() { //! # use hyper_util::client::pool; //! # let req = http::Request::new(()); //! # let some_http1_connector = || { //! # tower::service::service_fn(|_req| async { Ok::<_, &'static str>(()) }) //! # }; //! 
let mut map = pool::map::Map::builder() //! .keys(|uri| (uri.scheme().clone(), uri.authority().clone())) //! .values(|_uri| { //! some_http1_connector() //! }) //! .build(); //! //! let resp = map.service(req.uri()).call(req).await; //! # } //! ``` use std::collections::HashMap; // expose the documentation #[cfg(docsrs)] pub use self::builder::Builder; /// A map caching `MakeService`s per key. /// /// Create one with the [`Map::builder()`]. pub struct Map where T: target::Target, { map: HashMap, targeter: T, } // impl Map impl Map { /// Create a [`Builder`] to configure a new `Map`. pub fn builder() -> builder::Builder { builder::Builder::new() } } impl Map where T: target::Target, { fn new(targeter: T) -> Self { Map { map: HashMap::new(), targeter, } } } impl Map where T: target::Target, T::Key: Eq + std::hash::Hash, { /// Get a service after extracting the key from `req`. pub fn service(&mut self, req: &Req) -> &mut T::Service { let key = self.targeter.key(req); self.map .entry(key) .or_insert_with(|| self.targeter.service(req)) } /// Retains only the services specified by the predicate. pub fn retain(&mut self, predicate: F) where F: FnMut(&T::Key, &mut T::Service) -> bool, { self.map.retain(predicate); } /// Clears the map, removing all key-value pairs. pub fn clear(&mut self) { self.map.clear(); } } // sealed and unnameable for now mod target { pub trait Target { type Key; type Service; fn key(&self, dst: &Dst) -> Self::Key; fn service(&self, dst: &Dst) -> Self::Service; } } // sealed and unnameable for now mod builder { use std::marker::PhantomData; /// A builder to configure a `Map`. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. 
pub struct Builder { _dst: PhantomData, keys: K, svcs: S, } pub struct WantsKeyer; pub struct WantsServiceMaker; pub enum StartHere {} pub struct Built { keys: K, svcs: S, } impl Builder { pub(super) fn new() -> Self { Builder { _dst: PhantomData, keys: WantsKeyer, svcs: WantsServiceMaker, } } } impl Builder { /// Provide a closure that extracts a pool key for the destination. pub fn keys(self, keyer: K) -> Builder where K: Fn(&Dst) -> KK, { Builder { _dst: PhantomData, keys: keyer, svcs: self.svcs, } } } impl Builder { /// Provide a closure to create a new `MakeService` for the destination. pub fn values(self, svcs: S) -> Builder where S: Fn(&Dst) -> SS, { Builder { _dst: PhantomData, keys: self.keys, svcs, } } } impl Builder where Built: super::target::Target, as super::target::Target>::Key: Eq + std::hash::Hash, { /// Build the `Map` pool. pub fn build(self) -> super::Map, Dst> { super::Map::new(Built { keys: self.keys, svcs: self.svcs, }) } } impl super::target::Target for StartHere { type Key = StartHere; type Service = StartHere; fn key(&self, _: &StartHere) -> Self::Key { match *self {} } fn service(&self, _: &StartHere) -> Self::Service { match *self {} } } impl super::target::Target for Built where K: Fn(&Dst) -> KK, S: Fn(&Dst) -> SS, KK: Eq + std::hash::Hash, { type Key = KK; type Service = SS; fn key(&self, dst: &Dst) -> Self::Key { (self.keys)(dst) } fn service(&self, dst: &Dst) -> Self::Service { (self.svcs)(dst) } } } #[cfg(test)] mod tests { #[test] fn smoke() { let mut pool = super::Map::builder().keys(|_| "a").values(|_| "b").build(); pool.service(&"hello"); } } hyper-util-0.1.19/src/client/pool/mod.rs000064400000000000000000000005031046102023000161570ustar 00000000000000//! Composable pool services //! //! This module contains various concepts of a connection pool separated into //! their own concerns. This allows for users to compose the layers, along with //! any other layers, when constructing custom connection pools. 
pub mod cache; pub mod map; pub mod negotiate; pub mod singleton; hyper-util-0.1.19/src/client/pool/negotiate.rs000064400000000000000000000456271046102023000173770ustar 00000000000000//! Negotiate a pool of services //! //! The negotiate pool allows for a service that can decide between two service //! types based on an intermediate return value. It differs from typical //! routing since it doesn't depend on the request, but the response. //! //! The original use case is support ALPN upgrades to HTTP/2, with a fallback //! to HTTP/1. //! //! # Example //! //! ```rust,ignore //! # async fn run() -> Result<(), Box> { //! # struct Conn; //! # impl Conn { fn negotiated_protocol(&self) -> &[u8] { b"h2" } } //! # let some_tls_connector = tower::service::service_fn(|_| async move { //! # Ok::<_, std::convert::Infallible>(Conn) //! # }); //! # let http1_layer = tower::layer::layer_fn(|s| s); //! # let http2_layer = tower::layer::layer_fn(|s| s); //! let mut pool = hyper_util::client::pool::negotiate::builder() //! .connect(some_tls_connector) //! .inspect(|c| c.negotiated_protocol() == b"h2") //! .fallback(http1_layer) //! .upgrade(http2_layer) //! .build(); //! //! // connect //! let mut svc = pool.call(http::Uri::from_static("https://hyper.rs")).await?; //! svc.ready().await; //! //! // http1 or http2 is now set up //! # let some_http_req = http::Request::new(()); //! let resp = svc.call(some_http_req).await?; //! # Ok(()) //! # } //! ``` pub use self::internal::builder; #[cfg(docsrs)] pub use self::internal::Builder; #[cfg(docsrs)] pub use self::internal::Negotiate; #[cfg(docsrs)] pub use self::internal::Negotiated; mod internal { use std::future::Future; use std::pin::Pin; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; use futures_core::ready; use pin_project_lite::pin_project; use tower_layer::Layer; use tower_service::Service; type BoxError = Box; /// A negotiating pool over an inner make service. /// /// Created with [`builder()`]. 
/// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Clone)] pub struct Negotiate { left: L, right: R, } /// A negotiated service returned by [`Negotiate`]. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Clone, Debug)] pub enum Negotiated { #[doc(hidden)] Fallback(L), #[doc(hidden)] Upgraded(R), } pin_project! { pub struct Negotiating where L: Service, R: Service<()>, { #[pin] state: State, left: L, right: R, } } pin_project! { #[project = StateProj] enum State { Eager { #[pin] future: FR, dst: Option, }, Fallback { #[pin] future: FL, }, Upgrade { #[pin] future: FR, } } } pin_project! { #[project = NegotiatedProj] pub enum NegotiatedFuture { Fallback { #[pin] future: L }, Upgraded { #[pin] future: R }, } } /// A builder to configure a `Negotiate`. /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Debug)] pub struct Builder { connect: C, inspect: I, fallback: L, upgrade: R, } #[derive(Debug)] pub struct WantsConnect; #[derive(Debug)] pub struct WantsInspect; #[derive(Debug)] pub struct WantsFallback; #[derive(Debug)] pub struct WantsUpgrade; /// Start a builder to construct a `Negotiate` pool. pub fn builder() -> Builder { Builder { connect: WantsConnect, inspect: WantsInspect, fallback: WantsFallback, upgrade: WantsUpgrade, } } impl Builder { /// Provide the initial connector. pub fn connect(self, connect: CC) -> Builder { Builder { connect, inspect: self.inspect, fallback: self.fallback, upgrade: self.upgrade, } } /// Provide the inspector that determines the result of the negotiation. 
pub fn inspect(self, inspect: II) -> Builder { Builder { connect: self.connect, inspect, fallback: self.fallback, upgrade: self.upgrade, } } /// Provide the layer to fallback to if negotiation fails. pub fn fallback(self, fallback: LL) -> Builder { Builder { connect: self.connect, inspect: self.inspect, fallback, upgrade: self.upgrade, } } /// Provide the layer to upgrade to if negotiation succeeds. pub fn upgrade(self, upgrade: RR) -> Builder { Builder { connect: self.connect, inspect: self.inspect, fallback: self.fallback, upgrade, } } /// Build the `Negotiate` pool. pub fn build(self) -> Negotiate where C: Service, C::Error: Into, L: Layer>, L::Service: Service + Clone, >::Error: Into, R: Layer>, R::Service: Service<()> + Clone, >::Error: Into, I: Fn(&C::Response) -> bool + Clone, { let Builder { connect, inspect, fallback, upgrade, } = self; let slot = Arc::new(Mutex::new(None)); let wrapped = Inspector { svc: connect, inspect, slot: slot.clone(), }; let left = fallback.layer(wrapped); let right = upgrade.layer(Inspected { slot }); Negotiate { left, right } } } impl Negotiate { /// Get a mutable reference to the fallback service. pub fn fallback_mut(&mut self) -> &mut L { &mut self.left } /// Get a mutable reference to the upgrade service. pub fn upgrade_mut(&mut self) -> &mut R { &mut self.right } } impl Service for Negotiate where L: Service + Clone, L::Error: Into, R: Service<()> + Clone, R::Error: Into, { type Response = Negotiated; type Error = BoxError; type Future = Negotiating; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.left.poll_ready(cx).map_err(Into::into) } fn call(&mut self, dst: Target) -> Self::Future { let left = self.left.clone(); Negotiating { state: State::Eager { future: self.right.call(()), dst: Some(dst), }, // place clone, take original that we already polled-ready. 
left: std::mem::replace(&mut self.left, left), right: self.right.clone(), } } } impl Future for Negotiating where L: Service, L::Error: Into, R: Service<()>, R::Error: Into, { type Output = Result, BoxError>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { // States: // - `Eager`: try the "right" path first; on `UseOther` sentinel, fall back to left. // - `Fallback`: try the left path; on `UseOther` sentinel, upgrade back to right. // - `Upgrade`: retry the right path after a fallback. // If all fail, give up. let mut me = self.project(); loop { match me.state.as_mut().project() { StateProj::Eager { future, dst } => match ready!(future.poll(cx)) { Ok(out) => return Poll::Ready(Ok(Negotiated::Upgraded(out))), Err(err) => { let err = err.into(); if UseOther::is(&*err) { let dst = dst.take().unwrap(); let f = me.left.call(dst); me.state.set(State::Fallback { future: f }); continue; } else { return Poll::Ready(Err(err)); } } }, StateProj::Fallback { future } => match ready!(future.poll(cx)) { Ok(out) => return Poll::Ready(Ok(Negotiated::Fallback(out))), Err(err) => { let err = err.into(); if UseOther::is(&*err) { let f = me.right.call(()); me.state.set(State::Upgrade { future: f }); continue; } else { return Poll::Ready(Err(err)); } } }, StateProj::Upgrade { future } => match ready!(future.poll(cx)) { Ok(out) => return Poll::Ready(Ok(Negotiated::Upgraded(out))), Err(err) => return Poll::Ready(Err(err.into())), }, } } } } impl Negotiated { // Could be useful? #[cfg(test)] pub(super) fn is_fallback(&self) -> bool { matches!(self, Negotiated::Fallback(_)) } #[cfg(test)] pub(super) fn is_upgraded(&self) -> bool { matches!(self, Negotiated::Upgraded(_)) } // TODO: are these the correct methods? Or .as_ref().fallback(), etc? /// Get a reference to the fallback service if this is it. 
pub fn fallback_ref(&self) -> Option<&L> { if let Negotiated::Fallback(ref left) = self { Some(left) } else { None } } /// Get a mutable reference to the fallback service if this is it. pub fn fallback_mut(&mut self) -> Option<&mut L> { if let Negotiated::Fallback(ref mut left) = self { Some(left) } else { None } } /// Get a reference to the upgraded service if this is it. pub fn upgraded_ref(&self) -> Option<&R> { if let Negotiated::Upgraded(ref right) = self { Some(right) } else { None } } /// Get a mutable reference to the upgraded service if this is it. pub fn upgraded_mut(&mut self) -> Option<&mut R> { if let Negotiated::Upgraded(ref mut right) = self { Some(right) } else { None } } } impl Service for Negotiated where L: Service, R: Service, { type Response = Res; type Error = E; type Future = NegotiatedFuture; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { match self { Negotiated::Fallback(ref mut s) => s.poll_ready(cx), Negotiated::Upgraded(ref mut s) => s.poll_ready(cx), } } fn call(&mut self, req: Req) -> Self::Future { match self { Negotiated::Fallback(ref mut s) => NegotiatedFuture::Fallback { future: s.call(req), }, Negotiated::Upgraded(ref mut s) => NegotiatedFuture::Upgraded { future: s.call(req), }, } } } impl Future for NegotiatedFuture where L: Future, R: Future, { type Output = Out; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { NegotiatedProj::Fallback { future } => future.poll(cx), NegotiatedProj::Upgraded { future } => future.poll(cx), } } } // ===== internal ===== pub struct Inspector { svc: M, inspect: I, slot: Arc>>, } pin_project! 
{ pub struct InspectFuture { #[pin] future: F, inspect: I, slot: Arc>>, } } impl Clone for Inspector { fn clone(&self) -> Self { Self { svc: self.svc.clone(), inspect: self.inspect.clone(), slot: self.slot.clone(), } } } impl Service for Inspector where M: Service, M::Error: Into, I: Clone + Fn(&S) -> bool, { type Response = M::Response; type Error = BoxError; type Future = InspectFuture; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { self.svc.poll_ready(cx).map_err(Into::into) } fn call(&mut self, dst: Target) -> Self::Future { InspectFuture { future: self.svc.call(dst), inspect: self.inspect.clone(), slot: self.slot.clone(), } } } impl Future for InspectFuture where F: Future>, E: Into, I: Fn(&S) -> bool, { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let me = self.project(); let s = ready!(me.future.poll(cx)).map_err(Into::into)?; Poll::Ready(if (me.inspect)(&s) { *me.slot.lock().unwrap() = Some(s); Err(UseOther.into()) } else { Ok(s) }) } } pub struct Inspected { slot: Arc>>, } impl Service for Inspected { type Response = S; type Error = BoxError; type Future = std::future::Ready>; fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { if self.slot.lock().unwrap().is_some() { Poll::Ready(Ok(())) } else { Poll::Ready(Err(UseOther.into())) } } fn call(&mut self, _dst: Target) -> Self::Future { let s = self .slot .lock() .unwrap() .take() .ok_or_else(|| UseOther.into()); std::future::ready(s) } } impl Clone for Inspected { fn clone(&self) -> Inspected { Inspected { slot: self.slot.clone(), } } } #[derive(Debug)] struct UseOther; impl std::fmt::Display for UseOther { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("sentinel error; using other") } } impl std::error::Error for UseOther {} impl UseOther { fn is(err: &(dyn std::error::Error + 'static)) -> bool { let mut source = Some(err); while let Some(err) = source { if err.is::() { return true; } source = 
err.source(); } false } } } #[cfg(test)] mod tests { use futures_util::future; use tower_service::Service; use tower_test::assert_request_eq; #[tokio::test] async fn not_negotiated_falls_back_to_left() { let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>(); let mut negotiate = super::builder() .connect(mock_svc) .inspect(|_: &&str| false) .fallback(layer_fn(|s| s)) .upgrade(layer_fn(|s| s)) .build(); crate::common::future::poll_fn(|cx| negotiate.poll_ready(cx)) .await .unwrap(); let fut = negotiate.call(()); let nsvc = future::join(fut, async move { assert_request_eq!(handle, ()).send_response("one"); }) .await .0 .expect("call"); assert!(nsvc.is_fallback()); } #[tokio::test] async fn negotiated_uses_right() { let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>(); let mut negotiate = super::builder() .connect(mock_svc) .inspect(|_: &&str| true) .fallback(layer_fn(|s| s)) .upgrade(layer_fn(|s| s)) .build(); crate::common::future::poll_fn(|cx| negotiate.poll_ready(cx)) .await .unwrap(); let fut = negotiate.call(()); let nsvc = future::join(fut, async move { assert_request_eq!(handle, ()).send_response("one"); }) .await .0 .expect("call"); assert!(nsvc.is_upgraded()); } fn layer_fn(f: F) -> LayerFn { LayerFn(f) } #[derive(Clone)] struct LayerFn(F); impl tower_layer::Layer for LayerFn where F: Fn(S) -> Out, { type Service = Out; fn layer(&self, inner: S) -> Self::Service { (self.0)(inner) } } } hyper-util-0.1.19/src/client/pool/singleton.rs000064400000000000000000000374531046102023000174200ustar 00000000000000//! Singleton pools //! //! This ensures that only one active connection is made. //! //! The singleton pool wraps a `MakeService` so that it only produces a //! single `Service`. It bundles all concurrent calls to it, so that only //! one connection is made. All calls to the singleton will return a clone of //! the inner service once established. //! //! This fits the HTTP/2 case well. //! //! ## Example //! //! 
```rust,ignore //! let mut pool = Singleton::new(some_make_svc); //! //! let svc1 = pool.call(some_dst).await?; //! //! let svc2 = pool.call(some_dst).await?; //! // svc1 == svc2 //! ``` use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; use tokio::sync::oneshot; use tower_service::Service; use self::internal::{DitchGuard, SingletonError, SingletonFuture, State}; type BoxError = Box; #[cfg(docsrs)] pub use self::internal::Singled; /// A singleton pool over an inner service. /// /// The singleton wraps an inner service maker, bundling all calls to ensure /// only one service is created. Once made, it returns clones of the made /// service. #[derive(Debug)] pub struct Singleton where M: Service, { mk_svc: M, state: Arc>>, } impl Singleton where M: Service, M::Response: Clone, { /// Create a new singleton pool over an inner make service. pub fn new(mk_svc: M) -> Self { Singleton { mk_svc, state: Arc::new(Mutex::new(State::Empty)), } } // pub fn clear? cancel? /// Retains the inner made service if specified by the predicate. pub fn retain(&mut self, mut predicate: F) where F: FnMut(&mut M::Response) -> bool, { let mut locked = self.state.lock().unwrap(); match *locked { State::Empty => {} State::Making(..) => {} State::Made(ref mut svc) => { if !predicate(svc) { *locked = State::Empty; } } } } /// Returns whether this singleton pool is empty. /// /// If this pool has created a shared instance, or is currently in the /// process of creating one, this returns false. 
pub fn is_empty(&self) -> bool { matches!(*self.state.lock().unwrap(), State::Empty) } } impl Service for Singleton where M: Service, M::Response: Clone, M::Error: Into, { type Response = internal::Singled; type Error = SingletonError; type Future = SingletonFuture; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { if let State::Empty = *self.state.lock().unwrap() { return self .mk_svc .poll_ready(cx) .map_err(|e| SingletonError(e.into())); } Poll::Ready(Ok(())) } fn call(&mut self, dst: Target) -> Self::Future { let mut locked = self.state.lock().unwrap(); match *locked { State::Empty => { let fut = self.mk_svc.call(dst); *locked = State::Making(Vec::new()); SingletonFuture::Driving { future: fut, singleton: DitchGuard(Arc::downgrade(&self.state)), } } State::Making(ref mut waiters) => { let (tx, rx) = oneshot::channel(); waiters.push(tx); SingletonFuture::Waiting { rx, state: Arc::downgrade(&self.state), } } State::Made(ref svc) => SingletonFuture::Made { svc: Some(svc.clone()), state: Arc::downgrade(&self.state), }, } } } impl Clone for Singleton where M: Service + Clone, { fn clone(&self) -> Self { Self { mk_svc: self.mk_svc.clone(), state: self.state.clone(), } } } // Holds some "pub" items that otherwise shouldn't be public. mod internal { use std::future::Future; use std::pin::Pin; use std::sync::{Mutex, Weak}; use std::task::{self, Poll}; use futures_core::ready; use pin_project_lite::pin_project; use tokio::sync::oneshot; use tower_service::Service; use super::BoxError; pin_project! { #[project = SingletonFutureProj] pub enum SingletonFuture { Driving { #[pin] future: F, singleton: DitchGuard, }, Waiting { rx: oneshot::Receiver, state: Weak>>, }, Made { svc: Option, state: Weak>>, }, } } // XXX: pub because of the enum SingletonFuture #[derive(Debug)] pub enum State { Empty, Making(Vec>), Made(S), } // XXX: pub because of the enum SingletonFuture pub struct DitchGuard(pub(super) Weak>>); /// A cached service returned from a [`Singleton`]. 
/// /// Implements `Service` by delegating to the inner service. If /// `poll_ready` returns an error, this will clear the cache in the related /// `Singleton`. /// /// [`Singleton`]: super::Singleton /// /// # Unnameable /// /// This type is normally unnameable, forbidding naming of the type within /// code. The type is exposed in the documentation to show which methods /// can be publicly called. #[derive(Debug)] pub struct Singled { inner: S, state: Weak>>, } impl Future for SingletonFuture where F: Future>, E: Into, S: Clone, { type Output = Result, SingletonError>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project() { SingletonFutureProj::Driving { future, singleton } => { match ready!(future.poll(cx)) { Ok(svc) => { if let Some(state) = singleton.0.upgrade() { let mut locked = state.lock().unwrap(); match std::mem::replace(&mut *locked, State::Made(svc.clone())) { State::Making(waiters) => { for tx in waiters { let _ = tx.send(svc.clone()); } } State::Empty | State::Made(_) => { // shouldn't happen! 
unreachable!() } } } // take out of the DitchGuard so it doesn't treat as "ditched" let state = std::mem::replace(&mut singleton.0, Weak::new()); Poll::Ready(Ok(Singled::new(svc, state))) } Err(e) => { if let Some(state) = singleton.0.upgrade() { let mut locked = state.lock().unwrap(); singleton.0 = Weak::new(); *locked = State::Empty; } Poll::Ready(Err(SingletonError(e.into()))) } } } SingletonFutureProj::Waiting { rx, state } => match ready!(Pin::new(rx).poll(cx)) { Ok(svc) => Poll::Ready(Ok(Singled::new(svc, state.clone()))), Err(_canceled) => Poll::Ready(Err(SingletonError(Canceled.into()))), }, SingletonFutureProj::Made { svc, state } => { Poll::Ready(Ok(Singled::new(svc.take().unwrap(), state.clone()))) } } } } impl Drop for DitchGuard { fn drop(&mut self) { if let Some(state) = self.0.upgrade() { if let Ok(mut locked) = state.lock() { *locked = State::Empty; } } } } impl Singled { fn new(inner: S, state: Weak>>) -> Self { Singled { inner, state } } } impl Service for Singled where S: Service, { type Response = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { // We notice if the cached service dies, and clear the singleton cache. match self.inner.poll_ready(cx) { Poll::Ready(Err(err)) => { if let Some(state) = self.state.upgrade() { *state.lock().unwrap() = State::Empty; } Poll::Ready(Err(err)) } other => other, } } fn call(&mut self, req: Req) -> Self::Future { self.inner.call(req) } } // An opaque error type. By not exposing the type, nor being specifically // Box, we can _change_ the type once we no longer need the Canceled // error type. This will be possible with the refactor to baton passing. 
#[derive(Debug)] pub struct SingletonError(pub(super) BoxError); impl std::fmt::Display for SingletonError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("singleton connection error") } } impl std::error::Error for SingletonError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { Some(&*self.0) } } #[derive(Debug)] struct Canceled; impl std::fmt::Display for Canceled { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("singleton connection canceled") } } impl std::error::Error for Canceled {} } #[cfg(test)] mod tests { use std::future::Future; use std::pin::Pin; use std::task::Poll; use tower_service::Service; use super::Singleton; #[tokio::test] async fn first_call_drives_subsequent_wait() { let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>(); let mut singleton = Singleton::new(mock_svc); handle.allow(1); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); // First call: should go into Driving let fut1 = singleton.call(()); // Second call: should go into Waiting let fut2 = singleton.call(()); // Expect exactly one request to the inner service let ((), send_response) = handle.next_request().await.unwrap(); send_response.send_response("svc"); // Both futures should resolve to the same value fut1.await.unwrap(); fut2.await.unwrap(); } #[tokio::test] async fn made_state_returns_immediately() { let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>(); let mut singleton = Singleton::new(mock_svc); handle.allow(1); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); // Drive first call to completion let fut1 = singleton.call(()); let ((), send_response) = handle.next_request().await.unwrap(); send_response.send_response("svc"); fut1.await.unwrap(); // Second call should not hit inner service singleton.call(()).await.unwrap(); } #[tokio::test] async fn 
cached_service_poll_ready_error_clears_singleton() { // Outer mock returns an inner mock service let (outer, mut outer_handle) = tower_test::mock::pair::<(), tower_test::mock::Mock<(), &'static str>>(); let mut singleton = Singleton::new(outer); // Allow the singleton to be made outer_handle.allow(2); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); // First call produces an inner mock service let fut1 = singleton.call(()); let ((), send_inner) = outer_handle.next_request().await.unwrap(); let (inner, mut inner_handle) = tower_test::mock::pair::<(), &'static str>(); send_inner.send_response(inner); let mut cached = fut1.await.unwrap(); // Now: allow readiness on the inner mock, then inject error inner_handle.allow(1); // Inject error so next poll_ready fails inner_handle.send_error(std::io::Error::new( std::io::ErrorKind::Other, "cached poll_ready failed", )); // Drive poll_ready on cached service let err = crate::common::future::poll_fn(|cx| cached.poll_ready(cx)) .await .err() .expect("expected poll_ready error"); assert_eq!(err.to_string(), "cached poll_ready failed"); // After error, the singleton should be cleared, so a new call drives outer again outer_handle.allow(1); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); let fut2 = singleton.call(()); let ((), send_inner2) = outer_handle.next_request().await.unwrap(); let (inner2, mut inner_handle2) = tower_test::mock::pair::<(), &'static str>(); send_inner2.send_response(inner2); let mut cached2 = fut2.await.unwrap(); // The new cached service should still work inner_handle2.allow(1); crate::common::future::poll_fn(|cx| cached2.poll_ready(cx)) .await .expect("expected poll_ready"); let cfut2 = cached2.call(()); let ((), send_cached2) = inner_handle2.next_request().await.unwrap(); send_cached2.send_response("svc2"); cfut2.await.unwrap(); } #[tokio::test] async fn cancel_waiter_does_not_affect_others() { let (mock_svc, mut handle) = 
tower_test::mock::pair::<(), &'static str>(); let mut singleton = Singleton::new(mock_svc); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); let fut1 = singleton.call(()); let fut2 = singleton.call(()); drop(fut2); // cancel one waiter let ((), send_response) = handle.next_request().await.unwrap(); send_response.send_response("svc"); fut1.await.unwrap(); } // TODO: this should be able to be improved with a cooperative baton refactor #[tokio::test] async fn cancel_driver_cancels_all() { let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>(); let mut singleton = Singleton::new(mock_svc); crate::common::future::poll_fn(|cx| singleton.poll_ready(cx)) .await .unwrap(); let mut fut1 = singleton.call(()); let fut2 = singleton.call(()); // poll driver just once, and then drop crate::common::future::poll_fn(move |cx| { let _ = Pin::new(&mut fut1).poll(cx); Poll::Ready(()) }) .await; let ((), send_response) = handle.next_request().await.unwrap(); send_response.send_response("svc"); assert_eq!( fut2.await.unwrap_err().0.to_string(), "singleton connection canceled" ); } } hyper-util-0.1.19/src/client/proxy/matcher.rs000064400000000000000000000640701046102023000172440ustar 00000000000000//! Proxy matchers //! //! This module contains different matchers to configure rules for when a proxy //! should be used, and if so, with what arguments. //! //! A [`Matcher`] can be constructed either using environment variables, or //! a [`Matcher::builder()`]. //! //! Once constructed, the `Matcher` can be asked if it intercepts a `Uri` by //! calling [`Matcher::intercept()`]. //! //! An [`Intercept`] includes the destination for the proxy, and any parsed //! authentication to be used. 
use std::fmt; use std::net::IpAddr; use http::header::HeaderValue; use ipnet::IpNet; use percent_encoding::percent_decode_str; #[cfg(docsrs)] pub use self::builder::IntoValue; #[cfg(not(docsrs))] use self::builder::IntoValue; /// A proxy matcher, usually built from environment variables. pub struct Matcher { http: Option, https: Option, no: NoProxy, } /// A matched proxy, /// /// This is returned by a matcher if a proxy should be used. #[derive(Clone)] pub struct Intercept { uri: http::Uri, auth: Auth, } /// A builder to create a [`Matcher`]. /// /// Construct with [`Matcher::builder()`]. #[derive(Default)] pub struct Builder { is_cgi: bool, all: String, http: String, https: String, no: String, } #[derive(Clone)] enum Auth { Empty, Basic(http::header::HeaderValue), Raw(String, String), } /// A filter for proxy matchers. /// /// This type is based off the `NO_PROXY` rules used by curl. #[derive(Clone, Debug, Default)] struct NoProxy { ips: IpMatcher, domains: DomainMatcher, } #[derive(Clone, Debug, Default)] struct DomainMatcher(Vec); #[derive(Clone, Debug, Default)] struct IpMatcher(Vec); #[derive(Clone, Debug)] enum Ip { Address(IpAddr), Network(IpNet), } // ===== impl Matcher ===== impl Matcher { /// Create a matcher reading the current environment variables. /// /// This checks for values in the following variables, treating them the /// same as curl does: /// /// - `ALL_PROXY`/`all_proxy` /// - `HTTPS_PROXY`/`https_proxy` /// - `HTTP_PROXY`/`http_proxy` /// - `NO_PROXY`/`no_proxy` pub fn from_env() -> Self { Builder::from_env().build() } /// Create a matcher from the environment or system. /// /// This checks the same environment variables as `from_env()`, and if not /// set, checks the system configuration for values for the OS. /// /// This constructor is always available, but if the `client-proxy-system` /// feature is enabled, it will check more configuration. 
Use this /// constructor if you want to allow users to optionally enable more, or /// use `from_env` if you do not want the values to change based on an /// enabled feature. pub fn from_system() -> Self { Builder::from_system().build() } /// Start a builder to configure a matcher. pub fn builder() -> Builder { Builder::default() } /// Check if the destination should be intercepted by a proxy. /// /// If the proxy rules match the destination, a new `Uri` will be returned /// to connect to. pub fn intercept(&self, dst: &http::Uri) -> Option { // TODO(perf): don't need to check `no` if below doesn't match... if self.no.contains(dst.host()?) { return None; } match dst.scheme_str() { Some("http") => self.http.clone(), Some("https") => self.https.clone(), _ => None, } } } impl fmt::Debug for Matcher { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut b = f.debug_struct("Matcher"); if let Some(ref http) = self.http { b.field("http", http); } if let Some(ref https) = self.https { b.field("https", https); } if !self.no.is_empty() { b.field("no", &self.no); } b.finish() } } // ===== impl Intercept ===== impl Intercept { /// Get the `http::Uri` for the target proxy. pub fn uri(&self) -> &http::Uri { &self.uri } /// Get any configured basic authorization. /// /// This should usually be used with a `Proxy-Authorization` header, to /// send in Basic format. /// /// # Example /// /// ```rust /// # use hyper_util::client::proxy::matcher::Matcher; /// # let uri = http::Uri::from_static("https://hyper.rs"); /// let m = Matcher::builder() /// .all("https://Aladdin:opensesame@localhost:8887") /// .build(); /// /// let proxy = m.intercept(&uri).expect("example"); /// let auth = proxy.basic_auth().expect("example"); /// assert_eq!(auth, "Basic QWxhZGRpbjpvcGVuc2VzYW1l"); /// ``` pub fn basic_auth(&self) -> Option<&HeaderValue> { if let Auth::Basic(ref val) = self.auth { Some(val) } else { None } } /// Get any configured raw authorization. 
/// /// If not detected as another scheme, this is the username and password /// that should be sent with whatever protocol the proxy handshake uses. /// /// # Example /// /// ```rust /// # use hyper_util::client::proxy::matcher::Matcher; /// # let uri = http::Uri::from_static("https://hyper.rs"); /// let m = Matcher::builder() /// .all("socks5h://Aladdin:opensesame@localhost:8887") /// .build(); /// /// let proxy = m.intercept(&uri).expect("example"); /// let auth = proxy.raw_auth().expect("example"); /// assert_eq!(auth, ("Aladdin", "opensesame")); /// ``` pub fn raw_auth(&self) -> Option<(&str, &str)> { if let Auth::Raw(ref u, ref p) = self.auth { Some((u.as_str(), p.as_str())) } else { None } } } impl fmt::Debug for Intercept { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Intercept") .field("uri", &self.uri) // dont output auth, its sensitive .finish() } } // ===== impl Builder ===== impl Builder { fn from_env() -> Self { Builder { is_cgi: std::env::var_os("REQUEST_METHOD").is_some(), all: get_first_env(&["ALL_PROXY", "all_proxy"]), http: get_first_env(&["HTTP_PROXY", "http_proxy"]), https: get_first_env(&["HTTPS_PROXY", "https_proxy"]), no: get_first_env(&["NO_PROXY", "no_proxy"]), } } fn from_system() -> Self { #[allow(unused_mut)] let mut builder = Self::from_env(); #[cfg(all(feature = "client-proxy-system", target_os = "macos"))] mac::with_system(&mut builder); #[cfg(all(feature = "client-proxy-system", windows))] win::with_system(&mut builder); builder } /// Set the target proxy for all destinations. pub fn all(mut self, val: S) -> Self where S: IntoValue, { self.all = val.into_value(); self } /// Set the target proxy for HTTP destinations. pub fn http(mut self, val: S) -> Self where S: IntoValue, { self.http = val.into_value(); self } /// Set the target proxy for HTTPS destinations. pub fn https(mut self, val: S) -> Self where S: IntoValue, { self.https = val.into_value(); self } /// Set the "no" proxy filter. 
/// /// The rules are as follows: /// * Entries are expected to be comma-separated (whitespace between entries is ignored) /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size, /// for example "`192.168.1.0/24`"). /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed) /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com` /// and `.google.com` are equivalent) and would match both that domain AND all subdomains. /// /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match /// (and therefore would bypass the proxy): /// * `http://google.com/` /// * `http://www.google.com/` /// * `http://192.168.1.42/` /// /// The URL `http://notgoogle.com/` would not match. pub fn no(mut self, val: S) -> Self where S: IntoValue, { self.no = val.into_value(); self } /// Construct a [`Matcher`] using the configured values. pub fn build(self) -> Matcher { if self.is_cgi { return Matcher { http: None, https: None, no: NoProxy::empty(), }; } let all = parse_env_uri(&self.all); Matcher { http: parse_env_uri(&self.http).or_else(|| all.clone()), https: parse_env_uri(&self.https).or(all), no: NoProxy::from_string(&self.no), } } } fn get_first_env(names: &[&str]) -> String { for name in names { if let Ok(val) = std::env::var(name) { return val; } } String::new() } fn parse_env_uri(val: &str) -> Option { use std::borrow::Cow; let uri = val.parse::().ok()?; let mut builder = http::Uri::builder(); let mut is_httpish = false; let mut auth = Auth::Empty; builder = builder.scheme(match uri.scheme() { Some(s) => { if s == &http::uri::Scheme::HTTP || s == &http::uri::Scheme::HTTPS { is_httpish = true; s.clone() } else if matches!(s.as_str(), "socks4" | "socks4a" | "socks5" | "socks5h") { s.clone() } else { // can't use this proxy scheme return None; } } // if no scheme provided, assume they meant 'http' None => { is_httpish = true; 
http::uri::Scheme::HTTP } }); let authority = uri.authority()?; if let Some((userinfo, host_port)) = authority.as_str().split_once('@') { let (user, pass) = match userinfo.split_once(':') { Some((user, pass)) => (user, Some(pass)), None => (userinfo, None), }; let user = percent_decode_str(user).decode_utf8_lossy(); let pass = pass.map(|pass| percent_decode_str(pass).decode_utf8_lossy()); if is_httpish { auth = Auth::Basic(encode_basic_auth(&user, pass.as_deref())); } else { auth = Auth::Raw( user.into_owned(), pass.map_or_else(String::new, Cow::into_owned), ); } builder = builder.authority(host_port); } else { builder = builder.authority(authority.clone()); } // removing any path, but we MUST specify one or the builder errors builder = builder.path_and_query("/"); let dst = builder.build().ok()?; Some(Intercept { uri: dst, auth }) } fn encode_basic_auth(user: &str, pass: Option<&str>) -> HeaderValue { use base64::prelude::BASE64_STANDARD; use base64::write::EncoderWriter; use std::io::Write; let mut buf = b"Basic ".to_vec(); { let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD); let _ = write!(encoder, "{user}:"); if let Some(password) = pass { let _ = write!(encoder, "{password}"); } } let mut header = HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue"); header.set_sensitive(true); header } impl NoProxy { /* fn from_env() -> NoProxy { let raw = std::env::var("NO_PROXY") .or_else(|_| std::env::var("no_proxy")) .unwrap_or_default(); Self::from_string(&raw) } */ fn empty() -> NoProxy { NoProxy { ips: IpMatcher(Vec::new()), domains: DomainMatcher(Vec::new()), } } /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables /// are set) /// The rules are as follows: /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked /// * If neither environment variable is set, `None` is returned /// * Entries are expected to be comma-separated (whitespace between entries 
is ignored) /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size, /// for example "`192.168.1.0/24`"). /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed) /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com` /// and `.google.com` are equivalent) and would match both that domain AND all subdomains. /// /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match /// (and therefore would bypass the proxy): /// * `http://google.com/` /// * `http://www.google.com/` /// * `http://192.168.1.42/` /// /// The URL `http://notgoogle.com/` would not match. pub fn from_string(no_proxy_list: &str) -> Self { let mut ips = Vec::new(); let mut domains = Vec::new(); let parts = no_proxy_list.split(',').map(str::trim); for part in parts { match part.parse::() { // If we can parse an IP net or address, then use it, otherwise, assume it is a domain Ok(ip) => ips.push(Ip::Network(ip)), Err(_) => match part.parse::() { Ok(addr) => ips.push(Ip::Address(addr)), Err(_) => { if !part.trim().is_empty() { domains.push(part.to_owned()) } } }, } } NoProxy { ips: IpMatcher(ips), domains: DomainMatcher(domains), } } /// Return true if this matches the host (domain or IP). pub fn contains(&self, host: &str) -> bool { // According to RFC3986, raw IPv6 hosts will be wrapped in []. 
So we need to strip those off // the end in order to parse correctly let host = if host.starts_with('[') { let x: &[_] = &['[', ']']; host.trim_matches(x) } else { host }; match host.parse::() { // If we can parse an IP addr, then use it, otherwise, assume it is a domain Ok(ip) => self.ips.contains(ip), Err(_) => self.domains.contains(host), } } fn is_empty(&self) -> bool { self.ips.0.is_empty() && self.domains.0.is_empty() } } impl IpMatcher { fn contains(&self, addr: IpAddr) -> bool { for ip in &self.0 { match ip { Ip::Address(address) => { if &addr == address { return true; } } Ip::Network(net) => { if net.contains(&addr) { return true; } } } } false } } impl DomainMatcher { // The following links may be useful to understand the origin of these rules: // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html // * https://github.com/curl/curl/issues/1208 fn contains(&self, domain: &str) -> bool { let domain_len = domain.len(); for d in &self.0 { if d == domain || d.strip_prefix('.') == Some(domain) { return true; } else if domain.ends_with(d) { if d.starts_with('.') { // If the first character of d is a dot, that means the first character of domain // must also be a dot, so we are looking at a subdomain of d and that matches return true; } else if domain.as_bytes().get(domain_len - d.len() - 1) == Some(&b'.') { // Given that d is a prefix of domain, if the prior character in domain is a dot // then that means we must be matching a subdomain of d, and that matches return true; } } else if d == "*" { return true; } } false } } mod builder { /// A type that can used as a `Builder` value. /// /// Private and sealed, only visible in docs. 
pub trait IntoValue { #[doc(hidden)] fn into_value(self) -> String; } impl IntoValue for String { #[doc(hidden)] fn into_value(self) -> String { self } } impl IntoValue for &String { #[doc(hidden)] fn into_value(self) -> String { self.into() } } impl IntoValue for &str { #[doc(hidden)] fn into_value(self) -> String { self.into() } } } #[cfg(feature = "client-proxy-system")] #[cfg(target_os = "macos")] mod mac { use system_configuration::core_foundation::base::{CFType, TCFType, TCFTypeRef}; use system_configuration::core_foundation::dictionary::CFDictionary; use system_configuration::core_foundation::number::CFNumber; use system_configuration::core_foundation::string::{CFString, CFStringRef}; use system_configuration::dynamic_store::SCDynamicStoreBuilder; use system_configuration::sys::schema_definitions::{ kSCPropNetProxiesHTTPEnable, kSCPropNetProxiesHTTPPort, kSCPropNetProxiesHTTPProxy, kSCPropNetProxiesHTTPSEnable, kSCPropNetProxiesHTTPSPort, kSCPropNetProxiesHTTPSProxy, }; pub(super) fn with_system(builder: &mut super::Builder) { let store = SCDynamicStoreBuilder::new("").build(); let proxies_map = if let Some(proxies_map) = store.get_proxies() { proxies_map } else { return; }; if builder.http.is_empty() { let http_proxy_config = parse_setting_from_dynamic_store( &proxies_map, unsafe { kSCPropNetProxiesHTTPEnable }, unsafe { kSCPropNetProxiesHTTPProxy }, unsafe { kSCPropNetProxiesHTTPPort }, ); if let Some(http) = http_proxy_config { builder.http = http; } } if builder.https.is_empty() { let https_proxy_config = parse_setting_from_dynamic_store( &proxies_map, unsafe { kSCPropNetProxiesHTTPSEnable }, unsafe { kSCPropNetProxiesHTTPSProxy }, unsafe { kSCPropNetProxiesHTTPSPort }, ); if let Some(https) = https_proxy_config { builder.https = https; } } } fn parse_setting_from_dynamic_store( proxies_map: &CFDictionary, enabled_key: CFStringRef, host_key: CFStringRef, port_key: CFStringRef, ) -> Option { let proxy_enabled = proxies_map .find(enabled_key) 
.and_then(|flag| flag.downcast::()) .and_then(|flag| flag.to_i32()) .unwrap_or(0) == 1; if proxy_enabled { let proxy_host = proxies_map .find(host_key) .and_then(|host| host.downcast::()) .map(|host| host.to_string()); let proxy_port = proxies_map .find(port_key) .and_then(|port| port.downcast::()) .and_then(|port| port.to_i32()); return match (proxy_host, proxy_port) { (Some(proxy_host), Some(proxy_port)) => Some(format!("{proxy_host}:{proxy_port}")), (Some(proxy_host), None) => Some(proxy_host), (None, Some(_)) => None, (None, None) => None, }; } None } } #[cfg(feature = "client-proxy-system")] #[cfg(windows)] mod win { pub(super) fn with_system(builder: &mut super::Builder) { let settings = if let Ok(settings) = windows_registry::CURRENT_USER .open("Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings") { settings } else { return; }; if settings.get_u32("ProxyEnable").unwrap_or(0) == 0 { return; } if let Ok(val) = settings.get_string("ProxyServer") { if builder.http.is_empty() { builder.http = val.clone(); } if builder.https.is_empty() { builder.https = val; } } if builder.no.is_empty() { if let Ok(val) = settings.get_string("ProxyOverride") { builder.no = val .split(';') .map(|s| s.trim()) .collect::>() .join(",") .replace("*.", ""); } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_domain_matcher() { let domains = vec![".foo.bar".into(), "bar.foo".into()]; let matcher = DomainMatcher(domains); // domains match with leading `.` assert!(matcher.contains("foo.bar")); // subdomains match with leading `.` assert!(matcher.contains("www.foo.bar")); // domains match with no leading `.` assert!(matcher.contains("bar.foo")); // subdomains match with no leading `.` assert!(matcher.contains("www.bar.foo")); // non-subdomain string prefixes don't match assert!(!matcher.contains("notfoo.bar")); assert!(!matcher.contains("notbar.foo")); } #[test] fn test_no_proxy_wildcard() { let no_proxy = NoProxy::from_string("*"); 
assert!(no_proxy.contains("any.where")); } #[test] fn test_no_proxy_ip_ranges() { let no_proxy = NoProxy::from_string(".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17"); let should_not_match = [ // random url, not in no_proxy "hyper.rs", // make sure that random non-subdomain string prefixes don't match "notfoo.bar", // make sure that random non-subdomain string prefixes don't match "notbar.baz", // ipv4 address out of range "10.43.1.1", // ipv4 address out of range "10.124.7.7", // ipv6 address out of range "[ffff:db8:a0b:12f0::1]", // ipv6 address out of range "[2005:db8:a0b:12f0::1]", ]; for host in &should_not_match { assert!(!no_proxy.contains(host), "should not contain {host:?}"); } let should_match = [ // make sure subdomains (with leading .) match "hello.foo.bar", // make sure exact matches (without leading .) match (also makes sure spaces between entries work) "bar.baz", // make sure subdomains (without leading . in no_proxy) match "foo.bar.baz", // make sure subdomains (without leading . in no_proxy) match - this differs from cURL "foo.bar", // ipv4 address match within range "10.42.1.100", // ipv6 address exact match "[::1]", // ipv6 address match within range "[2001:db8:a0b:12f0::1]", // ipv4 address exact match "10.124.7.8", ]; for host in &should_match { assert!(no_proxy.contains(host), "should contain {host:?}"); } } macro_rules! p { ($($n:ident = $v:expr,)*) => ({Builder { $($n: $v.into(),)* ..Builder::default() }.build()}); } fn intercept(p: &Matcher, u: &str) -> Intercept { p.intercept(&u.parse().unwrap()).unwrap() } #[test] fn test_all_proxy() { let p = p! { all = "http://om.nom", }; assert_eq!("http://om.nom", intercept(&p, "http://example.com").uri()); assert_eq!("http://om.nom", intercept(&p, "https://example.com").uri()); } #[test] fn test_specific_overrides_all() { let p = p! 
{ all = "http://no.pe", http = "http://y.ep", }; assert_eq!("http://no.pe", intercept(&p, "https://example.com").uri()); // the http rule is "more specific" than the all rule assert_eq!("http://y.ep", intercept(&p, "http://example.com").uri()); } #[test] fn test_parse_no_scheme_defaults_to_http() { let p = p! { https = "y.ep", http = "127.0.0.1:8887", }; assert_eq!(intercept(&p, "https://example.local").uri(), "http://y.ep"); assert_eq!( intercept(&p, "http://example.local").uri(), "http://127.0.0.1:8887" ); } #[test] fn test_parse_http_auth() { let p = p! { all = "http://Aladdin:opensesame@y.ep", }; let proxy = intercept(&p, "https://example.local"); assert_eq!(proxy.uri(), "http://y.ep"); assert_eq!( proxy.basic_auth().expect("basic_auth"), "Basic QWxhZGRpbjpvcGVuc2VzYW1l" ); } #[test] fn test_parse_http_auth_without_password() { let p = p! { all = "http://Aladdin@y.ep", }; let proxy = intercept(&p, "https://example.local"); assert_eq!(proxy.uri(), "http://y.ep"); assert_eq!( proxy.basic_auth().expect("basic_auth"), "Basic QWxhZGRpbjo=" ); } #[test] fn test_parse_http_auth_without_scheme() { let p = p! { all = "Aladdin:opensesame@y.ep", }; let proxy = intercept(&p, "https://example.local"); assert_eq!(proxy.uri(), "http://y.ep"); assert_eq!( proxy.basic_auth().expect("basic_auth"), "Basic QWxhZGRpbjpvcGVuc2VzYW1l" ); } #[test] fn test_dont_parse_http_when_is_cgi() { let mut builder = Matcher::builder(); builder.is_cgi = true; builder.http = "http://never.gonna.let.you.go".into(); let m = builder.build(); assert!(m.intercept(&"http://rick.roll".parse().unwrap()).is_none()); } } hyper-util-0.1.19/src/client/proxy/mod.rs000064400000000000000000000000461046102023000163710ustar 00000000000000//! 
Proxy utilities pub mod matcher; hyper-util-0.1.19/src/common/exec.rs000064400000000000000000000022001046102023000153610ustar 00000000000000#![allow(dead_code)] use hyper::rt::Executor; use std::fmt; use std::future::Future; use std::pin::Pin; use std::sync::Arc; pub(crate) type BoxSendFuture = Pin + Send>>; // Either the user provides an executor for background tasks, or we use // `tokio::spawn`. #[derive(Clone)] pub(crate) enum Exec { Executor(Arc + Send + Sync>), } // ===== impl Exec ===== impl Exec { pub(crate) fn new(inner: E) -> Self where E: Executor + Send + Sync + 'static, { Exec::Executor(Arc::new(inner)) } pub(crate) fn execute(&self, fut: F) where F: Future + Send + 'static, { match *self { Exec::Executor(ref e) => { e.execute(Box::pin(fut)); } } } } impl fmt::Debug for Exec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Exec").finish() } } impl hyper::rt::Executor for Exec where F: Future + Send + 'static, { fn execute(&self, fut: F) { Exec::execute(self, fut); } } hyper-util-0.1.19/src/common/future.rs000064400000000000000000000010461046102023000157560ustar 00000000000000use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; // TODO: replace with `std::future::poll_fn` once MSRV >= 1.64 pub(crate) fn poll_fn(f: F) -> PollFn where F: FnMut(&mut Context<'_>) -> Poll, { PollFn { f } } pub(crate) struct PollFn { f: F, } impl Unpin for PollFn {} impl Future for PollFn where F: FnMut(&mut Context<'_>) -> Poll, { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { (self.f)(cx) } } hyper-util-0.1.19/src/common/lazy.rs000064400000000000000000000033461046102023000154300ustar 00000000000000use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{self, Poll}; pub(crate) trait Started: Future { fn started(&self) -> bool; } pub(crate) fn lazy(func: F) -> Lazy where F: FnOnce() -> R, R: Future + Unpin, { Lazy { inner: Inner::Init { func }, } } // FIXME: allow() 
required due to `impl Trait` leaking types to this lint pin_project! { #[allow(missing_debug_implementations)] pub(crate) struct Lazy { #[pin] inner: Inner, } } pin_project! { #[project = InnerProj] #[project_replace = InnerProjReplace] enum Inner { Init { func: F }, Fut { #[pin] fut: R }, Empty, } } impl Started for Lazy where F: FnOnce() -> R, R: Future, { fn started(&self) -> bool { match self.inner { Inner::Init { .. } => false, Inner::Fut { .. } | Inner::Empty => true, } } } impl Future for Lazy where F: FnOnce() -> R, R: Future, { type Output = R::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); if let InnerProj::Fut { fut } = this.inner.as_mut().project() { return fut.poll(cx); } match this.inner.as_mut().project_replace(Inner::Empty) { InnerProjReplace::Init { func } => { this.inner.set(Inner::Fut { fut: func() }); if let InnerProj::Fut { fut } = this.inner.project() { return fut.poll(cx); } unreachable!() } _ => unreachable!("lazy state wrong"), } } } hyper-util-0.1.19/src/common/mod.rs000064400000000000000000000007341046102023000152260ustar 00000000000000#![allow(missing_docs)] pub(crate) mod exec; #[cfg(feature = "client-legacy")] mod lazy; #[cfg(feature = "server")] // #[cfg(feature = "server-auto")] pub(crate) mod rewind; #[cfg(feature = "client-legacy")] mod sync; pub(crate) mod timer; #[cfg(feature = "client-legacy")] pub(crate) use exec::Exec; #[cfg(feature = "client-legacy")] pub(crate) use lazy::{lazy, Started as Lazy}; #[cfg(feature = "client-legacy")] pub(crate) use sync::SyncWrapper; pub(crate) mod future; hyper-util-0.1.19/src/common/rewind.rs000064400000000000000000000072141046102023000157370ustar 00000000000000use std::{cmp, io}; use bytes::{Buf, Bytes}; use hyper::rt::{Read, ReadBufCursor, Write}; use std::{ pin::Pin, task::{self, Poll}, }; /// Combine a buffer with an IO, rewinding reads to use the buffer. 
#[derive(Debug)] pub(crate) struct Rewind { pub(crate) pre: Option, pub(crate) inner: T, } impl Rewind { #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self { Rewind { pre: Some(buf), inner: io, } } } impl Read for Rewind where T: Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, mut buf: ReadBufCursor<'_>, ) -> Poll> { if let Some(mut prefix) = self.pre.take() { // If there are no remaining bytes, let the bytes get dropped. if !prefix.is_empty() { let copy_len = cmp::min(prefix.len(), buf.remaining()); buf.put_slice(&prefix[..copy_len]); prefix.advance(copy_len); // Put back what's left if !prefix.is_empty() { self.pre = Some(prefix); } return Poll::Ready(Ok(())); } } Pin::new(&mut self.inner).poll_read(cx, buf) } } impl Write for Rewind where T: Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.inner).poll_write(cx, buf) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, bufs: &[io::IoSlice<'_>], ) -> Poll> { Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_flush(cx) } fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_shutdown(cx) } fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } } /* #[cfg(test)] mod tests { use super::Rewind; use bytes::Bytes; use tokio::io::AsyncReadExt; #[cfg(not(miri))] #[tokio::test] async fn partial_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new(mock); // Read off some bytes, ensure we filled o1 let mut buf = [0; 2]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never 
read in the first place. stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // At this point we should have read everything that was in the MockStream assert_eq!(&buf, &underlying); } #[cfg(not(miri))] #[tokio::test] async fn full_rewind() { let underlying = [104, 101, 108, 108, 111]; let mock = tokio_test::io::Builder::new().read(&underlying).build(); let mut stream = Rewind::new(mock); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); // Rewind the stream so that it is as if we never read in the first place. stream.rewind(Bytes::copy_from_slice(&buf[..])); let mut buf = [0; 5]; stream.read_exact(&mut buf).await.expect("read1"); } } */ hyper-util-0.1.19/src/common/sync.rs000064400000000000000000000045061046102023000154240ustar 00000000000000pub(crate) struct SyncWrapper(T); impl SyncWrapper { /// Creates a new SyncWrapper containing the given value. /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let wrapped = SyncWrapper::new(42); /// ``` pub(crate) fn new(value: T) -> Self { Self(value) } /// Acquires a reference to the protected value. /// /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which /// returns an error if another thread panicked while holding the lock. It is not recommended /// to send an exclusive reference to a potentially damaged value to another thread for further /// processing. 
/// /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let mut wrapped = SyncWrapper::new(42); /// let value = wrapped.get_mut(); /// *value = 0; /// assert_eq!(*wrapped.get_mut(), 0); /// ``` pub(crate) fn get_mut(&mut self) -> &mut T { &mut self.0 } /// Consumes this wrapper, returning the underlying data. /// /// This is safe because it requires ownership of the wrapper, aherefore this method will neither /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which /// returns an error if another thread panicked while holding the lock. It is not recommended /// to send an exclusive reference to a potentially damaged value to another thread for further /// processing. /// /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner /// /// # Examples /// /// ```ignore /// use hyper::common::sync_wrapper::SyncWrapper; /// /// let mut wrapped = SyncWrapper::new(42); /// assert_eq!(wrapped.into_inner(), 42); /// ``` #[allow(dead_code)] pub(crate) fn into_inner(self) -> T { self.0 } } // this is safe because the only operations permitted on this data structure require exclusive // access or ownership unsafe impl Sync for SyncWrapper {} hyper-util-0.1.19/src/common/timer.rs000064400000000000000000000015501046102023000155640ustar 00000000000000#![allow(dead_code)] use std::fmt; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; use std::time::Instant; use hyper::rt::Sleep; #[derive(Clone)] pub(crate) struct Timer(Arc); // =====impl Timer===== impl Timer { pub(crate) fn new(inner: T) -> Self where T: hyper::rt::Timer + Send + Sync + 'static, { Self(Arc::new(inner)) } } impl fmt::Debug for Timer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Timer").finish() } } impl hyper::rt::Timer for Timer { fn sleep(&self, duration: Duration) 
-> Pin> { self.0.sleep(duration) } fn sleep_until(&self, deadline: Instant) -> Pin> { self.0.sleep_until(deadline) } fn now(&self) -> Instant { self.0.now() } } hyper-util-0.1.19/src/error.rs000064400000000000000000000004621046102023000143060ustar 00000000000000/* use std::error::Error; pub(crate) fn find<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> { let mut err = Some(top); while let Some(src) = err { if src.is::() { return src.downcast_ref(); } err = src.source(); } None } */ hyper-util-0.1.19/src/lib.rs000064400000000000000000000007041046102023000137220ustar 00000000000000#![deny(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg))] //! Utilities for working with hyper. //! //! This crate is less-stable than [`hyper`](https://docs.rs/hyper). However, //! does respect Rust's semantic version regarding breaking changes. #[cfg(feature = "client")] pub mod client; mod common; pub mod rt; #[cfg(feature = "server")] pub mod server; #[cfg(any(feature = "service", feature = "client-legacy"))] pub mod service; mod error; hyper-util-0.1.19/src/rt/io.rs000064400000000000000000000014421046102023000142100ustar 00000000000000use std::marker::Unpin; use std::pin::Pin; use std::task::Poll; use futures_core::ready; use hyper::rt::{Read, ReadBuf, Write}; use crate::common::future::poll_fn; pub(crate) async fn read(io: &mut T, buf: &mut [u8]) -> Result where T: Read + Unpin, { poll_fn(move |cx| { let mut buf = ReadBuf::new(buf); ready!(Pin::new(&mut *io).poll_read(cx, buf.unfilled()))?; Poll::Ready(Ok(buf.filled().len())) }) .await } pub(crate) async fn write_all(io: &mut T, buf: &[u8]) -> Result<(), std::io::Error> where T: Write + Unpin, { let mut n = 0; poll_fn(move |cx| { while n < buf.len() { n += ready!(Pin::new(&mut *io).poll_write(cx, &buf[n..])?); } Poll::Ready(Ok(())) }) .await } hyper-util-0.1.19/src/rt/mod.rs000064400000000000000000000004171046102023000143610ustar 00000000000000//! 
Runtime utilities #[cfg(feature = "client-legacy")] mod io; #[cfg(feature = "client-legacy")] pub(crate) use self::io::{read, write_all}; #[cfg(feature = "tokio")] pub mod tokio; #[cfg(feature = "tokio")] pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer}; hyper-util-0.1.19/src/rt/tokio/with_hyper_io.rs000064400000000000000000000106631046102023000176040ustar 00000000000000use pin_project_lite::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; pin_project! { /// Extends an underlying [`tokio`] I/O with [`hyper`] I/O implementations. /// /// This implements [`Read`] and [`Write`] given an inner type that implements [`AsyncRead`] /// and [`AsyncWrite`], respectively. #[derive(Debug)] pub struct WithHyperIo { #[pin] inner: I, } } // ==== impl WithHyperIo ===== impl WithHyperIo { /// Wraps the inner I/O in an [`WithHyperIo`] pub fn new(inner: I) -> Self { Self { inner } } /// Returns a reference to the inner type. pub fn inner(&self) -> &I { &self.inner } /// Returns a mutable reference to the inner type. pub fn inner_mut(&mut self) -> &mut I { &mut self.inner } /// Consumes this wrapper and returns the inner type. pub fn into_inner(self) -> I { self.inner } } /// [`WithHyperIo`] is [`Read`] if `I` is [`AsyncRead`]. /// /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Read`]: hyper::rt::Read impl hyper::rt::Read for WithHyperIo where I: tokio::io::AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { let n = unsafe { let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) { Poll::Ready(Ok(())) => tbuf.filled().len(), other => return other, } }; unsafe { buf.advance(n); } Poll::Ready(Ok(())) } } /// [`WithHyperIo`] is [`Write`] if `I` is [`AsyncWrite`]. 
/// /// [`AsyncWrite`]: tokio::io::AsyncWrite /// [`Write`]: hyper::rt::Write impl hyper::rt::Write for WithHyperIo where I: tokio::io::AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { tokio::io::AsyncWrite::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs) } } /// [`WithHyperIo`] exposes its inner `I`'s [`AsyncRead`] implementation. /// /// [`AsyncRead`]: tokio::io::AsyncRead impl tokio::io::AsyncRead for WithHyperIo where I: tokio::io::AsyncRead, { #[inline] fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { self.project().inner.poll_read(cx, buf) } } /// [`WithHyperIo`] exposes its inner `I`'s [`AsyncWrite`] implementation. 
/// /// [`AsyncWrite`]: tokio::io::AsyncWrite impl tokio::io::AsyncWrite for WithHyperIo where I: tokio::io::AsyncWrite, { #[inline] fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { self.project().inner.poll_write(cx, buf) } #[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().inner.poll_flush(cx) } #[inline] fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { self.project().inner.poll_shutdown(cx) } #[inline] fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } #[inline] fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { self.project().inner.poll_write_vectored(cx, bufs) } } hyper-util-0.1.19/src/rt/tokio/with_tokio_io.rs000064400000000000000000000112171046102023000175760ustar 00000000000000use pin_project_lite::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; pin_project! { /// Extends an underlying [`hyper`] I/O with [`tokio`] I/O implementations. /// /// This implements [`AsyncRead`] and [`AsyncWrite`] given an inner type that implements /// [`Read`] and [`Write`], respectively. #[derive(Debug)] pub struct WithTokioIo { #[pin] inner: I, } } // ==== impl WithTokioIo ===== /// [`WithTokioIo`] is [`AsyncRead`] if `I` is [`Read`]. /// /// [`AsyncRead`]: tokio::io::AsyncRead /// [`Read`]: hyper::rt::Read impl tokio::io::AsyncRead for WithTokioIo where I: hyper::rt::Read, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, tbuf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { //let init = tbuf.initialized().len(); let filled = tbuf.filled().len(); let sub_filled = unsafe { let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut()); match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) { Poll::Ready(Ok(())) => buf.filled().len(), other => return other, } }; let n_filled = filled + sub_filled; // At least sub_filled bytes had to have been initialized. 
let n_init = sub_filled; unsafe { tbuf.assume_init(n_init); tbuf.set_filled(n_filled); } Poll::Ready(Ok(())) } } /// [`WithTokioIo`] is [`AsyncWrite`] if `I` is [`Write`]. /// /// [`AsyncWrite`]: tokio::io::AsyncWrite /// [`Write`]: hyper::rt::Write impl tokio::io::AsyncWrite for WithTokioIo where I: hyper::rt::Write, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { hyper::rt::Write::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { hyper::rt::Write::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { hyper::rt::Write::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs) } } /// [`WithTokioIo`] exposes its inner `I`'s [`Write`] implementation. /// /// [`Write`]: hyper::rt::Write impl hyper::rt::Write for WithTokioIo where I: hyper::rt::Write, { #[inline] fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { self.project().inner.poll_write(cx, buf) } #[inline] fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().inner.poll_flush(cx) } #[inline] fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { self.project().inner.poll_shutdown(cx) } #[inline] fn is_write_vectored(&self) -> bool { self.inner.is_write_vectored() } #[inline] fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { self.project().inner.poll_write_vectored(cx, bufs) } } impl WithTokioIo { /// Wraps the inner I/O in an [`WithTokioIo`] pub fn new(inner: I) -> Self { Self { inner } } /// Returns a reference to the inner type. 
pub fn inner(&self) -> &I { &self.inner } /// Returns a mutable reference to the inner type. pub fn inner_mut(&mut self) -> &mut I { &mut self.inner } /// Consumes this wrapper and returns the inner type. pub fn into_inner(self) -> I { self.inner } } /// [`WithTokioIo`] exposes its inner `I`'s [`Read`] implementation. /// /// [`Read`]: hyper::rt::Read impl hyper::rt::Read for WithTokioIo where I: hyper::rt::Read, { #[inline] fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { self.project().inner.poll_read(cx, buf) } } hyper-util-0.1.19/src/rt/tokio.rs000064400000000000000000000243241046102023000147320ustar 00000000000000//! [`tokio`] runtime components integration for [`hyper`]. //! //! [`hyper::rt`] exposes a set of traits to allow hyper to be agnostic to //! its underlying asynchronous runtime. This submodule provides glue for //! [`tokio`] users to bridge those types to [`hyper`]'s interfaces. //! //! # IO //! //! [`hyper`] abstracts over asynchronous readers and writers using [`Read`] //! and [`Write`], while [`tokio`] abstracts over this using [`AsyncRead`] //! and [`AsyncWrite`]. This submodule provides a collection of IO adaptors //! to bridge these two IO ecosystems together: [`TokioIo`], //! [`WithHyperIo`], and [`WithTokioIo`]. //! //! To compare and constrast these IO adaptors and to help explain which //! is the proper choice for your needs, here is a table showing which IO //! traits these implement, given two types `T` and `H` which implement //! Tokio's and Hyper's corresponding IO traits: //! //! | | [`AsyncRead`] | [`AsyncWrite`] | [`Read`] | [`Write`] | //! |--------------------|------------------|-------------------|--------------|--------------| //! | `T` | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** | //! | `H` | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** | //! | [`TokioIo`] | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** | //! 
| [`TokioIo`] | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** | //! | [`WithHyperIo`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** | //! | [`WithHyperIo`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** | //! | [`WithTokioIo`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** | //! | [`WithTokioIo`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** | //! //! For most situations, [`TokioIo`] is the proper choice. This should be //! constructed, wrapping some underlying [`hyper`] or [`tokio`] IO, at the //! call-site of a function like [`hyper::client::conn::http1::handshake`]. //! //! [`TokioIo`] switches across these ecosystems, but notably does not //! preserve the existing IO trait implementations of its underlying IO. If //! one wishes to _extend_ IO with additional implementations, //! [`WithHyperIo`] and [`WithTokioIo`] are the correct choice. //! //! For example, a Tokio reader/writer can be wrapped in [`WithHyperIo`]. //! That will implement _both_ sets of IO traits. Conversely, //! [`WithTokioIo`] will implement both sets of IO traits given a //! reader/writer that implements Hyper's [`Read`] and [`Write`]. //! //! See [`tokio::io`] and ["_Asynchronous IO_"][tokio-async-docs] for more //! information. //! //! [`AsyncRead`]: tokio::io::AsyncRead //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Read`]: hyper::rt::Read //! [`Write`]: hyper::rt::Write //! [tokio-async-docs]: https://docs.rs/tokio/latest/tokio/#asynchronous-io use std::{ future::Future, pin::Pin, task::{Context, Poll}, time::{Duration, Instant}, }; use hyper::rt::{Executor, Sleep, Timer}; use pin_project_lite::pin_project; #[cfg(feature = "tracing")] use tracing::instrument::Instrument; pub use self::{with_hyper_io::WithHyperIo, with_tokio_io::WithTokioIo}; mod with_hyper_io; mod with_tokio_io; /// Future executor that utilises `tokio` threads. #[non_exhaustive] #[derive(Default, Debug, Clone)] pub struct TokioExecutor {} pin_project! 
{ /// A wrapper that implements Tokio's IO traits for an inner type that /// implements hyper's IO traits, or vice versa (implements hyper's IO /// traits for a type that implements Tokio's IO traits). #[derive(Debug)] pub struct TokioIo { #[pin] inner: T, } } /// A Timer that uses the tokio runtime. #[non_exhaustive] #[derive(Default, Clone, Debug)] pub struct TokioTimer; // Use TokioSleep to get tokio::time::Sleep to implement Unpin. // see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html pin_project! { #[derive(Debug)] struct TokioSleep { #[pin] inner: tokio::time::Sleep, } } // ===== impl TokioExecutor ===== impl Executor for TokioExecutor where Fut: Future + Send + 'static, Fut::Output: Send + 'static, { fn execute(&self, fut: Fut) { #[cfg(feature = "tracing")] tokio::spawn(fut.in_current_span()); #[cfg(not(feature = "tracing"))] tokio::spawn(fut); } } impl TokioExecutor { /// Create new executor that relies on [`tokio::spawn`] to execute futures. pub fn new() -> Self { Self {} } } // ==== impl TokioIo ===== impl TokioIo { /// Wrap a type implementing Tokio's or hyper's IO traits. pub fn new(inner: T) -> Self { Self { inner } } /// Borrow the inner type. pub fn inner(&self) -> &T { &self.inner } /// Mut borrow the inner type. pub fn inner_mut(&mut self) -> &mut T { &mut self.inner } /// Consume this wrapper and get the inner type. 
pub fn into_inner(self) -> T { self.inner } } impl hyper::rt::Read for TokioIo where T: tokio::io::AsyncRead, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, mut buf: hyper::rt::ReadBufCursor<'_>, ) -> Poll> { let n = unsafe { let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut()); match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) { Poll::Ready(Ok(())) => tbuf.filled().len(), other => return other, } }; unsafe { buf.advance(n); } Poll::Ready(Ok(())) } } impl hyper::rt::Write for TokioIo where T: tokio::io::AsyncWrite, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { tokio::io::AsyncWrite::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { tokio::io::AsyncWrite::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs) } } impl tokio::io::AsyncRead for TokioIo where T: hyper::rt::Read, { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, tbuf: &mut tokio::io::ReadBuf<'_>, ) -> Poll> { //let init = tbuf.initialized().len(); let filled = tbuf.filled().len(); let sub_filled = unsafe { let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut()); match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) { Poll::Ready(Ok(())) => buf.filled().len(), other => return other, } }; let n_filled = filled + sub_filled; // At least sub_filled bytes had to have been initialized. 
let n_init = sub_filled; unsafe { tbuf.assume_init(n_init); tbuf.set_filled(n_filled); } Poll::Ready(Ok(())) } } impl tokio::io::AsyncWrite for TokioIo where T: hyper::rt::Write, { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { hyper::rt::Write::poll_write(self.project().inner, cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { hyper::rt::Write::poll_flush(self.project().inner, cx) } fn poll_shutdown( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_shutdown(self.project().inner, cx) } fn is_write_vectored(&self) -> bool { hyper::rt::Write::is_write_vectored(&self.inner) } fn poll_write_vectored( self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs) } } // ==== impl TokioTimer ===== impl Timer for TokioTimer { fn sleep(&self, duration: Duration) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep(duration), }) } fn sleep_until(&self, deadline: Instant) -> Pin> { Box::pin(TokioSleep { inner: tokio::time::sleep_until(deadline.into()), }) } fn reset(&self, sleep: &mut Pin>, new_deadline: Instant) { if let Some(sleep) = sleep.as_mut().downcast_mut_pin::() { sleep.reset(new_deadline) } } fn now(&self) -> Instant { tokio::time::Instant::now().into() } } impl TokioTimer { /// Create a new TokioTimer pub fn new() -> Self { Self {} } } impl Future for TokioSleep { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().inner.poll(cx) } } impl Sleep for TokioSleep {} impl TokioSleep { fn reset(self: Pin<&mut Self>, deadline: Instant) { self.project().inner.as_mut().reset(deadline.into()); } } #[cfg(test)] mod tests { use crate::rt::TokioExecutor; use hyper::rt::Executor; use tokio::sync::oneshot; #[tokio::test] async fn simple_execute() -> Result<(), Box> { let (tx, rx) = oneshot::channel(); let executor = TokioExecutor::new(); 
executor.execute(async move { tx.send(()).unwrap(); }); rx.await.map_err(Into::into) } } hyper-util-0.1.19/src/server/conn/auto/mod.rs000064400000000000000000001312011046102023000171430ustar 00000000000000//! Http1 or Http2 connection. pub mod upgrade; use hyper::service::HttpService; use std::future::Future; use std::marker::PhantomPinned; use std::mem::MaybeUninit; use std::pin::Pin; use std::task::{Context, Poll}; use std::{error::Error as StdError, io, time::Duration}; use bytes::Bytes; use futures_core::ready; use http::{Request, Response}; use http_body::Body; use hyper::{ body::Incoming, rt::{Read, ReadBuf, Timer, Write}, service::Service, }; #[cfg(feature = "http1")] use hyper::server::conn::http1; #[cfg(feature = "http2")] use hyper::{rt::bounds::Http2ServerConnExec, server::conn::http2}; #[cfg(any(not(feature = "http2"), not(feature = "http1")))] use std::marker::PhantomData; use pin_project_lite::pin_project; use crate::common::rewind::Rewind; type Error = Box; type Result = std::result::Result; const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// Exactly equivalent to [`Http2ServerConnExec`]. #[cfg(feature = "http2")] pub trait HttpServerConnExec: Http2ServerConnExec {} #[cfg(feature = "http2")] impl> HttpServerConnExec for T {} /// Exactly equivalent to [`Http2ServerConnExec`]. #[cfg(not(feature = "http2"))] pub trait HttpServerConnExec {} #[cfg(not(feature = "http2"))] impl HttpServerConnExec for T {} /// Http1 or Http2 connection builder. #[derive(Clone, Debug)] pub struct Builder { #[cfg(feature = "http1")] http1: http1::Builder, #[cfg(feature = "http2")] http2: http2::Builder, #[cfg(any(feature = "http1", feature = "http2"))] version: Option, #[cfg(not(feature = "http2"))] _executor: E, } impl Default for Builder { fn default() -> Self { Self::new(E::default()) } } impl Builder { /// Create a new auto connection builder. /// /// `executor` parameter should be a type that implements /// [`Executor`](hyper::rt::Executor) trait. 
/// /// # Example /// /// ``` /// use hyper_util::{ /// rt::TokioExecutor, /// server::conn::auto, /// }; /// /// auto::Builder::new(TokioExecutor::new()); /// ``` pub fn new(executor: E) -> Self { Self { #[cfg(feature = "http1")] http1: http1::Builder::new(), #[cfg(feature = "http2")] http2: http2::Builder::new(executor), #[cfg(any(feature = "http1", feature = "http2"))] version: None, #[cfg(not(feature = "http2"))] _executor: executor, } } /// Http1 configuration. #[cfg(feature = "http1")] pub fn http1(&mut self) -> Http1Builder<'_, E> { Http1Builder { inner: self } } /// Http2 configuration. #[cfg(feature = "http2")] pub fn http2(&mut self) -> Http2Builder<'_, E> { Http2Builder { inner: self } } /// Only accepts HTTP/2 /// /// Does not do anything if used with [`serve_connection_with_upgrades`] /// /// [`serve_connection_with_upgrades`]: Builder::serve_connection_with_upgrades #[cfg(feature = "http2")] pub fn http2_only(mut self) -> Self { assert!(self.version.is_none()); self.version = Some(Version::H2); self } /// Only accepts HTTP/1 /// /// Does not do anything if used with [`serve_connection_with_upgrades`] /// /// [`serve_connection_with_upgrades`]: Builder::serve_connection_with_upgrades #[cfg(feature = "http1")] pub fn http1_only(mut self) -> Self { assert!(self.version.is_none()); self.version = Some(Version::H1); self } /// Returns `true` if this builder can serve an HTTP/1.1-based connection. pub fn is_http1_available(&self) -> bool { match self.version { #[cfg(feature = "http1")] Some(Version::H1) => true, #[cfg(feature = "http2")] Some(Version::H2) => false, #[cfg(any(feature = "http1", feature = "http2"))] _ => true, } } /// Returns `true` if this builder can serve an HTTP/2-based connection. 
pub fn is_http2_available(&self) -> bool { match self.version { #[cfg(feature = "http1")] Some(Version::H1) => false, #[cfg(feature = "http2")] Some(Version::H2) => true, #[cfg(any(feature = "http1", feature = "http2"))] _ => true, } } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// This setting only affects HTTP/1 connections. HTTP/2 connections are /// not affected by this setting. /// /// Default is false. /// /// # Example /// /// ``` /// use hyper_util::{ /// rt::TokioExecutor, /// server::conn::auto, /// }; /// /// auto::Builder::new(TokioExecutor::new()) /// .title_case_headers(true); /// ``` #[cfg(feature = "http1")] pub fn title_case_headers(mut self, enabled: bool) -> Self { self.http1.title_case_headers(enabled); self } /// Set whether HTTP/1 connections will preserve the original case of header names. /// /// This setting only affects HTTP/1 connections. HTTP/2 connections are /// not affected by this setting. /// /// Default is false. /// /// # Example /// /// ``` /// use hyper_util::{ /// rt::TokioExecutor, /// server::conn::auto, /// }; /// /// auto::Builder::new(TokioExecutor::new()) /// .preserve_header_case(true); /// ``` #[cfg(feature = "http1")] pub fn preserve_header_case(mut self, enabled: bool) -> Self { self.http1.preserve_header_case(enabled); self } /// Bind a connection together with a [`Service`]. 
pub fn serve_connection(&self, io: I, service: S) -> Connection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { let state = match self.version { #[cfg(feature = "http1")] Some(Version::H1) => { let io = Rewind::new_buffered(io, Bytes::new()); let conn = self.http1.serve_connection(io, service); ConnState::H1 { conn } } #[cfg(feature = "http2")] Some(Version::H2) => { let io = Rewind::new_buffered(io, Bytes::new()); let conn = self.http2.serve_connection(io, service); ConnState::H2 { conn } } #[cfg(any(feature = "http1", feature = "http2"))] _ => ConnState::ReadVersion { read_version: read_version(io), builder: Cow::Borrowed(self), service: Some(service), }, }; Connection { state } } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. /// /// Note that if you ever want to use [`hyper::upgrade::Upgraded::downcast`] /// with this crate, you'll need to use [`hyper_util::server::conn::auto::upgrade::downcast`] /// instead. See the documentation of the latter to understand why. 
/// /// [`hyper_util::server::conn::auto::upgrade::downcast`]: crate::server::conn::auto::upgrade::downcast pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { UpgradeableConnection { state: UpgradeableConnState::ReadVersion { read_version: read_version(io), builder: Cow::Borrowed(self), service: Some(service), }, } } } #[derive(Copy, Clone, Debug)] enum Version { H1, H2, } impl Version { #[must_use] #[cfg(any(not(feature = "http2"), not(feature = "http1")))] pub fn unsupported(self) -> Error { match self { Version::H1 => Error::from("HTTP/1 is not supported"), Version::H2 => Error::from("HTTP/2 is not supported"), } } } fn read_version(io: I) -> ReadVersion where I: Read + Unpin, { ReadVersion { io: Some(io), buf: [MaybeUninit::uninit(); 24], filled: 0, version: Version::H2, cancelled: false, _pin: PhantomPinned, } } pin_project! { struct ReadVersion { io: Option, buf: [MaybeUninit; 24], // the amount of `buf` thats been filled filled: usize, version: Version, cancelled: bool, // Make this future `!Unpin` for compatibility with async trait methods. #[pin] _pin: PhantomPinned, } } impl ReadVersion { pub fn cancel(self: Pin<&mut Self>) { *self.project().cancelled = true; } } impl Future for ReadVersion where I: Read + Unpin, { type Output = io::Result<(Version, Rewind)>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); if *this.cancelled { return Poll::Ready(Err(io::Error::new(io::ErrorKind::Interrupted, "Cancelled"))); } let mut buf = ReadBuf::uninit(&mut *this.buf); // SAFETY: `this.filled` tracks how many bytes have been read (and thus initialized) and // we're only advancing by that many. 
unsafe { buf.unfilled().advance(*this.filled); }; // We start as H2 and switch to H1 as soon as we don't have the preface. while buf.filled().len() < H2_PREFACE.len() { let len = buf.filled().len(); ready!(Pin::new(this.io.as_mut().unwrap()).poll_read(cx, buf.unfilled()))?; *this.filled = buf.filled().len(); // We starts as H2 and switch to H1 when we don't get the preface. if buf.filled().len() == len || buf.filled()[len..] != H2_PREFACE[len..buf.filled().len()] { *this.version = Version::H1; break; } } let io = this.io.take().unwrap(); let buf = buf.filled().to_vec(); Poll::Ready(Ok(( *this.version, Rewind::new_buffered(io, Bytes::from(buf)), ))) } } pin_project! { /// A [`Future`](core::future::Future) representing an HTTP/1 connection, returned from /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection). /// /// To drive HTTP on this connection this future **must be polled**, typically with /// `.await`. If it isn't polled, no progress will be made on this connection. #[must_use = "futures do nothing unless polled"] pub struct Connection<'a, I, S, E> where S: HttpService, { #[pin] state: ConnState<'a, I, S, E>, } } // A custom COW, since the libstd is has ToOwned bounds that are too eager. enum Cow<'a, T> { Borrowed(&'a T), Owned(T), } impl std::ops::Deref for Cow<'_, T> { type Target = T; fn deref(&self) -> &T { match self { Cow::Borrowed(t) => &*t, Cow::Owned(ref t) => t, } } } #[cfg(feature = "http1")] type Http1Connection = hyper::server::conn::http1::Connection, S>; #[cfg(not(feature = "http1"))] type Http1Connection = (PhantomData, PhantomData); #[cfg(feature = "http2")] type Http2Connection = hyper::server::conn::http2::Connection, S, E>; #[cfg(not(feature = "http2"))] type Http2Connection = (PhantomData, PhantomData, PhantomData); pin_project! 
{ #[project = ConnStateProj] enum ConnState<'a, I, S, E> where S: HttpService, { ReadVersion { #[pin] read_version: ReadVersion, builder: Cow<'a, Builder>, service: Option, }, H1 { #[pin] conn: Http1Connection, }, H2 { #[pin] conn: Http2Connection, }, } } impl Connection<'_, I, S, E> where S: HttpService, S::Error: Into>, I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, E: HttpServerConnExec, { /// Start a graceful shutdown process for this connection. /// /// This `Connection` should continue to be polled until shutdown can finish. /// /// # Note /// /// This should only be called while the `Connection` future is still pending. If called after /// `Connection::poll` has resolved, this does nothing. pub fn graceful_shutdown(self: Pin<&mut Self>) { match self.project().state.project() { ConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(), #[cfg(feature = "http1")] ConnStateProj::H1 { conn } => conn.graceful_shutdown(), #[cfg(feature = "http2")] ConnStateProj::H2 { conn } => conn.graceful_shutdown(), #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } /// Make this Connection static, instead of borrowing from Builder. 
pub fn into_owned(self) -> Connection<'static, I, S, E> where Builder: Clone, { Connection { state: match self.state { ConnState::ReadVersion { read_version, builder, service, } => ConnState::ReadVersion { read_version, service, builder: Cow::Owned(builder.clone()), }, #[cfg(feature = "http1")] ConnState::H1 { conn } => ConnState::H1 { conn }, #[cfg(feature = "http2")] ConnState::H2 { conn } => ConnState::H2 { conn }, #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), }, } } } impl Future for Connection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let mut this = self.as_mut().project(); match this.state.as_mut().project() { ConnStateProj::ReadVersion { read_version, builder, service, } => { let (version, io) = ready!(read_version.poll(cx))?; let service = service.take().unwrap(); match version { #[cfg(feature = "http1")] Version::H1 => { let conn = builder.http1.serve_connection(io, service); this.state.set(ConnState::H1 { conn }); } #[cfg(feature = "http2")] Version::H2 => { let conn = builder.http2.serve_connection(io, service); this.state.set(ConnState::H2 { conn }); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => return Poll::Ready(Err(version.unsupported())), } } #[cfg(feature = "http1")] ConnStateProj::H1 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(feature = "http2")] ConnStateProj::H2 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } } } pin_project! { /// An upgradable [`Connection`], returned by /// [`Builder::serve_upgradable_connection`](struct.Builder.html#method.serve_connection_with_upgrades). 
/// /// To drive HTTP on this connection this future **must be polled**, typically with /// `.await`. If it isn't polled, no progress will be made on this connection. #[must_use = "futures do nothing unless polled"] pub struct UpgradeableConnection<'a, I, S, E> where S: HttpService, { #[pin] state: UpgradeableConnState<'a, I, S, E>, } } #[cfg(feature = "http1")] type Http1UpgradeableConnection = hyper::server::conn::http1::UpgradeableConnection; #[cfg(not(feature = "http1"))] type Http1UpgradeableConnection = (PhantomData, PhantomData); pin_project! { #[project = UpgradeableConnStateProj] enum UpgradeableConnState<'a, I, S, E> where S: HttpService, { ReadVersion { #[pin] read_version: ReadVersion, builder: Cow<'a, Builder>, service: Option, }, H1 { #[pin] conn: Http1UpgradeableConnection, S>, }, H2 { #[pin] conn: Http2Connection, }, } } impl UpgradeableConnection<'_, I, S, E> where S: HttpService, S::Error: Into>, I: Read + Write + Unpin, B: Body + 'static, B::Error: Into>, E: HttpServerConnExec, { /// Start a graceful shutdown process for this connection. /// /// This `UpgradeableConnection` should continue to be polled until shutdown can finish. /// /// # Note /// /// This should only be called while the `Connection` future is still nothing. pending. If /// called after `UpgradeableConnection::poll` has resolved, this does nothing. pub fn graceful_shutdown(self: Pin<&mut Self>) { match self.project().state.project() { UpgradeableConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(), #[cfg(feature = "http1")] UpgradeableConnStateProj::H1 { conn } => conn.graceful_shutdown(), #[cfg(feature = "http2")] UpgradeableConnStateProj::H2 { conn } => conn.graceful_shutdown(), #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } /// Make this Connection static, instead of borrowing from Builder. 
pub fn into_owned(self) -> UpgradeableConnection<'static, I, S, E> where Builder: Clone, { UpgradeableConnection { state: match self.state { UpgradeableConnState::ReadVersion { read_version, builder, service, } => UpgradeableConnState::ReadVersion { read_version, service, builder: Cow::Owned(builder.clone()), }, #[cfg(feature = "http1")] UpgradeableConnState::H1 { conn } => UpgradeableConnState::H1 { conn }, #[cfg(feature = "http2")] UpgradeableConnState::H2 { conn } => UpgradeableConnState::H2 { conn }, #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), }, } } } impl Future for UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { type Output = Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let mut this = self.as_mut().project(); match this.state.as_mut().project() { UpgradeableConnStateProj::ReadVersion { read_version, builder, service, } => { let (version, io) = ready!(read_version.poll(cx))?; let service = service.take().unwrap(); match version { #[cfg(feature = "http1")] Version::H1 => { let conn = builder.http1.serve_connection(io, service).with_upgrades(); this.state.set(UpgradeableConnState::H1 { conn }); } #[cfg(feature = "http2")] Version::H2 => { let conn = builder.http2.serve_connection(io, service); this.state.set(UpgradeableConnState::H2 { conn }); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => return Poll::Ready(Err(version.unsupported())), } } #[cfg(feature = "http1")] UpgradeableConnStateProj::H1 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(feature = "http2")] UpgradeableConnStateProj::H2 { conn } => { return conn.poll(cx).map_err(Into::into); } #[cfg(any(not(feature = "http1"), not(feature = "http2")))] _ => unreachable!(), } } } } /// Http1 part of builder. 
#[cfg(feature = "http1")] pub struct Http1Builder<'a, E> { inner: &'a mut Builder, } #[cfg(feature = "http1")] impl Http1Builder<'_, E> { /// Http2 configuration. #[cfg(feature = "http2")] pub fn http2(&mut self) -> Http2Builder<'_, E> { Http2Builder { inner: self.inner } } /// Set whether the `date` header should be included in HTTP responses. /// /// Note that including the `date` header is recommended by RFC 7231. /// /// Default is true. pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self { self.inner.http1.auto_date_header(enabled); self } /// Set whether HTTP/1 connections should support half-closures. /// /// Clients can chose to shutdown their write-side while waiting /// for the server to respond. Setting this to `true` will /// prevent closing the connection immediately if `read` /// detects an EOF in the middle of a request. /// /// Default is `false`. pub fn half_close(&mut self, val: bool) -> &mut Self { self.inner.http1.half_close(val); self } /// Enables or disables HTTP/1 keep-alive. /// /// Default is true. pub fn keep_alive(&mut self, val: bool) -> &mut Self { self.inner.http1.keep_alive(val); self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { self.inner.http1.title_case_headers(enabled); self } /// Set whether HTTP/1 connections will silently ignored malformed header lines. /// /// If this is enabled and a header line does not start with a valid header /// name, or does not include a colon at all, the line will be silently ignored /// and no error will be reported. /// /// Default is false. pub fn ignore_invalid_headers(&mut self, enabled: bool) -> &mut Self { self.inner.http1.ignore_invalid_headers(enabled); self } /// Set whether to support preserving original header cases. 
/// /// Currently, this will record the original cases received, and store them /// in a private extension on the `Request`. It will also look for and use /// such an extension in any provided `Response`. /// /// Since the relevant extension is still private, there is no way to /// interact with the original cases. The only effect this can have now is /// to forward the cases in a proxy-like fashion. /// /// Note that this setting does not affect HTTP/2. /// /// Default is false. pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { self.inner.http1.preserve_header_case(enabled); self } /// Set the maximum number of headers. /// /// When a request is received, the parser will reserve a buffer to store headers for optimal /// performance. /// /// If server receives more headers than the buffer size, it responds to the client with /// "431 Request Header Fields Too Large". /// /// The headers is allocated on the stack by default, which has higher performance. After /// setting this value, headers will be allocated in heap memory, that is, heap memory /// allocation will occur for each request, and there will be a performance drop of about 5%. /// /// Note that this setting does not affect HTTP/2. /// /// Default is 100. pub fn max_headers(&mut self, val: usize) -> &mut Self { self.inner.http1.max_headers(val); self } /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Requires a [`Timer`] set by [`Http1Builder::timer`] to take effect. Panics if `header_read_timeout` is configured /// without a [`Timer`]. /// /// Pass `None` to disable. /// /// Default is currently 30 seconds, but do not depend on that. 
pub fn header_read_timeout(&mut self, read_timeout: impl Into>) -> &mut Self { self.inner.http1.header_read_timeout(read_timeout); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// /// Note that setting this to false may mean more copies of body data, /// but may also improve performance when an IO transport doesn't /// support vectored writes well, such as most TLS implementations. /// /// Setting this to true will force hyper to use queued strategy /// which may eliminate unnecessary cloning on some TLS backends /// /// Default is `auto`. In this mode hyper will try to guess which /// mode to use pub fn writev(&mut self, val: bool) -> &mut Self { self.inner.http1.writev(val); self } /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. /// /// # Panics /// /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. pub fn max_buf_size(&mut self, max: usize) -> &mut Self { self.inner.http1.max_buf_size(max); self } /// Aggregates flushes to better support pipelined responses. /// /// Experimental, may have bugs. /// /// Default is false. pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { self.inner.http1.pipeline_flush(enabled); self } /// Set the timer used in background tasks. pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { self.inner.http1.timer(timer); self } /// Bind a connection together with a [`Service`]. #[cfg(feature = "http2")] pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`]. 
#[cfg(not(feature = "http2"))] pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. #[cfg(feature = "http2")] pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { self.inner.serve_connection_with_upgrades(io, service) } } /// Http2 part of builder. #[cfg(feature = "http2")] pub struct Http2Builder<'a, E> { inner: &'a mut Builder, } #[cfg(feature = "http2")] impl Http2Builder<'_, E> { #[cfg(feature = "http1")] /// Http1 configuration. pub fn http1(&mut self) -> Http1Builder<'_, E> { Http1Builder { inner: self.inner } } /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. /// /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). /// As of v0.4.0, it is 20. /// /// See for more information. pub fn max_pending_accept_reset_streams(&mut self, max: impl Into>) -> &mut Self { self.inner.http2.max_pending_accept_reset_streams(max); self } /// Configures the maximum number of local reset streams allowed before a GOAWAY will be sent. /// /// If not set, hyper will use a default, currently of 1024. /// /// If `None` is supplied, hyper will not apply any limit. /// This is not advised, as it can potentially expose servers to DOS vulnerabilities. /// /// See for more information. 
pub fn max_local_error_reset_streams(&mut self, max: impl Into>) -> &mut Self { self.inner.http2.max_local_error_reset_streams(max); self } /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.initial_stream_window_size(sz); self } /// Sets the max connection-level flow control for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.initial_connection_window_size(sz); self } /// Sets whether to use an adaptive flow control. /// /// Enabling this will override the limits set in /// `http2_initial_stream_window_size` and /// `http2_initial_connection_window_size`. pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { self.inner.http2.adaptive_window(enabled); self } /// Sets the maximum frame size to use for HTTP2. /// /// Passing `None` will do nothing. /// /// If not set, hyper will use a default. pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { self.inner.http2.max_frame_size(sz); self } /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. /// /// Default is 200. Passing `None` will remove any limit. /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { self.inner.http2.max_concurrent_streams(max); self } /// Sets an interval for HTTP2 Ping frames should be sent to keep a /// connection alive. /// /// Pass `None` to disable HTTP2 keep-alive. /// /// Default is currently disabled. 
/// /// # Cargo Feature /// pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { self.inner.http2.keep_alive_interval(interval); self } /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. /// /// If the ping is not acknowledged within the timeout, the connection will /// be closed. Does nothing if `http2_keep_alive_interval` is disabled. /// /// Default is 20 seconds. /// /// # Cargo Feature /// pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { self.inner.http2.keep_alive_timeout(timeout); self } /// Set the maximum write buffer size for each HTTP/2 stream. /// /// Default is currently ~400KB, but may change. /// /// # Panics /// /// The value must be no larger than `u32::MAX`. pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { self.inner.http2.max_send_buf_size(max); self } /// Enables the [extended CONNECT protocol]. /// /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 pub fn enable_connect_protocol(&mut self) -> &mut Self { self.inner.http2.enable_connect_protocol(); self } /// Sets the max size of received header frames. /// /// Default is currently ~16MB, but may change. pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { self.inner.http2.max_header_list_size(max); self } /// Set the timer used in background tasks. pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { self.inner.http2.timer(timer); self } /// Set whether the `date` header should be included in HTTP responses. /// /// Note that including the `date` header is recommended by RFC 7231. /// /// Default is true. pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self { self.inner.http2.auto_date_header(enabled); self } /// Bind a connection together with a [`Service`]. 
pub async fn serve_connection(&self, io: I, service: S) -> Result<()> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + 'static, E: HttpServerConnExec, { self.inner.serve_connection(io, service).await } /// Bind a connection together with a [`Service`], with the ability to /// handle HTTP upgrades. This requires that the IO object implements /// `Send`. pub fn serve_connection_with_upgrades( &self, io: I, service: S, ) -> UpgradeableConnection<'_, I, S, E> where S: Service, Response = Response>, S::Future: 'static, S::Error: Into>, B: Body + 'static, B::Error: Into>, I: Read + Write + Unpin + Send + 'static, E: HttpServerConnExec, { self.inner.serve_connection_with_upgrades(io, service) } } #[cfg(test)] mod tests { use crate::{ rt::{TokioExecutor, TokioIo}, server::conn::auto, }; use http::{Request, Response}; use http_body::Body; use http_body_util::{BodyExt, Empty, Full}; use hyper::{body, body::Bytes, client, service::service_fn}; use std::{convert::Infallible, error::Error as StdError, net::SocketAddr, time::Duration}; use tokio::{ net::{TcpListener, TcpStream}, pin, }; const BODY: &[u8] = b"Hello, world!"; #[test] fn configuration() { // One liner. auto::Builder::new(TokioExecutor::new()) .http1() .keep_alive(true) .http2() .keep_alive_interval(None); // .serve_connection(io, service); // Using variable. 
let mut builder = auto::Builder::new(TokioExecutor::new()); builder.http1().keep_alive(true); builder.http2().keep_alive_interval(None); // builder.serve_connection(io, service); } #[test] #[cfg(feature = "http1")] fn title_case_headers_configuration() { // Test title_case_headers can be set on the main builder auto::Builder::new(TokioExecutor::new()).title_case_headers(true); // Can be combined with other configuration auto::Builder::new(TokioExecutor::new()) .title_case_headers(true) .http1_only(); } #[test] #[cfg(feature = "http1")] fn preserve_header_case_configuration() { // Test preserve_header_case can be set on the main builder auto::Builder::new(TokioExecutor::new()).preserve_header_case(true); // Can be combined with other configuration auto::Builder::new(TokioExecutor::new()) .preserve_header_case(true) .http1_only(); } #[cfg(not(miri))] #[tokio::test] async fn http1() { let addr = start_server(false, false).await; let mut sender = connect_h1(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2() { let addr = start_server(false, false).await; let mut sender = connect_h2(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2_only() { let addr = start_server(false, true).await; let mut sender = connect_h2(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http2_only_fail_if_client_is_http1() { let addr = start_server(false, true).await; let mut sender = connect_h1(addr).await; let _ = sender 
.send_request(Request::new(Empty::::new())) .await .expect_err("should fail"); } #[cfg(not(miri))] #[tokio::test] async fn http1_only() { let addr = start_server(true, false).await; let mut sender = connect_h1(addr).await; let response = sender .send_request(Request::new(Empty::::new())) .await .unwrap(); let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body, BODY); } #[cfg(not(miri))] #[tokio::test] async fn http1_only_fail_if_client_is_http2() { let addr = start_server(true, false).await; let mut sender = connect_h2(addr).await; let _ = sender .send_request(Request::new(Empty::::new())) .await .expect_err("should fail"); } #[cfg(not(miri))] #[tokio::test] async fn graceful_shutdown() { let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) .await .unwrap(); let listener_addr = listener.local_addr().unwrap(); // Spawn the task in background so that we can connect there let listen_task = tokio::spawn(async move { listener.accept().await.unwrap() }); // Only connect a stream, do not send headers or anything let _stream = TcpStream::connect(listener_addr).await.unwrap(); let (stream, _) = listen_task.await.unwrap(); let stream = TokioIo::new(stream); let builder = auto::Builder::new(TokioExecutor::new()); let connection = builder.serve_connection(stream, service_fn(hello)); pin!(connection); connection.as_mut().graceful_shutdown(); let connection_error = tokio::time::timeout(Duration::from_millis(200), connection) .await .expect("Connection should have finished in a timely manner after graceful shutdown.") .expect_err("Connection should have been interrupted."); let connection_error = connection_error .downcast_ref::() .expect("The error should have been `std::io::Error`."); assert_eq!(connection_error.kind(), std::io::ErrorKind::Interrupted); } async fn connect_h1(addr: SocketAddr) -> client::conn::http1::SendRequest where B: Body + Send + 'static, B::Data: Send, B::Error: Into>, { let stream = 
TokioIo::new(TcpStream::connect(addr).await.unwrap()); let (sender, connection) = client::conn::http1::handshake(stream).await.unwrap(); tokio::spawn(connection); sender } async fn connect_h2(addr: SocketAddr) -> client::conn::http2::SendRequest where B: Body + Unpin + Send + 'static, B::Data: Send, B::Error: Into>, { let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap()); let (sender, connection) = client::conn::http2::Builder::new(TokioExecutor::new()) .handshake(stream) .await .unwrap(); tokio::spawn(connection); sender } async fn start_server(h1_only: bool, h2_only: bool) -> SocketAddr { let addr: SocketAddr = ([127, 0, 0, 1], 0).into(); let listener = TcpListener::bind(addr).await.unwrap(); let local_addr = listener.local_addr().unwrap(); tokio::spawn(async move { loop { let (stream, _) = listener.accept().await.unwrap(); let stream = TokioIo::new(stream); tokio::task::spawn(async move { let mut builder = auto::Builder::new(TokioExecutor::new()); if h1_only { builder = builder.http1_only(); builder.serve_connection(stream, service_fn(hello)).await } else if h2_only { builder = builder.http2_only(); builder.serve_connection(stream, service_fn(hello)).await } else { builder .http2() .max_header_list_size(4096) .serve_connection_with_upgrades(stream, service_fn(hello)) .await } .unwrap(); }); } }); local_addr } async fn hello(_req: Request) -> Result>, Infallible> { Ok(Response::new(Full::new(Bytes::from(BODY)))) } } hyper-util-0.1.19/src/server/conn/auto/upgrade.rs000064400000000000000000000037351046102023000200250ustar 00000000000000//! Upgrade utilities. use bytes::{Bytes, BytesMut}; use hyper::{ rt::{Read, Write}, upgrade::Upgraded, }; use crate::common::rewind::Rewind; /// Tries to downcast the internal trait object to the type passed. /// /// On success, returns the downcasted parts. On error, returns the Upgraded back. 
/// This is a kludge to work around the fact that the machinery provided by /// [`hyper_util::server::conn::auto`] wraps the inner `T` with a private type /// that is not reachable from outside the crate. /// /// [`hyper_util::server::conn::auto`]: crate::server::conn::auto /// /// This kludge will be removed when this machinery is added back to the main /// `hyper` code. pub fn downcast(upgraded: Upgraded) -> Result, Upgraded> where T: Read + Write + Unpin + 'static, { let hyper::upgrade::Parts { io: rewind, mut read_buf, .. } = upgraded.downcast::>()?; if let Some(pre) = rewind.pre { read_buf = if read_buf.is_empty() { pre } else { let mut buf = BytesMut::from(read_buf); buf.extend_from_slice(&pre); buf.freeze() }; } Ok(Parts { io: rewind.inner, read_buf, }) } /// The deconstructed parts of an [`Upgraded`] type. /// /// Includes the original IO type, and a read buffer of bytes that the /// HTTP state machine may have already read before completing an upgrade. #[derive(Debug)] #[non_exhaustive] pub struct Parts { /// The original IO object used before the upgrade. pub io: T, /// A buffer of bytes that have been read but not processed as HTTP. /// /// For instance, if the `Connection` is used for an HTTP upgrade request, /// it is possible the server sent back the first bytes of the new protocol /// along with the response upgrade. /// /// You will want to check for any existing bytes if you plan to continue /// communicating on the IO object. pub read_buf: Bytes, } hyper-util-0.1.19/src/server/conn/mod.rs000064400000000000000000000001331046102023000161720ustar 00000000000000//! Connection utilities. #[cfg(any(feature = "http1", feature = "http2"))] pub mod auto; hyper-util-0.1.19/src/server/graceful.rs000064400000000000000000000371401046102023000162560ustar 00000000000000//! Utility to gracefully shutdown a server. //! //! This module provides a [`GracefulShutdown`] type, //! which can be used to gracefully shutdown a server. //! //! See //! 
for an example of how to use this. use std::{ fmt::{self, Debug}, future::Future, pin::Pin, task::{self, Poll}, }; use pin_project_lite::pin_project; use tokio::sync::watch; /// A graceful shutdown utility // Purposefully not `Clone`, see `watcher()` method for why. pub struct GracefulShutdown { tx: watch::Sender<()>, } /// A watcher side of the graceful shutdown. /// /// This type can only watch a connection, it cannot trigger a shutdown. /// /// Call [`GracefulShutdown::watcher()`] to construct one of these. pub struct Watcher { rx: watch::Receiver<()>, } impl GracefulShutdown { /// Create a new graceful shutdown helper. pub fn new() -> Self { let (tx, _) = watch::channel(()); Self { tx } } /// Wrap a future for graceful shutdown watching. pub fn watch(&self, conn: C) -> impl Future { self.watcher().watch(conn) } /// Create an owned type that can watch a connection. /// /// This method allows created an owned type that can be sent onto another /// task before calling [`Watcher::watch()`]. // Internal: this function exists because `Clone` allows footguns. // If the `tx` were cloned (or the `rx`), race conditions can happens where // one task starting a shutdown is scheduled and interwined with a task // starting to watch a connection, and the "watch version" is one behind. pub fn watcher(&self) -> Watcher { let rx = self.tx.subscribe(); Watcher { rx } } /// Signal shutdown for all watched connections. /// /// This returns a `Future` which will complete once all watched /// connections have shutdown. pub async fn shutdown(self) { let Self { tx } = self; // signal all the watched futures about the change let _ = tx.send(()); // and then wait for all of them to complete tx.closed().await; } /// Returns the number of the watching connections. 
pub fn count(&self) -> usize { self.tx.receiver_count() } } impl Debug for GracefulShutdown { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GracefulShutdown").finish() } } impl Default for GracefulShutdown { fn default() -> Self { Self::new() } } impl Watcher { /// Wrap a future for graceful shutdown watching. pub fn watch(self, conn: C) -> impl Future { let Watcher { mut rx } = self; GracefulConnectionFuture::new(conn, async move { let _ = rx.changed().await; // hold onto the rx until the watched future is completed rx }) } } impl Debug for Watcher { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GracefulWatcher").finish() } } pin_project! { struct GracefulConnectionFuture { #[pin] conn: C, #[pin] cancel: F, #[pin] // If cancelled, this is held until the inner conn is done. cancelled_guard: Option, } } impl GracefulConnectionFuture { fn new(conn: C, cancel: F) -> Self { Self { conn, cancel, cancelled_guard: None, } } } impl Debug for GracefulConnectionFuture { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("GracefulConnectionFuture").finish() } } impl Future for GracefulConnectionFuture where C: GracefulConnection, F: Future, { type Output = C::Output; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { let mut this = self.project(); if this.cancelled_guard.is_none() { if let Poll::Ready(guard) = this.cancel.poll(cx) { this.cancelled_guard.set(Some(guard)); this.conn.as_mut().graceful_shutdown(); } } this.conn.poll(cx) } } /// An internal utility trait as an umbrella target for all (hyper) connection /// types that the [`GracefulShutdown`] can watch. pub trait GracefulConnection: Future> + private::Sealed { /// The error type returned by the connection when used as a future. type Error; /// Start a graceful shutdown process for this connection. 
fn graceful_shutdown(self: Pin<&mut Self>); } #[cfg(feature = "http1")] impl GracefulConnection for hyper::server::conn::http1::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { type Error = hyper::Error; fn graceful_shutdown(self: Pin<&mut Self>) { hyper::server::conn::http1::Connection::graceful_shutdown(self); } } #[cfg(feature = "http2")] impl GracefulConnection for hyper::server::conn::http2::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = hyper::Error; fn graceful_shutdown(self: Pin<&mut Self>) { hyper::server::conn::http2::Connection::graceful_shutdown(self); } } #[cfg(feature = "server-auto")] impl GracefulConnection for crate::server::conn::auto::Connection<'_, I, S, E> where S: hyper::service::Service, Response = http::Response>, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = Box; fn graceful_shutdown(self: Pin<&mut Self>) { crate::server::conn::auto::Connection::graceful_shutdown(self); } } #[cfg(feature = "server-auto")] impl GracefulConnection for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E> where S: hyper::service::Service, Response = http::Response>, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { type Error = Box; fn graceful_shutdown(self: Pin<&mut Self>) { crate::server::conn::auto::UpgradeableConnection::graceful_shutdown(self); } } mod private { pub trait Sealed {} #[cfg(feature = "http1")] impl Sealed for 
hyper::server::conn::http1::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { } #[cfg(feature = "http1")] impl Sealed for hyper::server::conn::http1::UpgradeableConnection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, { } #[cfg(feature = "http2")] impl Sealed for hyper::server::conn::http2::Connection where S: hyper::service::HttpService, S::Error: Into>, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } #[cfg(feature = "server-auto")] impl Sealed for crate::server::conn::auto::Connection<'_, I, S, E> where S: hyper::service::Service< http::Request, Response = http::Response, >, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } #[cfg(feature = "server-auto")] impl Sealed for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E> where S: hyper::service::Service< http::Request, Response = http::Response, >, S::Error: Into>, S::Future: 'static, I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static, B: hyper::body::Body + 'static, B::Error: Into>, E: hyper::rt::bounds::Http2ServerConnExec, { } } #[cfg(test)] mod test { use super::*; use pin_project_lite::pin_project; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; pin_project! 
{ #[derive(Debug)] struct DummyConnection { #[pin] future: F, shutdown_counter: Arc, } } impl private::Sealed for DummyConnection {} impl GracefulConnection for DummyConnection { type Error = (); fn graceful_shutdown(self: Pin<&mut Self>) { self.shutdown_counter.fetch_add(1, Ordering::SeqCst); } } impl Future for DummyConnection { type Output = Result<(), ()>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { match self.project().future.poll(cx) { Poll::Ready(_) => Poll::Ready(Ok(())), Poll::Pending => Poll::Pending, } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); let (dummy_tx, _) = tokio::sync::broadcast::channel(1); for i in 1..=3 { let mut dummy_rx = dummy_tx.subscribe(); let shutdown_counter = shutdown_counter.clone(); let future = async move { tokio::time::sleep(std::time::Duration::from_millis(i * 10)).await; let _ = dummy_rx.recv().await; }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); let _ = dummy_tx.send(()); tokio::select! 
{ _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_delayed_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); //tokio::time::sleep(std::time::Duration::from_millis(i * 5)).await; let future = async move { tokio::time::sleep(std::time::Duration::from_millis(i * 50)).await; }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! { _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_multi_per_watcher_ok() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); let mut futures = Vec::new(); for u in 1..=i { let future = tokio::time::sleep(std::time::Duration::from_millis(u * 50)); let dummy_conn = DummyConnection { future, shutdown_counter: shutdown_counter.clone(), }; let conn = graceful.watch(dummy_conn); futures.push(conn); } tokio::spawn(async move { futures_util::future::join_all(futures).await; }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! 
{ _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => { panic!("timeout") }, _ = graceful.shutdown() => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 6); } } } #[cfg(not(miri))] #[tokio::test] async fn test_graceful_shutdown_timeout() { let graceful = GracefulShutdown::new(); let shutdown_counter = Arc::new(AtomicUsize::new(0)); for i in 1..=3 { let shutdown_counter = shutdown_counter.clone(); let future = async move { if i == 1 { std::future::pending::<()>().await } else { std::future::ready(()).await } }; let dummy_conn = DummyConnection { future, shutdown_counter, }; let conn = graceful.watch(dummy_conn); tokio::spawn(async move { conn.await.unwrap(); }); } assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0); tokio::select! { _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => { assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3); }, _ = graceful.shutdown() => { panic!("shutdown should not be completed: as not all our conns finish") } } } } hyper-util-0.1.19/src/server/mod.rs000064400000000000000000000001341046102023000152360ustar 00000000000000//! Server utilities. pub mod conn; #[cfg(feature = "server-graceful")] pub mod graceful; hyper-util-0.1.19/src/service/glue.rs000064400000000000000000000036441046102023000155560ustar 00000000000000use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; use super::Oneshot; /// A tower [`Service`][tower-svc] converted into a hyper [`Service`][hyper-svc]. /// /// This wraps an inner tower service `S` in a [`hyper::service::Service`] implementation. See /// the module-level documentation of [`service`][crate::service] for more information about using /// [`tower`][tower] services and middleware with [`hyper`]. 
/// /// [hyper-svc]: hyper::service::Service /// [tower]: https://docs.rs/tower/latest/tower/ /// [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html #[derive(Debug, Copy, Clone)] pub struct TowerToHyperService { service: S, } impl TowerToHyperService { /// Create a new [`TowerToHyperService`] from a tower service. pub fn new(tower_service: S) -> Self { Self { service: tower_service, } } } impl hyper::service::Service for TowerToHyperService where S: tower_service::Service + Clone, { type Response = S::Response; type Error = S::Error; type Future = TowerToHyperServiceFuture; fn call(&self, req: R) -> Self::Future { TowerToHyperServiceFuture { future: Oneshot::new(self.service.clone(), req), } } } pin_project! { /// Response future for [`TowerToHyperService`]. /// /// This future is acquired by [`call`][hyper::service::Service::call]ing a /// [`TowerToHyperService`]. pub struct TowerToHyperServiceFuture where S: tower_service::Service, { #[pin] future: Oneshot, } } impl Future for TowerToHyperServiceFuture where S: tower_service::Service, { type Output = Result; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.project().future.poll(cx) } } hyper-util-0.1.19/src/service/mod.rs000064400000000000000000000026631046102023000154010ustar 00000000000000//! Service utilities. //! //! [`hyper::service`] provides a [`Service`][hyper-svc] trait, representing an asynchronous //! function from a `Request` to a `Response`. This provides an interface allowing middleware for //! network application to be written in a modular and reusable way. //! //! This submodule provides an assortment of utilities for working with [`Service`][hyper-svc]s. //! See the module-level documentation of [`hyper::service`] for more information. //! //! # Tower //! //! While [`hyper`] uses its own notion of a [`Service`][hyper-svc] internally, many other //! libraries use a library such as [`tower`][tower] to provide the fundamental model of an //! 
asynchronous function. //! //! The [`TowerToHyperService`] type provided by this submodule can be used to bridge these //! ecosystems together. By wrapping a [`tower::Service`][tower-svc] in [`TowerToHyperService`], //! it can be passed into [`hyper`] interfaces that expect a [`hyper::service::Service`]. //! //! [hyper-svc]: hyper::service::Service //! [tower]: https://docs.rs/tower/latest/tower/ //! [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html #[cfg(feature = "service")] mod glue; #[cfg(any(feature = "client-legacy", feature = "service"))] mod oneshot; #[cfg(feature = "service")] pub use self::glue::{TowerToHyperService, TowerToHyperServiceFuture}; #[cfg(any(feature = "client-legacy", feature = "service"))] pub(crate) use self::oneshot::Oneshot; hyper-util-0.1.19/src/service/oneshot.rs000064400000000000000000000031631046102023000162750ustar 00000000000000use futures_core::ready; use pin_project_lite::pin_project; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use tower_service::Service; // Vendored from tower::util to reduce dependencies, the code is small enough. // Not really pub, but used in a trait for bounds pin_project! 
{ #[project = OneshotProj] #[derive(Debug)] pub enum Oneshot, Req> { NotReady { svc: S, req: Option, }, Called { #[pin] fut: S::Future, }, Done, } } impl Oneshot where S: Service, { pub(crate) const fn new(svc: S, req: Req) -> Self { Oneshot::NotReady { svc, req: Some(req), } } } impl Future for Oneshot where S: Service, { type Output = Result; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { let this = self.as_mut().project(); match this { OneshotProj::NotReady { svc, req } => { ready!(svc.poll_ready(cx))?; let fut = svc.call(req.take().expect("already called")); self.set(Oneshot::Called { fut }); } OneshotProj::Called { fut } => { let res = ready!(fut.poll(cx))?; self.set(Oneshot::Done); return Poll::Ready(Ok(res)); } OneshotProj::Done => panic!("polled after complete"), } } } } hyper-util-0.1.19/tests/legacy_client.rs000064400000000000000000001573741046102023000163510ustar 00000000000000mod test_utils; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Poll; use std::thread; use std::time::Duration; use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, FutureExt, TryFutureExt}; use futures_util::stream::StreamExt; use futures_util::{self, Stream}; use http_body_util::BodyExt; use http_body_util::{Empty, Full, StreamBody}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use hyper::body::Bytes; use hyper::body::Frame; use hyper::Request; use hyper_util::client::legacy::connect::{capture_connection, HttpConnector}; use hyper_util::client::legacy::Client; use hyper_util::rt::{TokioExecutor, TokioIo}; use test_utils::{DebugConnector, DebugStream}; pub fn runtime() -> tokio::runtime::Runtime { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("new rt") } fn s(buf: &[u8]) -> &str { std::str::from_utf8(buf).expect("from_utf8") } #[cfg(not(miri))] #[test] fn drop_body_before_eof_closes_connection() { // 
https://github.com/hyperium/hyper/issues/1353 let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let (closes_tx, closes) = mpsc::channel::<()>(10); let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); let body = vec![b'x'; 1024 * 128]; write!( sock, "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", body.len() ) .expect("write head"); let _ = sock.write_all(&body); let _ = tx1.send(()); }); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; rt.block_on(async move { let (res, _) = future::join(res, rx).await; res.unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; }); rt.block_on(closes.into_future()).0.expect("closes"); } #[cfg(not(miri))] #[tokio::test] async fn drop_client_closes_idle_connections() { let _ = pretty_env_logger::try_init(); let server = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, mut closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let t1 = tokio::spawn(async move { let mut sock = server.accept().await.unwrap().0; let mut buf = [0; 4096]; sock.read(&mut buf).await.expect("read 1"); let body = [b'x'; 64]; let headers = format!("HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", body.len()); sock.write_all(headers.as_bytes()) .await .expect("write head"); sock.write_all(&body).await.expect("write body"); let _ = 
tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped if let Ok(n) = sock.read(&mut buf).await { assert_eq!(n, 0); } }); let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); // not closed yet, just idle future::poll_fn(|ctx| { assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); Poll::Ready(()) }) .await; // drop to start the connections closing drop(client); // and wait a few ticks for the connections to close let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; t1.await.unwrap(); } #[cfg(not(miri))] #[tokio::test] async fn drop_response_future_closes_in_progress_connection() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); // we never write a response head // simulates a slow server operation let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = client_drop_rx.recv(); }); let res = { let 
client = Client::builder(TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); client.request(req).map(|_| unreachable!()) }; future::select(res, rx1).await; // res now dropped let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn drop_response_body_closes_in_progress_connection() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); write!( sock, "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n" ) .expect("write head"); let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = client_drop_rx.recv(); }); let rx = rx1; let res = { let client = Client::builder(TokioExecutor::new()).build( DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx), ); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); // notably, haven't read body yet client.request(req) }; let (res, _) = future::join(res, rx).await; // drop the body res.unwrap(); // and wait a few ticks to see the connection drop let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); 
futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn no_keep_alive_closes_connection() { // https://github.com/hyperium/hyper/issues/1383 let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = oneshot::channel(); let (_tx2, rx2) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .unwrap(); let _ = tx1.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = rx2.recv(); }); let client = Client::builder(TokioExecutor::new()) .pool_max_idle_per_host(0) .build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(close, t).await; } #[cfg(not(miri))] #[tokio::test] async fn socket_disconnect_closes_idle_conn() { // notably when keep-alive is enabled let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, closes) = mpsc::channel(10); let (tx1, rx1) = 
oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .unwrap(); let _ = tx1.send(()); }); let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes( HttpConnector::new(), closes_tx, )); let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); let rx = rx1; let (res, _) = future::join(res, rx).await; res.unwrap(); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[test] fn connect_call_is_lazy() { // We especially don't want connects() triggered if there's // idle connections that the Checkout would have found let _ = pretty_env_logger::try_init(); let _rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); assert_eq!(connects.load(Ordering::Relaxed), 0); let req = Request::builder() .uri("http://hyper.local/a") .body(Empty::::new()) .unwrap(); let _fut = client.request(req); // internal Connect::connect should have been lazy, and not // triggered an actual connect yet. 
assert_eq!(connects.load(Ordering::Relaxed), 0); } #[cfg(not(miri))] #[test] fn client_keep_alive_0() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); let n2 = sock.read(&mut buf).expect("read 2"); assert_ne!(n2, 0); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::SeqCst), 1); // sleep real quick to let the threadpool put connection in ready // state and back into client pool thread::sleep(Duration::from_millis(50)); let rx = rx2; let req = Request::builder() .uri(&*format!("http://{addr}/b")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 1, "second request should still only have 1 connect" ); drop(client); } #[cfg(not(miri))] #[test] fn client_keep_alive_extra_body() { let _ = pretty_env_logger::try_init(); let server 
= TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello") .expect("write 1"); // the body "hello", while ignored because its a HEAD request, should mean the connection // cannot be put back in the pool let _ = tx1.send(()); let mut sock2 = server.accept().unwrap().0; let n2 = sock2.read(&mut buf).expect("read 2"); assert_ne!(n2, 0); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock2 .write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::Relaxed), 0); let rx = rx1; let req = Request::builder() .method("HEAD") .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::Relaxed), 1); let rx = rx2; let req = Request::builder() .uri(&*format!("http://{addr}/b")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(connects.load(Ordering::Relaxed), 2); } #[cfg(not(miri))] #[tokio::test] async fn client_keep_alive_when_response_before_request_body_ends() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let (closes_tx, mut closes) = mpsc::channel::<()>(10); 
let connector = DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector.clone()); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); let (_tx3, rx3) = std::sync::mpsc::channel::<()>(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); // after writing the response, THEN stream the body let _ = tx1.send(()); sock.read(&mut buf).expect("read 2"); let _ = tx2.send(()); // prevent this thread from closing until end of test, so the connection // stays open and idle until Client is dropped let _ = rx3.recv(); }); assert_eq!(connects.load(Ordering::Relaxed), 0); let delayed_body = rx1 .then(|_| Box::pin(tokio::time::sleep(Duration::from_millis(200)))) .map(|_| Ok::<_, ()>(Frame::data(&b"hello a"[..]))) .map_err(|_| -> hyper::Error { panic!("rx1") }) .into_stream(); let req = Request::builder() .method("POST") .uri(&*format!("http://{addr}/a")) .body(StreamBody::new(delayed_body)) .unwrap(); let res = client.request(req).map_ok(move |res| { assert_eq!(res.status(), hyper::StatusCode::OK); }); future::join(res, rx2).await.0.unwrap(); future::poll_fn(|ctx| { assert!(Pin::new(&mut closes).poll_next(ctx).is_pending()); Poll::Ready(()) }) .await; assert_eq!(connects.load(Ordering::Relaxed), 1); drop(client); let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out")); futures_util::pin_mut!(t); let close = closes.into_future().map(|(opt, _)| opt.expect("closes")); future::select(t, close).await; } #[cfg(not(miri))] #[tokio::test] async fn client_keep_alive_eager_when_chunked() { // If a response body has been read to 
completion, with completion // determined by some other factor, like decompression, and thus // it is in't polled a final time to clear the final 0-len chunk, // try to eagerly clear it so the connection can still be used. let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let connector = DebugConnector::new(); let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all( b"\ HTTP/1.1 200 OK\r\n\ transfer-encoding: chunked\r\n\ \r\n\ 5\r\n\ hello\r\n\ 0\r\n\r\n\ ", ) .expect("write 1"); let _ = tx1.send(()); let n2 = sock.read(&mut buf).expect("read 2"); assert_ne!(n2, 0, "bytes of second request"); let second_get = "GET /b HTTP/1.1\r\n"; assert_eq!(s(&buf[..second_get.len()]), second_get); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 2"); let _ = tx2.send(()); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap(); let fut = client.request(req); let resp = future::join(fut, rx).map(|r| r.0).await.unwrap(); assert_eq!(connects.load(Ordering::SeqCst), 1); assert_eq!(resp.status(), 200); assert_eq!(resp.headers()["transfer-encoding"], "chunked"); // Read the "hello" chunk... 
let chunk = resp.collect().await.unwrap().to_bytes(); assert_eq!(chunk, "hello"); // sleep real quick to let the threadpool put connection in ready // state and back into client pool tokio::time::sleep(Duration::from_millis(50)).await; let rx = rx2; let req = Request::builder() .uri(&*format!("http://{addr}/b")) .body(Empty::::new()) .unwrap(); let fut = client.request(req); future::join(fut, rx).map(|r| r.0).await.unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 1, "second request should still only have 1 connect" ); drop(client); } #[cfg(not(miri))] #[test] fn connect_proxy_sends_absolute_uri() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new().proxy(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; let n = sock.read(&mut buf).expect("read 1"); let expected = format!("GET http://{addr}/foo/bar HTTP/1.1\r\nhost: {addr}\r\n\r\n"); assert_eq!(s(&buf[..n]), expected); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); }); let rx = rx1; let req = Request::builder() .uri(&*format!("http://{addr}/foo/bar")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); } #[cfg(not(miri))] #[test] fn connect_proxy_http_connect_sends_authority_form() { let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new().proxy(); let client = Client::builder(TokioExecutor::new()).build(connector); let 
(tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; //drop(server); sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; let n = sock.read(&mut buf).expect("read 1"); let expected = format!("CONNECT {addr} HTTP/1.1\r\nhost: {addr}\r\n\r\n"); assert_eq!(s(&buf[..n]), expected); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); let _ = tx1.send(()); }); let rx = rx1; let req = Request::builder() .method("CONNECT") .uri(&*format!("http://{addr}/useless/path")) .body(Empty::::new()) .unwrap(); let res = client.request(req); rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); } #[cfg(not(miri))] #[test] fn client_upgrade() { use tokio::io::{AsyncReadExt, AsyncWriteExt}; let _ = pretty_env_logger::try_init(); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let (tx1, rx1) = oneshot::channel(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all( b"\ HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: foobar\r\n\ \r\n\ foobar=ready\ ", ) .unwrap(); let _ = tx1.send(()); let n = sock.read(&mut buf).expect("read 2"); assert_eq!(&buf[..n], b"foo=bar"); sock.write_all(b"bar=foo").expect("write 2"); }); let rx = rx1; let req = Request::builder() .method("GET") .uri(&*format!("http://{addr}/up")) .body(Empty::::new()) .unwrap(); let res = client.request(req); let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap(); assert_eq!(res.status(), 101); let upgraded = 
rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade"); let parts = upgraded.downcast::().unwrap(); assert_eq!(s(&parts.read_buf), "foobar=ready"); let mut io = parts.io; rt.block_on(io.write_all(b"foo=bar")).unwrap(); let mut vec = vec![]; rt.block_on(io.read_to_end(&mut vec)).unwrap(); assert_eq!(vec, b"bar=foo"); } #[cfg(not(miri))] #[test] fn client_http2_upgrade() { use http::{Method, Response, Version}; use hyper::service::service_fn; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpListener; let _ = pretty_env_logger::try_init(); let rt = runtime(); let server = rt .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))) .unwrap(); let addr = server.local_addr().unwrap(); let mut connector = DebugConnector::new(); connector.alpn_h2 = true; let client = Client::builder(TokioExecutor::new()).build(connector); rt.spawn(async move { let (stream, _) = server.accept().await.expect("accept"); let stream = TokioIo::new(stream); let mut builder = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new()); // IMPORTANT: This is required to advertise our support for HTTP/2 websockets to the client. 
builder.http2().enable_connect_protocol(); builder .serve_connection_with_upgrades( stream, service_fn(|req| async move { assert_eq!(req.headers().get("host"), None); assert_eq!(req.version(), Version::HTTP_2); assert_eq!( req.headers().get(http::header::SEC_WEBSOCKET_VERSION), Some(&http::header::HeaderValue::from_static("13")) ); assert_eq!( req.extensions().get::(), Some(&hyper::ext::Protocol::from_static("websocket")) ); let on_upgrade = hyper::upgrade::on(req); tokio::spawn(async move { let upgraded = on_upgrade.await.unwrap(); let mut io = TokioIo::new(upgraded); let mut vec = vec![]; io.read_buf(&mut vec).await.unwrap(); assert_eq!(vec, b"foo=bar"); io.write_all(b"bar=foo").await.unwrap(); }); Ok::<_, hyper::Error>(Response::new(Empty::::new())) }), ) .await .expect("server"); }); let req = Request::builder() .method(Method::CONNECT) .uri(&*format!("http://{addr}/up")) .header(http::header::SEC_WEBSOCKET_VERSION, "13") .version(Version::HTTP_2) .extension(hyper::ext::Protocol::from_static("websocket")) .body(Empty::::new()) .unwrap(); let res = client.request(req); let res = rt.block_on(res).unwrap(); assert_eq!(res.status(), http::StatusCode::OK); assert_eq!(res.version(), Version::HTTP_2); let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade"); let mut io = TokioIo::new(upgraded); rt.block_on(io.write_all(b"foo=bar")).unwrap(); let mut vec = vec![]; rt.block_on(io.read_to_end(&mut vec)).unwrap(); assert_eq!(vec, b"bar=foo"); } #[cfg(not(miri))] #[test] fn alpn_h2() { use http::Response; use hyper::service::service_fn; use tokio::net::TcpListener; let _ = pretty_env_logger::try_init(); let rt = runtime(); let listener = rt .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))) .unwrap(); let addr = listener.local_addr().unwrap(); let mut connector = DebugConnector::new(); connector.alpn_h2 = true; let connects = connector.connects.clone(); let client = Client::builder(TokioExecutor::new()).build(connector); rt.spawn(async move 
{ let (stream, _) = listener.accept().await.expect("accept"); let stream = TokioIo::new(stream); hyper::server::conn::http2::Builder::new(TokioExecutor::new()) .serve_connection( stream, service_fn(|req| async move { assert_eq!(req.headers().get("host"), None); Ok::<_, hyper::Error>(Response::new(Full::::from("Hello, world"))) }), ) .await .expect("server"); }); assert_eq!(connects.load(Ordering::SeqCst), 0); let url = format!("http://{addr}/a").parse::<::hyper::Uri>().unwrap(); let res1 = client.get(url.clone()); let res2 = client.get(url.clone()); let res3 = client.get(url.clone()); rt.block_on(future::try_join3(res1, res2, res3)).unwrap(); // Since the client doesn't know it can ALPN at first, it will have // started 3 connections. But, the server above will only handle 1, // so the unwrapped responses futures show it still worked. assert_eq!(connects.load(Ordering::SeqCst), 3); let res4 = client.get(url.clone()); rt.block_on(res4).unwrap(); // HTTP/2 request allowed let res5 = client.request( Request::builder() .uri(url) .version(hyper::Version::HTTP_2) .body(Empty::::new()) .unwrap(), ); rt.block_on(res5).unwrap(); assert_eq!( connects.load(Ordering::SeqCst), 3, "after ALPN, no more connects" ); drop(client); } #[cfg(not(miri))] #[test] fn capture_connection_on_client() { let _ = pretty_env_logger::try_init(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); thread::spawn(move || { let mut sock = server.accept().unwrap().0; sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; sock.read(&mut buf).expect("read 1"); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); }); let mut req = Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) 
.unwrap(); let captured_conn = capture_connection(&mut req); rt.block_on(client.request(req)).expect("200 OK"); assert!(captured_conn.connection_metadata().is_some()); } #[cfg(not(miri))] #[test] fn connection_poisoning() { use std::sync::atomic::AtomicUsize; let _ = pretty_env_logger::try_init(); let rt = runtime(); let connector = DebugConnector::new(); let client = Client::builder(TokioExecutor::new()).build(connector); let server = TcpListener::bind("127.0.0.1:0").unwrap(); let addr = server.local_addr().unwrap(); let num_conns: Arc = Default::default(); let num_requests: Arc = Default::default(); let num_requests_tracker = num_requests.clone(); let num_conns_tracker = num_conns.clone(); thread::spawn(move || loop { let mut sock = server.accept().unwrap().0; num_conns_tracker.fetch_add(1, Ordering::Relaxed); let num_requests_tracker = num_requests_tracker.clone(); thread::spawn(move || { sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); sock.set_write_timeout(Some(Duration::from_secs(5))) .unwrap(); let mut buf = [0; 4096]; loop { if sock.read(&mut buf).expect("read 1") > 0 { num_requests_tracker.fetch_add(1, Ordering::Relaxed); sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") .expect("write 1"); } } }); }); let make_request = || { Request::builder() .uri(&*format!("http://{addr}/a")) .body(Empty::::new()) .unwrap() }; let mut req = make_request(); let captured_conn = capture_connection(&mut req); rt.block_on(client.request(req)).expect("200 OK"); assert_eq!(num_conns.load(Ordering::SeqCst), 1); assert_eq!(num_requests.load(Ordering::SeqCst), 1); rt.block_on(client.request(make_request())).expect("200 OK"); rt.block_on(client.request(make_request())).expect("200 OK"); // Before poisoning the connection is reused assert_eq!(num_conns.load(Ordering::SeqCst), 1); assert_eq!(num_requests.load(Ordering::SeqCst), 3); captured_conn .connection_metadata() .as_ref() .unwrap() .poison(); rt.block_on(client.request(make_request())).expect("200 
OK"); // After poisoning, a new connection is established assert_eq!(num_conns.load(Ordering::SeqCst), 2); assert_eq!(num_requests.load(Ordering::SeqCst), 4); rt.block_on(client.request(make_request())).expect("200 OK"); // another request can still reuse: assert_eq!(num_conns.load(Ordering::SeqCst), 2); assert_eq!(num_requests.load(Ordering::SeqCst), 5); } // ------------------------------------------------------- // Below is our custom code for testing hyper legacy-client behavior with mock connections for PR #184 // We use fully qualified paths for all types and identifiers to make this code // copy/paste-able without relying on external 'use' statements. Detailed inline // comments explain the purpose and logic of each section. //XXX: can manually run like this: // $ cargo test --features="http1,http2,server,client-legacy" --test legacy_client -- test_connection_error_propagation test_incomplete_message_error --nocapture // $ cargo test --all-features --test legacy_client -- --nocapture // $ cargo test --all-features --test legacy_client use std::error::Error; // needed for .source() eg. error[E0599]: no method named `source` found for struct `hyper_util::client::legacy::Error` in the current scope // Helper function to debug byte slices by attempting to interpret them as UTF-8. // If the bytes are valid UTF-8, they are printed as a string; otherwise, they are // printed as a raw byte array. This aids in debugging tokio_test::io::Mock mismatches. fn debug_bytes(bytes: &[u8], label: &str) { // Try to convert the byte slice to a UTF-8 string. // If successful, print it with the provided label for context. if let Ok(s) = std::str::from_utf8(bytes) { eprintln!("{}: {}", label, s); } else { // If the bytes are not valid UTF-8, print them as a raw byte array. eprintln!("{}: {:?}", label, bytes); } } // Struct representing a mock connection for testing hyper client behavior. 
// Implements hyper::rt::Read, hyper::rt::Write, and hyper_util::client::legacy::connect::Connection // traits to simulate I/O operations. Uses tokio_test::io::Mock for controlled I/O behavior. struct MockConnection { // The underlying mock I/O object, wrapped in hyper_util::rt::TokioIo for compatibility. inner: hyper_util::rt::TokioIo, // Atomic flag to signal a connection failure, controlling poll_read behavior. failed: std::sync::Arc, // The error to return when failed=true, simulating an I/O failure. error: std::sync::Arc, // Optional channel to signal unexpected writes, used for debugging. error_tx: Option>, // Tracks total bytes written, for logging and verification. bytes_written: usize, } impl MockConnection { // Constructor for MockConnection, initializing all fields. // Takes a mock I/O object, failure flag, error, and optional error channel. fn new( mock: tokio_test::io::Mock, failed: std::sync::Arc, error: std::sync::Arc, error_tx: Option>, ) -> Self { MockConnection { inner: hyper_util::rt::TokioIo::new(mock), failed, error, error_tx, bytes_written: 0, } } } // Implement hyper::rt::Read trait to handle read operations on the mock connection. // Controls whether an error or mock I/O data is returned based on the failed flag. impl hyper::rt::Read for MockConnection { // Polls the connection for reading, filling the provided buffer. // If failed=true, returns the stored error; otherwise, delegates to the mock I/O. fn poll_read( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: hyper::rt::ReadBufCursor<'_>, ) -> std::task::Poll> { // Log the current state of the failed flag for debugging. eprintln!( "poll_read: failed={}", self.failed.load(std::sync::atomic::Ordering::SeqCst) ); // Check if the connection is marked as failed. // If true, return the stored error immediately to simulate a connection failure. if self.failed.load(std::sync::atomic::Ordering::SeqCst) { // Log the error being returned for traceability. 
eprintln!("poll_read: returning error: {}", self.error); // Create a new io::Error with the same kind and message as the stored error. return std::task::Poll::Ready(std::result::Result::Err(std::io::Error::new( self.error.kind(), self.error.to_string(), ))); } // If not failed, delegate to the mock I/O to simulate normal read behavior. // This may return EOF (Poll::Ready(Ok(0))) for empty IoBuilder. let inner = std::pin::Pin::new(&mut self.inner); inner.poll_read(cx, buf) } } // Implement hyper::rt::Write trait to handle write operations on the mock connection. // Logs writes and signals unexpected writes via error_tx. impl hyper::rt::Write for MockConnection { // Polls the connection for writing, sending the provided buffer. // Logs the write operation and tracks total bytes written. fn poll_write( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &[u8], ) -> std::task::Poll> { // Log the size of the buffer being written for debugging. eprintln!("poll_write: {} bytes", buf.len()); // Debug the buffer contents as UTF-8 or raw bytes. debug_bytes(buf, "poll_write buffer"); // Delegate the write to the mock I/O object. let inner = std::pin::Pin::new(&mut self.inner); match inner.poll_write(cx, buf) { // If the write succeeds, update the bytes_written counter and log the result. std::task::Poll::Ready(std::result::Result::Ok(bytes)) => { // Increment the total bytes written for tracking. self.bytes_written += bytes; // Log the number of bytes written and the running total. eprintln!( "poll_write: wrote {} bytes, total={}", bytes, self.bytes_written ); // If error_tx is present, signal an unexpected write (used in error tests). // This helps detect writes when the connection should fail early. if let Some(tx) = self.error_tx.take() { // Log that an unexpected write is being signaled. eprintln!("poll_write: signaling unexpected write"); // Send a message through the channel, ignoring errors if the receiver is closed. 
let _ = tx.try_send(()); } // Return the successful write result. std::task::Poll::Ready(std::result::Result::Ok(bytes)) } // For pending or error results, propagate them directly. other => other, } } // Polls the connection to flush any buffered data. // Delegates to the mock I/O object. fn poll_flush( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { // Log the flush operation for debugging. eprintln!("poll_flush"); // Delegate the flush to the mock I/O object. let inner = std::pin::Pin::new(&mut self.inner); inner.poll_flush(cx) } // Polls the connection to shut down the write side. // Delegates to the mock I/O object. fn poll_shutdown( mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { // Log the shutdown operation for debugging. eprintln!("poll_shutdown"); // Delegate the shutdown to the mock I/O object. let inner = std::pin::Pin::new(&mut self.inner); inner.poll_shutdown(cx) } } // Implement hyper_util::client::legacy::connect::Connection trait to provide connection metadata. // Required for hyper to use MockConnection as a valid connection. impl hyper_util::client::legacy::connect::Connection for MockConnection { // Returns metadata about the connection. // In this case, a default Connected object indicating a new connection. fn connected(&self) -> hyper_util::client::legacy::connect::Connected { hyper_util::client::legacy::connect::Connected::new() } } // Struct representing a mock connector for creating MockConnection instances. // Implements tower_service::Service to integrate with hyper’s client. #[derive(Clone)] struct MockConnector { // The IoBuilder used to create mock I/O objects for each connection. io_builder: tokio_test::io::Builder, // Optional error to simulate a connection failure, passed to MockConnection. conn_error: Option>, } impl MockConnector { // Constructor for MockConnector, initializing the IoBuilder and optional error. 
fn new( io_builder: tokio_test::io::Builder, conn_error: Option>, ) -> Self { MockConnector { io_builder, conn_error, } } } // Implement tower_service::Service for MockConnector to create MockConnection instances. // Takes a hyper::Uri and returns a future resolving to a MockConnection. impl tower_service::Service for MockConnector { type Response = crate::MockConnection; type Error = std::io::Error; type Future = std::pin::Pin< Box< dyn futures_util::Future> + Send, >, >; // Polls the connector to check if it’s ready to handle a request. // Always ready, as we don’t have resource constraints. fn poll_ready( &mut self, _cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { std::task::Poll::Ready(std::result::Result::Ok(())) } // Creates a new MockConnection for the given URI. // Configures the connection based on io_builder and conn_error. fn call(&mut self, _req: hyper::Uri) -> Self::Future { // Clone the IoBuilder to create a fresh mock I/O object. let mut io_builder = self.io_builder.clone(); // Clone the optional connection error for this call. let conn_error = self.conn_error.clone(); // Return a pinned future that creates the MockConnection. Box::pin(async move { // Build the mock I/O object from the IoBuilder. // This defines the I/O behavior (e.g., EOF for empty builder). let mock = io_builder.build(); // Create an atomic flag to track connection failure, initially false. let failed = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); // Set the default error for non-failure cases. // Used when conn_error is None, simulating a clean EOF or connection close. let error = if let Some(ref err) = conn_error { err.clone() } else { std::sync::Arc::new(std::io::Error::new( std::io::ErrorKind::BrokenPipe, "connection closed", )) }; // Create an mpsc channel for signaling unexpected writes, if conn_error is set. // This helps debug cases where writes occur despite an expected failure. 
let error_tx = if conn_error.is_some() { // Create a channel with a buffer of 1 for signaling writes. let (tx, mut rx) = tokio::sync::mpsc::channel::<()>(1); // Spawn a task to log unexpected writes when received. tokio::spawn(async move { // Wait for a message indicating a write occurred. if rx.recv().await.is_some() { // Log the unexpected write for debugging. eprintln!("Unexpected write occurred"); } }); Some(tx) } else { None }; // If a connection error is provided, mark the connection as failed. // This causes poll_read to return the error immediately. if let Some(err_clone) = conn_error { // Set the failed flag to true atomically. failed.store(true, std::sync::atomic::Ordering::SeqCst); // Log the simulated error for traceability. eprintln!("Simulated conn task error: {}", err_clone); } // Create and return the MockConnection with all configured components. std::result::Result::Ok(crate::MockConnection::new(mock, failed, error, error_tx)) }) } } // Test for connection error propagation with PR #184. // Simulates a connection failure by setting failed=true and returning a custom io::Error. // Verifies the error propagates through hyper’s client as a hyper::Error(Io, ...). #[tokio::test] async fn test_connection_error_propagation_pr184() { // Define the error message for the simulated connection failure. // Reused for creating the error and verifying the result. let err_str = "mock connection failure"; // Create an io::Error with Other kind and the custom message. // Wrapped in Arc for sharing across threads and MockConnection. let io_error = std::sync::Arc::new(std::io::Error::new(std::io::ErrorKind::Other, err_str)); // Create an empty IoBuilder, as no I/O is expected. // The error triggers before any reads or writes occur. let io_builder = tokio_test::io::Builder::new(); // Create a MockConnector with the error to simulate a failed connection. // The error will set failed=true in MockConnection. 
let connector = crate::MockConnector::new(io_builder, Some(io_error.clone())); // Build the hyper client with TokioExecutor and our connector. // pool_max_idle_per_host(0) disables connection pooling for a fresh connection. let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new()) .pool_max_idle_per_host(0) .build::<_, http_body_util::Empty>(connector); // Build a GET request to a mock URI with custom headers. // Uses mixed-case headers to match your style, ensuring case-insensitive handling. let request = hyper::Request::builder() .uri("http://mocked") .header("hoSt", "mocked") .header("conNection", "close") .body(http_body_util::Empty::::new()) .expect("failed to build request"); // Send the request and capture the result. // Expect it to fail due to the simulated connection error. let result = client.request(request).await; // Extract the error, as the request should fail. let err = result.expect_err("expected request to fail"); // Log the full error for debugging, including its structure. // Matches your detailed logging style for traceability. eprintln!("Actually gotten error is: {:?}", err); // Downcast the error to a hyper::Error to verify its type. // Expect a hyper::Error wrapping an io::Error from MockConnection. let hyper_err = err .source() .and_then(|e| e.downcast_ref::()) .expect("expected hyper::Error"); // Downcast the hyper::Error’s source to an io::Error. // Verify it matches the simulated error from MockConnection. let io_err = hyper_err .source() .and_then(|e| e.downcast_ref::()) .expect(&format!("expected io::Error but got {:?}", hyper_err)); // Verify the io::Error has the expected kind (Other). assert_eq!(io_err.kind(), std::io::ErrorKind::Other); // Verify the io::Error’s message matches err_str. assert_eq!(io_err.to_string(), err_str); } // Test for consistent IncompleteMessage error with or without PR #184. 
// Simulates a connection that returns EOF immediately, causing hyper’s HTTP/1.1 parser // to fail with IncompleteMessage due to no response data. // Uses MockConnector with conn_error=None to keep failed=false, ensuring EOF behavior. #[tokio::test] async fn test_incomplete_message_error_pr184() { // Create an empty IoBuilder to simulate a connection with no data. // No write or read expectations, so poll_read returns EOF (Poll::Ready(Ok(0))). // This triggers IncompleteMessage in hyper’s parser. let io_builder = tokio_test::io::Builder::new(); // Create MockConnector with no error (conn_error=None). // Keeps failed=false in MockConnection, so poll_read delegates to the mock’s EOF. let connector = crate::MockConnector::new(io_builder, None); // Build the hyper client with TokioExecutor and our connector. // pool_max_idle_per_host(0) disables pooling for a fresh connection. let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new()) .pool_max_idle_per_host(0) .build::<_, http_body_util::Empty>(connector); // Build a GET request to a mock URI with headers. // Uses mixed-case headers to match test_connection_error_propagation_pr184. // Empty body ensures focus on response parsing failure. let request = hyper::Request::builder() .uri("http://mocked") .header("hoSt", "mocked") .header("conNection", "close") .body(http_body_util::Empty::::new()) .expect("failed to build request"); // Send the request and capture the result. // Expect failure due to EOF causing IncompleteMessage. let result = client.request(request).await; // Extract the error, as the request should fail. // Without PR #184, expect ChannelClosed; with PR #184, expect IncompleteMessage. let err = result.expect_err("expected request to fail"); // Log the full error for debugging, matching your style. eprintln!("Actually gotten error is: {:?}", err); // Downcast to hyper::Error to verify the error type. // Expect IncompleteMessage (with PR #184) or ChannelClosed (without). 
let hyper_err = err .source() .and_then(|e| e.downcast_ref::()) .expect("expected hyper::Error"); // Verify the error is IncompleteMessage when PR #184 is applied. // This checks the parser’s failure due to EOF. assert!( hyper_err.is_incomplete_message(), "expected IncompleteMessage, got {:?}", hyper_err ); // Confirm no io::Error is present, as this is a parsing failure, not I/O. // Ensures we’re testing the correct error type. assert!( hyper_err .source() .and_then(|e| e.downcast_ref::()) .is_none(), "expected no io::Error, got {:?}", hyper_err ); } // Test for a successful HTTP/1.1 connection using a mock connector. // Simulates a server that accepts a request and responds with a 200 OK. // Verifies the client correctly sends the request and receives the response. #[tokio::test] async fn test_successful_connection() { // Define the expected server response: a valid HTTP/1.1 200 OK with no body. let response = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n"; // Define the expected client request, including headers and CRLF termination. // This ensures the client sends the correct request format. let expected_request = b"GET / HTTP/1.1\r\nhost: mocked\r\nconnection: close\r\n\r\n"; // Create an IoBuilder to simulate the server’s I/O behavior. // Expect the client to write the request and read the response. let mut io_builder = tokio_test::io::Builder::new(); // Configure the IoBuilder to expect the request and provide the response. io_builder.write(expected_request).read(response); // Finalize the IoBuilder for use in the connector. let io_builder = io_builder; // Create a MockConnector with no error (conn_error=None). // Ensures failed=false, allowing normal I/O operations. let connector = crate::MockConnector::new(io_builder, None); // Build the hyper client with TokioExecutor and our connector. // pool_max_idle_per_host(0) ensures a fresh connection. 
let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new()) .pool_max_idle_per_host(0) .build::<_, http_body_util::Empty>(connector); // Build a GET request to a mock URI with headers. // Uses mixed-case headers to match your style and verify case-insensitive handling. let request = hyper::Request::builder() .uri("http://mocked") .header("hOst", "mocked") .header("coNnection", "close") .body(http_body_util::Empty::::new()) .expect("failed to build request"); // Send the request and capture the response. // Expect a successful response due to the configured IoBuilder. let response = client .request(request) .await .expect("request should succeed"); // Verify the response status is 200 OK. assert_eq!(response.status(), 200); } hyper-util-0.1.19/tests/proxy.rs000064400000000000000000000413541046102023000147160ustar 00000000000000use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::{TcpListener, TcpStream}; use tower_service::Service; use hyper_util::client::legacy::connect::proxy::{SocksV4, SocksV5, Tunnel}; use hyper_util::client::legacy::connect::HttpConnector; #[cfg(not(miri))] #[tokio::test] async fn test_tunnel_works() { let tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind"); let addr = tcp.local_addr().expect("local_addr"); let proxy_dst = format!("http://{addr}").parse().expect("uri"); let mut connector = Tunnel::new(proxy_dst, HttpConnector::new()); let t1 = tokio::spawn(async move { let _conn = connector .call("https://hyper.rs".parse().unwrap()) .await .expect("tunnel"); }); let t2 = tokio::spawn(async move { let (mut io, _) = tcp.accept().await.expect("accept"); let mut buf = [0u8; 64]; let n = io.read(&mut buf).await.expect("read 1"); assert_eq!( &buf[..n], b"CONNECT hyper.rs:443 HTTP/1.1\r\nHost: hyper.rs:443\r\n\r\n" ); io.write_all(b"HTTP/1.1 200 OK\r\n\r\n") .await .expect("write 1"); }); t1.await.expect("task 1"); t2.await.expect("task 2"); } #[cfg(not(miri))] #[tokio::test] async fn 
test_socks_v5_without_auth_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");

    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");

    let mut connector = SocksV5::new(proxy_dst, HttpConnector::new());

    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();

        tcp.write_all(b"Hello World!").await.expect("write 1");

        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });

    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];

        // negotiation req/res: client offers method 0x00 (no auth),
        // proxy accepts it.
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x00]);
        to_client.write_all(&[0x05, 0x00]).await.expect("write 1");

        // command req/rs: CONNECT to 127.0.0.1:<target port> (ATYP 0x01 = IPv4).
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
        let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        let n = to_client.read(&mut buf).await.expect("read 2");
        assert_eq!(&buf[..n], message);

        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        // Reply 0x00 = succeeded.
        let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        to_client.write_all(&message).await.expect("write 2");

        // 12 bytes = "Hello World!", 8 bytes = "Goodbye!".
        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8)
    });

    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");

        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });

    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}

// Same round-trip as above, but with username/password (RFC 1929) auth.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_auth_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");

    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");

    let mut connector =
        SocksV5::new(proxy_dst, HttpConnector::new()).with_auth("user".into(), "pass".into());

    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();

        tcp.write_all(b"Hello World!").await.expect("write 1");

        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });

    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];

        // negotiation req/res: client offers method 0x02 (username/password),
        // proxy selects it.
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");

        // auth req/res: version 0x01, then len-prefixed "user" and "pass";
        // reply 0x00 = success.
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");

        // command req/res: CONNECT to 127.0.0.1:<target port> (IPv4).
        let n = to_client.read(&mut buf).await.expect("read 3");
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
        let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        assert_eq!(&buf[..n], message);

        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        to_client.write_all(&message).await.expect("write 3");

        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8)
    });

    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");

        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });

    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}

// With local_dns(false) the hostname is forwarded to the proxy unresolved
// (ATYP 0x03 = domain name), letting the SOCKS server do the DNS lookup.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_server_resolved_domain_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");

    let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
        .with_auth("user".into(), "pass".into())
        .local_dns(false);

    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let _conn = connector
            .call("https://hyper.rs:443".try_into().unwrap())
            .await
            .expect("tunnel");
    });

    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];

        // negotiation req/res: method 0x02 (username/password) offered and selected.
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");

        // auth req/res: len-prefixed "user"/"pass", reply 0x00 = success.
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");

        // command req/res: ATYP 0x03 (domain) with len-prefixed "hyper.rs"
        // and big-endian port 443 — the proxy resolves the name itself.
        let n = to_client.read(&mut buf).await.expect("read 3");
        let host = "hyper.rs";
        let port: u16 = 443;
        let mut message = vec![0x05, 0x01, 0x00, 0x03, host.len() as u8];
        message.extend(host.bytes());
        message.extend(port.to_be_bytes());
        assert_eq!(&buf[..n], message);

        let mut message = vec![0x05, 0x00, 0x00, 0x03, host.len() as u8];
        message.extend(host.bytes());
        message.extend(port.to_be_bytes());
        to_client.write_all(&message).await.expect("write 3");
    });

    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
}

// With local_dns(true) the client resolves the hostname itself and sends an
// IP address (ATYP 0x01 or 0x04) in the CONNECT command.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_locally_resolved_domain_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");

    let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
        .with_auth("user".into(), "pass".into())
        .local_dns(true);

    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let _conn = connector
            .call("https://hyper.rs:443".try_into().unwrap())
            .await
            .expect("tunnel");
    });

    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];

        // negotiation req/res
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");

        // auth req/res
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");

        // command req/res: header is fixed, the address type must be an IP
        // (0x01 = IPv4 → 4 bytes, 0x04 = IPv6 → 16 bytes), so total length is
        // 4 header bytes + 4*ATYP address bytes + 2 port bytes.
        let n = to_client.read(&mut buf).await.expect("read 3");
        let message = [0x05, 0x01, 0x00];
        assert_eq!(&buf[..3], message);
        assert!(buf[3] == 0x01 || buf[3] == 0x04); // IPv4 or IPv6
        assert_eq!(n, 4 + 4 * (buf[3] as usize) + 2);

        let message = vec![0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0];
        to_client.write_all(&message).await.expect("write 3");
    });

    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
}

// SOCKSv4 CONNECT round-trip: fixed 8-byte request/reply format, no auth
// negotiation, then a blind byte tunnel.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v4_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");

    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");

    let mut connector = SocksV4::new(proxy_dst, HttpConnector::new());

    // Client
    //
    // Will use `SocksV4` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();

        tcp.write_all(b"Hello World!").await.expect("write 1");

        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });

    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 512];

        // VN=4, CD=1 (CONNECT), port, IPv4, empty userid (NUL-terminated).
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [127, 0, 0, 1];
        let message = [4, 0x01, p1, p2, ip1, ip2, ip3, ip4, 0, 0];
        let n = to_client.read(&mut buf).await.expect("read");
        assert_eq!(&buf[..n], message);

        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        // Reply code 90 = request granted.
        let message = [0, 90, p1, p2, ip1, ip2, ip3, ip4];
        to_client.write_all(&message).await.expect("write");

        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8)
    });

    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");

        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });

    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}

// With send_optimistically(true) the client writes negotiation, auth, and
// CONNECT in one burst instead of waiting for each server reply.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_optimistic_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");

    let target_addr = std::net::SocketAddr::new([127, 0, 0, 1].into(), 1234);
    let target_dst = format!("http://{target_addr}").parse().expect("uri");

    let mut connector = SocksV5::new(proxy_dst, HttpConnector::new())
        .with_auth("ABC".into(), "XYZ".into())
        .send_optimistically(true);

    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let _ = connector.call(target_dst).await.expect("tunnel");
    });

    // Proxy
    //
    // Will receive SOCKS handshake from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
let t2 = tokio::spawn(async move { let (mut to_client, _) = proxy_tcp.accept().await.expect("accept"); let [p1, p2] = target_addr.port().to_be_bytes(); let mut buf = [0; 22]; let request = vec![ 5, 1, 2, // Negotiation 1, 3, 65, 66, 67, 3, 88, 89, 90, // Auth ("ABC"/"XYZ") 5, 1, 0, 1, 127, 0, 0, 1, p1, p2, // Reply ]; let response = vec![ 5, 2, // Negotiation, 1, 0, // Auth, 5, 0, 0, 1, 127, 0, 0, 1, p1, p2, // Reply ]; // Accept all handshake messages to_client.read_exact(&mut buf).await.expect("read"); assert_eq!(request.as_slice(), buf); // Send all handshake messages back to_client .write_all(response.as_slice()) .await .expect("write"); to_client.flush().await.expect("flush"); }); t1.await.expect("task - client"); t2.await.expect("task - proxy"); } hyper-util-0.1.19/tests/test_utils/mod.rs000064400000000000000000000113161046102023000165060ustar 00000000000000use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use futures_channel::mpsc; use futures_util::task::{Context, Poll}; use futures_util::Future; use futures_util::TryFutureExt; use hyper::Uri; use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; use hyper::rt::ReadBufCursor; use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::client::legacy::connect::{Connected, Connection}; use hyper_util::rt::TokioIo; #[derive(Clone)] pub struct DebugConnector { pub http: HttpConnector, pub closes: mpsc::Sender<()>, pub connects: Arc, pub is_proxy: bool, pub alpn_h2: bool, } impl DebugConnector { pub fn new() -> DebugConnector { let http = HttpConnector::new(); let (tx, _) = mpsc::channel(10); DebugConnector::with_http_and_closes(http, tx) } pub fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector { DebugConnector { http, closes, connects: Arc::new(AtomicUsize::new(0)), is_proxy: false, alpn_h2: false, } } pub fn proxy(mut self) -> Self { self.is_proxy = true; self } } impl tower_service::Service for 
DebugConnector { type Response = DebugStream; type Error = >::Error; type Future = Pin> + Send>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { // don't forget to check inner service is ready :) tower_service::Service::::poll_ready(&mut self.http, cx) } fn call(&mut self, dst: Uri) -> Self::Future { self.connects.fetch_add(1, Ordering::SeqCst); let closes = self.closes.clone(); let is_proxy = self.is_proxy; let is_alpn_h2 = self.alpn_h2; Box::pin(self.http.call(dst).map_ok(move |tcp| DebugStream { tcp, on_drop: closes, is_alpn_h2, is_proxy, })) } } pub struct DebugStream { tcp: TokioIo, on_drop: mpsc::Sender<()>, is_alpn_h2: bool, is_proxy: bool, } impl Drop for DebugStream { fn drop(&mut self) { let _ = self.on_drop.try_send(()); } } impl Connection for DebugStream { fn connected(&self) -> Connected { let connected = self.tcp.connected().proxy(self.is_proxy); if self.is_alpn_h2 { connected.negotiated_h2() } else { connected } } } impl hyper::rt::Read for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: ReadBufCursor<'_>, ) -> Poll> { hyper::rt::Read::poll_read(Pin::new(&mut self.tcp), cx, buf) } } impl hyper::rt::Write for DebugStream { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { hyper::rt::Write::poll_write(Pin::new(&mut self.tcp), cx, buf) } fn poll_flush( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_flush(Pin::new(&mut self.tcp), cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { hyper::rt::Write::poll_shutdown(Pin::new(&mut self.tcp), cx) } fn is_write_vectored(&self) -> bool { hyper::rt::Write::is_write_vectored(&self.tcp) } fn poll_write_vectored( mut self: Pin<&mut Self>, cx: &mut Context<'_>, bufs: &[std::io::IoSlice<'_>], ) -> Poll> { hyper::rt::Write::poll_write_vectored(Pin::new(&mut self.tcp), cx, bufs) } } impl AsyncWrite for DebugStream { fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut 
Context<'_>, ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_shutdown(cx) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_flush(cx) } fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_write(cx, buf) } } impl AsyncRead for DebugStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(self.tcp.inner_mut()).poll_read(cx, buf) } }