insta-1.46.1/.cargo_vcs_info.json0000644000000001430000000000100122410ustar { "git": { "sha1": "8a5b77531f89bc78d00cab17f2ac8b2c69ceadab" }, "path_in_vcs": "insta" }insta-1.46.1/Cargo.lock0000644000000646350000000000100102340ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "aho-corasick" version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" dependencies = [ "serde_core", ] [[package]] name = "block-buffer" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ "lazy_static", "memchr", "regex-automata 0.1.10", "serde", ] [[package]] name = "bstr" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", "regex-automata 0.4.7", "serde", ] [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "4.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "906f7fe1da4185b7a282b2bc90172a496f9def1aca4545fe7526810741591e14" dependencies = [ "clap_builder", "clap_derive", "once_cell", ] [[package]] name = "clap_builder" version = "4.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "351f9ad9688141ed83dfd8f5fb998a06225ef444b48ff4dc43de6d409b7fd10b" dependencies = [ "bitflags 1.3.2", "clap_lex", "is-terminal", "strsim", "termcolor", ] [[package]] name = "clap_derive" version = "4.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck", "proc-macro2", "quote", "syn", ] [[package]] name = "clap_lex" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" [[package]] name = "console" version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9b6515d269224923b26b5febea2ed42b2d5f2ce37284a4dd670fedd6cb8347a" dependencies = [ "encode_unicode", "lazy_static", "libc", "windows-sys 0.42.0", ] [[package]] name = "cpufeatures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "csv" version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" dependencies = [ "bstr 0.2.17", "csv-core", "itoa", "ryu", "serde", ] [[package]] name = "csv-core" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "encode_unicode" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.48.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "generic-array" version = "0.14.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", ] [[package]] name = "globset" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", "bstr 0.2.17", "fnv", "log", "regex", ] [[package]] name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "indexmap" version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", "hashbrown", ] [[package]] name = "insta" version = "1.46.1" dependencies = [ "clap", "console", "csv", "globset", "once_cell", "pest", "pest_derive", "regex", "ron", "rustc_version", "serde", "similar", "similar-asserts", "tempfile", "toml_edit", "toml_writer", "walkdir", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", "windows-sys 0.48.0", ] 
[[package]] name = "is-terminal" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", "libc", "windows-sys 0.52.0", ] [[package]] name = "itoa" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "linux-raw-sys" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "log" version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "once_cell" version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "pest" version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", ] [[package]] name = "pest_generator" version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", "syn", ] [[package]] name = "pest_meta" version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", "sha2", ] [[package]] name = "proc-macro2" version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "regex" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-automata" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" [[package]] name = "regex-syntax" version = 
"0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "ron" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd490c5b18261893f14449cbd28cb9c0b637aebf161cd77900bfdedaff21ec32" dependencies = [ "bitflags 2.10.0", "once_cell", "serde", "serde_derive", "typeid", "unicode-ident", ] [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" dependencies = [ "bitflags 1.3.2", "errno", "io-lifetimes", "libc", "linux-raw-sys", "windows-sys 0.48.0", ] [[package]] name = "ryu" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "semver" version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_spanned" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" dependencies = [ "serde_core", ] [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "similar" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" dependencies = [ "bstr 1.10.0", "unicode-segmentation", ] [[package]] name = "similar-asserts" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e041bb827d1bfca18f213411d51b665309f1afb37a04a5d1464530e13779fc0f" dependencies = [ "console", "similar", ] [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", "redox_syscall", 
"rustix", "windows-sys 0.45.0", ] [[package]] name = "termcolor" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "toml_datetime" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" dependencies = [ "serde_core", ] [[package]] name = "toml_edit" version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ "indexmap", "serde_core", "serde_spanned", "toml_datetime", "toml_parser", "toml_writer", "winnow", ] [[package]] name = "toml_parser" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ "winnow", ] [[package]] name = "toml_writer" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" [[package]] name = "typeid" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc7d623258602320d5c55d1bc22793b57daff0ec7efc270ea7d55ce1d5f5471c" [[package]] name = "typenum" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "ucd-trie" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "unicode-ident" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd" [[package]] name = "unicode-segmentation" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", "winapi", "winapi-util", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.0", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.6", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 
0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" 
version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = 
"windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] insta-1.46.1/Cargo.toml0000644000000062730000000000100102510ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2021" rust-version = "1.64.0" name = "insta" version = "1.46.1" authors = ["Armin Ronacher "] build = false exclude = ["assets/*"] autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A snapshot testing library for Rust" homepage = "https://insta.rs/" readme = "README.md" keywords = [ "snapshot", "testing", "jest", "approval", ] categories = ["development-tools::testing"] license = "Apache-2.0" repository = "https://github.com/mitsuhiko/insta" [package.metadata.docs.rs] all-features = true rustdoc-args = [ "--cfg", "docsrs", ] [features] _cargo_insta_internal = ["clap"] colors = ["console"] csv = [ "dep:csv", "serde", ] default = ["colors"] filters = ["regex"] glob = [ "walkdir", "globset", ] json = ["serde"] redactions = [ "pest", "pest_derive", "serde", ] ron = [ "dep:ron", "serde", ] toml = [ "dep:toml_edit", "dep:toml_writer", "serde", ] yaml = ["serde"] [lib] name = "insta" path = "src/lib.rs" [[test]] name = "test_advanced" path = "tests/test_advanced.rs" [[test]] name = "test_basic" path = "tests/test_basic.rs" [[test]] name = "test_binary" path = "tests/test_binary.rs" [[test]] name = "test_glob" path = "tests/test_glob.rs" [[test]] name = "test_inline" path = "tests/test_inline.rs" [[test]] name = "test_redaction" path = "tests/test_redaction.rs" [[test]] name = "test_settings" path = "tests/test_settings.rs" [[test]] name = "test_toml" path = "tests/test_toml.rs" [dependencies.clap] version = "4.1" features = [ "derive", "env", ] optional = true [dependencies.console] version = "0.15.4" optional = true default-features = false [dependencies.csv] version = "1.1.6" optional = true [dependencies.globset] version = ">= 0.4.6, < 0.4.17" optional = true [dependencies.once_cell] version = "1.20.2" [dependencies.pest] version = "2.1.3" optional = true [dependencies.pest_derive] version = "2.1.0" optional = true [dependencies.regex] version = "1.6.0" features = [ "std", "unicode", ] 
optional = true default-features = false [dependencies.ron] version = "0.12.0" optional = true [dependencies.serde] version = "1.0.117" optional = true [dependencies.similar] version = "2.1.0" features = ["inline"] [dependencies.tempfile] version = "3" [dependencies.toml_edit] version = "0.23.0" features = [ "serde", "parse", "display", ] optional = true [dependencies.toml_writer] version = "1" optional = true [dependencies.walkdir] version = "2.3.1" optional = true [dev-dependencies.rustc_version] version = "0.4.0" [dev-dependencies.serde] version = "1.0.117" features = ["derive"] [dev-dependencies.similar-asserts] version = "1.4.2" insta-1.46.1/Cargo.toml.orig000064400000000000000000000040161046102023000137230ustar 00000000000000[package] name = "insta" version = "1.46.1" license = "Apache-2.0" authors = ["Armin Ronacher "] description = "A snapshot testing library for Rust" edition = "2021" rust-version = "1.64.0" homepage = "https://insta.rs/" repository = "https://github.com/mitsuhiko/insta" keywords = ["snapshot", "testing", "jest", "approval"] categories = ["development-tools::testing"] readme = "README.md" exclude = ["assets/*"] [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] [features] default = ["colors"] # when the redactions feature is enabled values can be redacted in serialized # snapshots. 
redactions = ["pest", "pest_derive", "serde"] # Enables support for running filters on snapshot filters = ["regex"] # Glob support glob = ["walkdir", "globset"] # Color support colors = ["console"] # Serialization formats csv = ["dep:csv", "serde"] json = ["serde"] ron = ["dep:ron", "serde"] toml = ["dep:toml_edit", "dep:toml_writer", "serde"] yaml = ["serde"] # internal feature exclusive to cargo-insta _cargo_insta_internal = ["clap"] [dependencies] csv = { version = "1.1.6", optional = true } console = { version = "0.15.4", optional = true, default-features = false } pest = { version = "2.1.3", optional = true } pest_derive = { version = "2.1.0", optional = true } ron = { version = "0.12.0", optional = true } toml_edit = { version = "0.23.0", optional = true, features = [ "serde", "parse", "display", ] } toml_writer = { version = "1", optional = true } globset = { version = ">= 0.4.6, < 0.4.17", optional = true } walkdir = { version = "2.3.1", optional = true } similar = { version = "2.1.0", features = ["inline"] } regex = { version = "1.6.0", default-features = false, optional = true, features = [ "std", "unicode", ] } serde = { version = "1.0.117", optional = true } once_cell = "1.20.2" clap = { workspace = true, optional = true } tempfile = "3" [dev-dependencies] rustc_version = "0.4.0" serde = { version = "1.0.117", features = ["derive"] } similar-asserts = "1.4.2" insta-1.46.1/LICENSE000064400000000000000000000251371046102023000120500ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. insta-1.46.1/README.md000064400000000000000000000060131046102023000123120ustar 00000000000000

insta: a snapshot testing library for Rust

[![Crates.io](https://img.shields.io/crates/d/insta.svg)](https://crates.io/crates/insta) [![License](https://img.shields.io/github/license/mitsuhiko/insta)](https://github.com/mitsuhiko/insta/blob/master/LICENSE) [![Documentation](https://docs.rs/insta/badge.svg)](https://docs.rs/insta) [![VSCode Extension](https://img.shields.io/visual-studio-marketplace/v/mitsuhiko.insta?label=vscode%20extension)](https://marketplace.visualstudio.com/items?itemName=mitsuhiko.insta) ## Introduction Snapshots tests (also sometimes called approval tests) are tests that assert values against a reference value (the snapshot). This is similar to how `assert_eq!` lets you compare a value against a reference value but unlike simple string assertions, snapshot tests let you test against complex values and come with comprehensive tools to review changes. Snapshot tests are particularly useful if your reference values are very large or change often. ## Example ```rust #[test] fn test_hello_world() { insta::assert_debug_snapshot!(vec![1, 2, 3]); } ``` Curious? There is a screencast that shows the entire workflow: [watch the insta introduction screencast](https://www.youtube.com/watch?v=rCHrMqE4JOY&feature=youtu.be). Or if you're not into videos, read the [5 minute introduction](https://insta.rs/docs/quickstart/). Insta also supports inline snapshots which are stored right in your source file instead of separate files. This is accomplished by the companion [cargo-insta](https://github.com/mitsuhiko/insta/tree/master/cargo-insta) tool. ## Editor Support For looking at `.snap` files there is a [vscode extension](https://github.com/mitsuhiko/insta/tree/master/vscode-insta) which can syntax highlight snapshot files, review snapshots and more. It can be installed from the marketplace: [view on marketplace](https://marketplace.visualstudio.com/items?itemName=mitsuhiko.insta). 
![jump to definition](https://raw.githubusercontent.com/mitsuhiko/insta/master/vscode-insta/images/jump-to-definition.gif) ## Diffing Insta uses [`similar`](https://github.com/mitsuhiko/similar) for all its diffing operations. You can use it independently of insta. You can use the [`similar-asserts`](https://github.com/mitsuhiko/similar-asserts) crate to get inline diffs for the standard `assert_eq!` macro to achieve insta like diffs for regular comparisons: ```rust use similar_asserts::assert_eq; fn main() { let reference = vec![1, 2, 3, 4]; assert_eq!(reference, (0..4).collect::>()); } ``` ## Sponsor If you like the project and find it useful you can [become a sponsor](https://github.com/sponsors/mitsuhiko). ## License and Links - [Project Website](https://insta.rs/) - [Documentation](https://docs.rs/insta/) - [Issue Tracker](https://github.com/mitsuhiko/insta/issues) - License: [Apache-2.0](https://github.com/mitsuhiko/insta/blob/master/LICENSE) insta-1.46.1/src/content/json.rs000064400000000000000000000413111046102023000146130ustar 00000000000000use std::fmt::{Display, Write}; use crate::content::Content; /// The maximum number of characters to print in a single line /// when [`to_string_pretty`] is used. const COMPACT_MAX_CHARS: usize = 120; #[derive(PartialEq, Eq, Copy, Clone, Debug)] pub enum Format { Condensed, SingleLine, Pretty, } /// Serializes a serializable to JSON. pub struct Serializer { out: String, format: Format, indentation: usize, } impl Serializer { /// Creates a new [`Serializer`] that writes into the given writer. 
pub fn new() -> Serializer { Serializer { out: String::new(), format: Format::Condensed, indentation: 0, } } pub fn into_result(self) -> String { self.out } fn write_indentation(&mut self) { if self.format == Format::Pretty { write!(self.out, "{: ^1$}", "", self.indentation * 2).unwrap(); } } fn start_container(&mut self, c: char) { self.write_char(c); self.indentation += 1; } fn end_container(&mut self, c: char, empty: bool) { self.indentation -= 1; if self.format == Format::Pretty && !empty { self.write_char('\n'); self.write_indentation(); } self.write_char(c); } fn write_comma(&mut self, first: bool) { match self.format { Format::Pretty => { if first { self.write_char('\n'); } else { self.write_str(",\n"); } self.write_indentation(); } Format::Condensed => { if !first { self.write_char(','); } } Format::SingleLine => { if !first { self.write_str(", "); } } } } fn write_colon(&mut self) { match self.format { Format::Pretty | Format::SingleLine => self.write_str(": "), Format::Condensed => self.write_char(':'), } } fn serialize_array(&mut self, items: &[Content]) { self.start_container('['); for (idx, item) in items.iter().enumerate() { self.write_comma(idx == 0); self.serialize(item); } self.end_container(']', items.is_empty()); } fn serialize_object(&mut self, fields: &[(&str, Content)]) { self.start_container('{'); for (idx, (key, value)) in fields.iter().enumerate() { self.write_comma(idx == 0); self.write_escaped_str(key); self.write_colon(); self.serialize(value); } self.end_container('}', fields.is_empty()); } pub fn serialize(&mut self, value: &Content) { match value { Content::Bool(true) => self.write_str("true"), Content::Bool(false) => self.write_str("false"), Content::U8(n) => write!(self.out, "{n}").unwrap(), Content::U16(n) => write!(self.out, "{n}").unwrap(), Content::U32(n) => write!(self.out, "{n}").unwrap(), Content::U64(n) => write!(self.out, "{n}").unwrap(), Content::U128(n) => write!(self.out, "{n}").unwrap(), Content::I8(n) => 
write!(self.out, "{n}").unwrap(), Content::I16(n) => write!(self.out, "{n}").unwrap(), Content::I32(n) => write!(self.out, "{n}").unwrap(), Content::I64(n) => write!(self.out, "{n}").unwrap(), Content::I128(n) => write!(self.out, "{n}").unwrap(), Content::F32(f) => self.write_float(f, f.is_finite()), Content::F64(f) => self.write_float(f, f.is_finite()), Content::Char(c) => self.write_escaped_str(&(*c).to_string()), Content::String(s) => self.write_escaped_str(s), Content::Bytes(bytes) => { self.start_container('['); for (idx, byte) in bytes.iter().enumerate() { self.write_comma(idx == 0); self.write_str(&byte.to_string()); } self.end_container(']', bytes.is_empty()); } Content::None | Content::Unit | Content::UnitStruct(_) => self.write_str("null"), Content::Some(content) => self.serialize(content), Content::UnitVariant(_, _, variant) => self.write_escaped_str(variant), Content::NewtypeStruct(_, content) => self.serialize(content), Content::NewtypeVariant(_, _, variant, content) => { self.start_container('{'); self.write_comma(true); self.write_escaped_str(variant); self.write_colon(); self.serialize(content); self.end_container('}', false); } Content::Seq(seq) | Content::Tuple(seq) | Content::TupleStruct(_, seq) => { self.serialize_array(seq); } Content::TupleVariant(_, _, variant, seq) => { self.start_container('{'); self.write_comma(true); self.write_escaped_str(variant); self.write_colon(); self.serialize_array(seq); self.end_container('}', false); } Content::Map(map) => { self.start_container('{'); for (idx, (key, value)) in map.iter().enumerate() { self.write_comma(idx == 0); let real_key = key.resolve_inner(); if let Content::String(ref s) = real_key { self.write_escaped_str(s); } else if let Some(num) = real_key.as_i64() { self.write_escaped_str(&num.to_string()); } else if let Some(num) = real_key.as_i128() { self.write_escaped_str(&num.to_string()); } else { panic!("cannot serialize maps without string keys to JSON"); } self.write_colon(); 
self.serialize(value); } self.end_container('}', map.is_empty()); } Content::Struct(_, fields) => { self.serialize_object(fields); } Content::StructVariant(_, _, variant, fields) => { self.start_container('{'); self.write_comma(true); self.write_escaped_str(variant); self.write_colon(); self.serialize_object(fields); self.end_container('}', false); } } } fn write_float(&mut self, n: impl Display, is_finite: bool) { if is_finite { let start = self.out.len(); write!(self.out, "{n}").unwrap(); // ensure the result has .0 for whole numbers to be round-trip safe if !self.out[start..].contains('.') { self.out.push_str(".0"); } } else { self.write_str("null"); } } fn write_str(&mut self, s: &str) { self.out.push_str(s); } fn write_char(&mut self, c: char) { self.out.push(c); } fn write_escaped_str(&mut self, value: &str) { self.write_char('"'); let bytes = value.as_bytes(); let mut start = 0; for (i, &byte) in bytes.iter().enumerate() { let escape = ESCAPE[byte as usize]; if escape == 0 { continue; } if start < i { self.write_str(&value[start..i]); } match escape { self::BB => self.write_str("\\b"), self::TT => self.write_str("\\t"), self::NN => self.write_str("\\n"), self::FF => self.write_str("\\f"), self::RR => self.write_str("\\r"), self::QU => self.write_str("\\\""), self::BS => self.write_str("\\\\"), self::U => { static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef"; self.write_str("\\u00"); self.write_char(HEX_DIGITS[(byte >> 4) as usize] as char); self.write_char(HEX_DIGITS[(byte & 0xF) as usize] as char); } _ => unreachable!(), } start = i + 1; } if start != bytes.len() { self.write_str(&value[start..]); } self.write_char('"'); } } const BB: u8 = b'b'; // \x08 const TT: u8 = b't'; // \x09 const NN: u8 = b'n'; // \x0A const FF: u8 = b'f'; // \x0C const RR: u8 = b'r'; // \x0D const QU: u8 = b'"'; // \x22 const BS: u8 = b'\\'; // \x5C const U: u8 = b'u'; // \x00...\x1F except the ones above // Lookup table of escape sequences. 
A value of b'x' at index i means that byte // i is escaped as "\x" in JSON. A value of 0 means that byte i is not escaped. #[rustfmt::skip] static ESCAPE: [u8; 256] = [ // 1 2 3 4 5 6 7 8 9 A B C D E F U, U, U, U, U, U, U, U, BB, TT, NN, U, FF, RR, U, U, // 0 U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, U, // 1 0, 0, QU, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 2 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 3 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 4 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, BS, 0, 0, 0, // 5 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 6 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 7 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // C 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // D 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // E 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // F ]; /// Serializes a value to JSON. pub fn to_string(value: &Content) -> String { let mut ser = Serializer::new(); ser.serialize(value); ser.into_result() } /// Serializes a value to JSON in single-line format. #[allow(unused)] pub fn to_string_compact(value: &Content) -> String { let mut ser = Serializer::new(); ser.format = Format::SingleLine; ser.serialize(value); let rv = ser.into_result(); // this is pretty wasteful as we just format twice // but it's acceptable for the way this is used in // insta. 
if rv.chars().count() > COMPACT_MAX_CHARS { to_string_pretty(value) } else { rv } } /// Serializes a value to JSON pretty #[allow(unused)] pub fn to_string_pretty(value: &Content) -> String { let mut ser = Serializer::new(); ser.format = Format::Pretty; ser.serialize(value); ser.into_result() } #[test] fn test_to_string() { let json = to_string(&Content::Map(vec![ ( Content::from("environments"), Content::Seq(vec![ Content::from("development"), Content::from("production"), ]), ), (Content::from("cmdline"), Content::Seq(vec![])), (Content::from("extra"), Content::Map(vec![])), ])); crate::assert_snapshot!(&json, @r#"{"environments":["development","production"],"cmdline":[],"extra":{}}"#); } #[test] fn test_to_string_pretty() { let json = to_string_pretty(&Content::Map(vec![ ( Content::from("environments"), Content::Seq(vec![ Content::from("development"), Content::from("production"), ]), ), (Content::from("cmdline"), Content::Seq(vec![])), (Content::from("extra"), Content::Map(vec![])), ])); crate::assert_snapshot!(&json, @r#" { "environments": [ "development", "production" ], "cmdline": [], "extra": {} } "#); } #[test] fn test_to_string_num_keys() { let content = Content::Map(vec![ (Content::from(42u32), Content::from(true)), (Content::from(-23i32), Content::from(false)), ]); let json = to_string_pretty(&content); crate::assert_snapshot!(&json, @r#" { "42": true, "-23": false } "#); } #[test] fn test_to_string_pretty_complex() { let content = Content::Map(vec![ ( Content::from("is_alive"), Content::NewtypeStruct("Some", Content::from(true).into()), ), ( Content::from("newtype_variant"), Content::NewtypeVariant( "Foo", 0, "variant_a", Box::new(Content::Struct( "VariantA", vec![ ("field_a", Content::String("value_a".into())), ("field_b", 42u32.into()), ], )), ), ), ( Content::from("struct_variant"), Content::StructVariant( "Foo", 0, "variant_b", vec![ ("field_a", Content::String("value_a".into())), ("field_b", 42u32.into()), ], ), ), ( Content::from("tuple_variant"), 
Content::TupleVariant( "Foo", 0, "variant_c", vec![(Content::String("value_a".into())), (42u32.into())], ), ), (Content::from("empty_array"), Content::Seq(vec![])), (Content::from("empty_object"), Content::Map(vec![])), (Content::from("array"), Content::Seq(vec![true.into()])), ( Content::from("object"), Content::Map(vec![("foo".into(), true.into())]), ), ( Content::from("array_of_objects"), Content::Seq(vec![Content::Struct( "MyType", vec![ ("foo", Content::from("bar".to_string())), ("bar", Content::from("xxx".to_string())), ], )]), ), ( Content::from("unit_variant"), Content::UnitVariant("Stuff", 0, "value"), ), (Content::from("u8"), Content::U8(8)), (Content::from("u16"), Content::U16(16)), (Content::from("u32"), Content::U32(32)), (Content::from("u64"), Content::U64(64)), (Content::from("u128"), Content::U128(128)), (Content::from("i8"), Content::I8(8)), (Content::from("i16"), Content::I16(16)), (Content::from("i32"), Content::I32(32)), (Content::from("i64"), Content::I64(64)), (Content::from("i128"), Content::I128(128)), (Content::from("f32"), Content::F32(32.0)), (Content::from("f64"), Content::F64(64.0)), (Content::from("char"), Content::Char('A')), (Content::from("bytes"), Content::Bytes(b"hehe".to_vec())), (Content::from("null"), Content::None), (Content::from("unit"), Content::Unit), ( Content::from("crazy_string"), Content::String((0u8..=126).map(|x| x as char).collect()), ), ]); let json = to_string_pretty(&content); crate::assert_snapshot!(&json, @r##" { "is_alive": true, "newtype_variant": { "variant_a": { "field_a": "value_a", "field_b": 42 } }, "struct_variant": { "variant_b": { "field_a": "value_a", "field_b": 42 } }, "tuple_variant": { "variant_c": [ "value_a", 42 ] }, "empty_array": [], "empty_object": {}, "array": [ true ], "object": { "foo": true }, "array_of_objects": [ { "foo": "bar", "bar": "xxx" } ], "unit_variant": "value", "u8": 8, "u16": 16, "u32": 32, "u64": 64, "u128": 128, "i8": 8, "i16": 16, "i32": 32, "i64": 64, "i128": 128, "f32": 
32.0, "f64": 64.0, "char": "A", "bytes": [ 104, 101, 104, 101 ], "null": null, "unit": null, "crazy_string": "\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~" } "##); } insta-1.46.1/src/content/mod.rs000064400000000000000000000273751046102023000144370ustar 00000000000000//! This module implements a generic `Content` type that can hold //! runtime typed data. //! //! It's modelled after serde's data format but it's in fact possible to use //! this independently of serde. The `yaml` and `json` support implemented //! here works without serde. Only `yaml` has an implemented parser but since //! YAML is a superset of JSON insta instead currently parses JSON via the //! YAML implementation. pub mod json; #[cfg(feature = "serde")] mod serialization; pub mod yaml; #[cfg(feature = "serde")] pub use serialization::*; use std::fmt; /// An internal error type for content related errors. #[derive(Debug)] pub enum Error { FailedParsingYaml(std::path::PathBuf), UnexpectedDataType, MissingField, FileIo(std::io::Error, std::path::PathBuf), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Error::FailedParsingYaml(p) => { f.write_str(format!("Failed parsing the YAML from {:?}", p.display()).as_str()) } Error::UnexpectedDataType => { f.write_str("The present data type wasn't what was expected") } Error::MissingField => f.write_str("A required field was missing"), Error::FileIo(e, p) => { f.write_str(format!("File error for {:?}: {}", p.display(), e).as_str()) } } } } impl std::error::Error for Error {} /// Represents variable typed content. /// /// This is used for the serialization system to represent values /// before the actual snapshots are written and is also exposed to /// dynamic redaction functions. 
///
/// Some enum variants are intentionally not exposed to user code.
/// It's generally recommended to construct content objects by
/// using the [`From`] trait and by using the
/// accessor methods to assert on it.
///
/// While matching on the content is possible in theory it is
/// recommended against. The reason for this is that the content
/// enum holds variants that can "wrap" values where it's not
/// expected. For instance if a field holds an `Option<String>`
/// you cannot use pattern matching to extract the string as it
/// will be contained in an internal [`Some`] variant that is not
/// exposed. On the other hand the [`Content::as_str`] method will
/// automatically resolve such internal wrappers.
///
/// If you do need to pattern match you should use the
/// [`Content::resolve_inner`] method to resolve such internal wrappers.
// NOTE(review): generic argument lists below were stripped from the
// extracted source by an HTML-tag filter (`Box<Content>` -> `Box`,
// `Vec<Content>` -> `Vec`); they are restored here to match the
// published insta source.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub enum Content {
    Bool(bool),
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
    U128(u128),
    I8(i8),
    I16(i16),
    I32(i32),
    I64(i64),
    I128(i128),
    F32(f32),
    F64(f64),
    Char(char),
    String(String),
    Bytes(Vec<u8>),
    #[doc(hidden)]
    None,
    #[doc(hidden)]
    Some(Box<Content>),
    #[doc(hidden)]
    Unit,
    #[doc(hidden)]
    UnitStruct(&'static str),
    #[doc(hidden)]
    UnitVariant(&'static str, u32, &'static str),
    #[doc(hidden)]
    NewtypeStruct(&'static str, Box<Content>),
    #[doc(hidden)]
    NewtypeVariant(&'static str, u32, &'static str, Box<Content>),
    Seq(Vec<Content>),
    #[doc(hidden)]
    Tuple(Vec<Content>),
    #[doc(hidden)]
    TupleStruct(&'static str, Vec<Content>),
    #[doc(hidden)]
    TupleVariant(&'static str, u32, &'static str, Vec<Content>),
    Map(Vec<(Content, Content)>),
    #[doc(hidden)]
    Struct(&'static str, Vec<(&'static str, Content)>),
    #[doc(hidden)]
    StructVariant(
        &'static str,
        u32,
        &'static str,
        Vec<(&'static str, Content)>,
    ),
}

/// Generates a `From<$ty> for Content` impl that maps the primitive
/// type `$ty` onto the enum variant `$newty`.
macro_rules! impl_from {
    ($ty:ty, $newty:ident) => {
        impl From<$ty> for Content {
            fn from(value: $ty) -> Content {
                Content::$newty(value)
            }
        }
    };
}

impl_from!(bool, Bool);
impl_from!(u8, U8);
impl_from!(u16, U16);
impl_from!(u32, U32);
impl_from!(u64, U64);
impl_from!(u128, U128);
impl_from!(i8, I8);
impl_from!(i16, I16);
impl_from!(i32, I32);
impl_from!(i64, I64);
impl_from!(i128, I128);
impl_from!(f32, F32);
impl_from!(f64, F64);
impl_from!(char, Char);
impl_from!(String, String);
impl_from!(Vec<u8>, Bytes);

impl From<()> for Content {
    fn from(_value: ()) -> Content {
        Content::Unit
    }
}

impl<'a> From<&'a str> for Content {
    fn from(value: &'a str) -> Content {
        Content::String(value.to_string())
    }
}

impl<'a> From<&'a [u8]> for Content {
    fn from(value: &'a [u8]) -> Content {
        Content::Bytes(value.to_vec())
    }
}

impl Content {
    /// This resolves the innermost content in a chain of
    /// wrapped content.
    ///
    /// For instance if you encounter an `Option<Option<String>>`
    /// field the content will be wrapped twice in an internal
    /// option wrapper. If you need to pattern match you will
    /// need in some situations to first resolve the inner value
    /// before such matching can take place as there is no exposed
    /// way to match on these wrappers.
    ///
    /// This method does not need to be called for the `as_`
    /// methods which resolve automatically.
    pub fn resolve_inner(&self) -> &Content {
        match *self {
            Content::Some(ref v)
            | Content::NewtypeStruct(_, ref v)
            | Content::NewtypeVariant(_, _, _, ref v) => v.resolve_inner(),
            ref other => other,
        }
    }

    /// Mutable version of [`Self::resolve_inner`].
pub fn resolve_inner_mut(&mut self) -> &mut Content { match *self { Content::Some(ref mut v) | Content::NewtypeStruct(_, ref mut v) | Content::NewtypeVariant(_, _, _, ref mut v) => v.resolve_inner_mut(), ref mut other => other, } } /// Returns the value as string pub fn as_str(&self) -> Option<&str> { match self.resolve_inner() { Content::String(ref s) => Some(s.as_str()), _ => None, } } /// Returns the value as bytes pub fn as_bytes(&self) -> Option<&[u8]> { match self.resolve_inner() { Content::Bytes(ref b) => Some(b), _ => None, } } /// Returns the value as slice of content values. pub fn as_slice(&self) -> Option<&[Content]> { match self.resolve_inner() { Content::Seq(ref v) | Content::Tuple(ref v) | Content::TupleVariant(_, _, _, ref v) => { Some(&v[..]) } _ => None, } } /// Returns true if the value is nil. pub fn is_nil(&self) -> bool { matches!(self.resolve_inner(), Content::None | Content::Unit) } /// Returns the value as bool pub fn as_bool(&self) -> Option { match *self.resolve_inner() { Content::Bool(val) => Some(val), _ => None, } } /// Returns the value as u64 pub fn as_u64(&self) -> Option { match *self.resolve_inner() { Content::U8(v) => Some(u64::from(v)), Content::U16(v) => Some(u64::from(v)), Content::U32(v) => Some(u64::from(v)), Content::U64(v) => Some(v), Content::U128(v) => { let rv = v as u64; if rv as u128 == v { Some(rv) } else { None } } Content::I8(v) if v >= 0 => Some(v as u64), Content::I16(v) if v >= 0 => Some(v as u64), Content::I32(v) if v >= 0 => Some(v as u64), Content::I64(v) if v >= 0 => Some(v as u64), Content::I128(v) => { let rv = v as u64; if rv as i128 == v { Some(rv) } else { None } } _ => None, } } /// Returns the value as u128 pub fn as_u128(&self) -> Option { match *self.resolve_inner() { Content::U128(v) => Some(v), Content::I128(v) if v >= 0 => Some(v as u128), _ => self.as_u64().map(u128::from), } } /// Returns the value as i64 pub fn as_i64(&self) -> Option { match *self.resolve_inner() { Content::U8(v) => 
Some(i64::from(v)), Content::U16(v) => Some(i64::from(v)), Content::U32(v) => Some(i64::from(v)), Content::U64(v) => { let rv = v as i64; if rv as u64 == v { Some(rv) } else { None } } Content::U128(v) => { let rv = v as i64; if rv as u128 == v { Some(rv) } else { None } } Content::I8(v) => Some(i64::from(v)), Content::I16(v) => Some(i64::from(v)), Content::I32(v) => Some(i64::from(v)), Content::I64(v) => Some(v), Content::I128(v) => { let rv = v as i64; if rv as i128 == v { Some(rv) } else { None } } _ => None, } } /// Returns the value as i128 pub fn as_i128(&self) -> Option { match *self.resolve_inner() { Content::U128(v) => { let rv = v as i128; if rv as u128 == v { Some(rv) } else { None } } Content::I128(v) => Some(v), _ => self.as_i64().map(i128::from), } } /// Returns the value as f64 pub fn as_f64(&self) -> Option { match *self.resolve_inner() { Content::F32(v) => Some(f64::from(v)), Content::F64(v) => Some(v), _ => None, } } /// Recursively walks the content structure mutably. /// /// The callback is invoked for every content in the tree. 
pub fn walk bool>(&mut self, visit: &mut F) { if !visit(self) { return; } match *self { Content::Some(ref mut inner) => { Self::walk(&mut *inner, visit); } Content::NewtypeStruct(_, ref mut inner) => { Self::walk(&mut *inner, visit); } Content::NewtypeVariant(_, _, _, ref mut inner) => { Self::walk(&mut *inner, visit); } Content::Seq(ref mut vec) => { for inner in vec.iter_mut() { Self::walk(inner, visit); } } Content::Map(ref mut vec) => { for inner in vec.iter_mut() { Self::walk(&mut inner.0, visit); Self::walk(&mut inner.1, visit); } } Content::Struct(_, ref mut vec) => { for inner in vec.iter_mut() { Self::walk(&mut inner.1, visit); } } Content::StructVariant(_, _, _, ref mut vec) => { for inner in vec.iter_mut() { Self::walk(&mut inner.1, visit); } } Content::Tuple(ref mut vec) => { for inner in vec.iter_mut() { Self::walk(inner, visit); } } Content::TupleStruct(_, ref mut vec) => { for inner in vec.iter_mut() { Self::walk(inner, visit); } } Content::TupleVariant(_, _, _, ref mut vec) => { for inner in vec.iter_mut() { Self::walk(inner, visit); } } _ => {} } } } insta-1.46.1/src/content/serialization.rs000064400000000000000000000434361046102023000165310ustar 00000000000000use std::cmp::Ordering; use std::marker::PhantomData; use crate::content::Content; use serde::{ser, Serialize, Serializer}; #[derive(PartialEq, Debug)] pub enum Key<'a> { Bool(bool), U64(u64), I64(i64), F64(f64), U128(u128), I128(i128), Str(&'a str), Bytes(&'a [u8]), Other, } impl Key<'_> { /// Needed because [`std::mem::discriminant`] is not [`Ord`] fn discriminant(&self) -> usize { match self { Key::Bool(_) => 1, Key::U64(_) => 2, Key::I64(_) => 3, Key::F64(_) => 4, Key::U128(_) => 5, Key::I128(_) => 6, Key::Str(_) => 7, Key::Bytes(_) => 8, Key::Other => 9, } } } impl Eq for Key<'_> {} impl Ord for Key<'_> { fn cmp(&self, other: &Self) -> Ordering { let self_discriminant = self.discriminant(); let other_discriminant = other.discriminant(); match Ord::cmp(&self_discriminant, 
&other_discriminant) {
            // Same variant kind: compare the payloads directly.
            Ordering::Equal => match (self, other) {
                (Key::Bool(a), Key::Bool(b)) => Ord::cmp(a, b),
                (Key::U64(a), Key::U64(b)) => Ord::cmp(a, b),
                (Key::I64(a), Key::I64(b)) => Ord::cmp(a, b),
                (Key::F64(a), Key::F64(b)) => f64_total_cmp(*a, *b),
                (Key::U128(a), Key::U128(b)) => Ord::cmp(a, b),
                (Key::I128(a), Key::I128(b)) => Ord::cmp(a, b),
                (Key::Str(a), Key::Str(b)) => Ord::cmp(a, b),
                (Key::Bytes(a), Key::Bytes(b)) => Ord::cmp(a, b),
                // Equal discriminants imply matching variants, so this arm
                // is unreachable in practice; treat as equal to stay total.
                _ => Ordering::Equal,
            },
            cmp => cmp,
        }
    }
}

impl PartialOrd for Key<'_> {
    // NOTE(review): return type restored to `Option<Ordering>`; the
    // extracted source had the generic stripped to a bare `Option`.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// Total ordering for `f64` values so float keys can be sorted
/// deterministically (NaN included).
fn f64_total_cmp(left: f64, right: f64) -> Ordering {
    // this is taken from f64::total_cmp on newer rust versions
    let mut left = left.to_bits() as i64;
    let mut right = right.to_bits() as i64;
    left ^= (((left >> 63) as u64) >> 1) as i64;
    right ^= (((right >> 63) as u64) >> 1) as i64;
    left.cmp(&right)
}

impl Content {
    /// Maps a resolved content value onto a sortable [`Key`].
    // NOTE(review): `U8`/`I8` (and `String` wrappers aside) have no arm
    // here and fall through to `Key::Other` — looks intentional but worth
    // confirming against upstream.
    pub(crate) fn as_key(&self) -> Key<'_> {
        match *self.resolve_inner() {
            Content::Bool(val) => Key::Bool(val),
            Content::Char(val) => Key::U64(val as u64),
            Content::U16(val) => Key::U64(val.into()),
            Content::U32(val) => Key::U64(val.into()),
            Content::U64(val) => Key::U64(val),
            Content::U128(val) => Key::U128(val),
            Content::I16(val) => Key::I64(val.into()),
            Content::I32(val) => Key::I64(val.into()),
            Content::I64(val) => Key::I64(val),
            Content::I128(val) => Key::I128(val),
            Content::F32(val) => Key::F64(val.into()),
            Content::F64(val) => Key::F64(val),
            Content::String(ref val) => Key::Str(val.as_str()),
            Content::Bytes(ref val) => Key::Bytes(&val[..]),
            _ => Key::Other,
        }
    }

    /// Recursively sorts every map in the content tree by key.
    pub(crate) fn sort_maps(&mut self) {
        self.walk(&mut |content| {
            if let Content::Map(ref mut items) = content {
                // try to compare by key first, if that fails compare by the
                // object value. That way some values normalize, and if we
                // can't normalize we still have a stable order.
items.sort_by(|a, b| match (a.0.as_key(), b.0.as_key()) { (Key::Other, _) | (_, Key::Other) => { a.0.partial_cmp(&b.0).unwrap_or(Ordering::Equal) } (ref a, ref b) => a.cmp(b), }) } true }) } } #[cfg_attr(docsrs, doc(cfg(feature = "serde")))] impl Serialize for Content { fn serialize(&self, serializer: S) -> Result where S: Serializer, { match *self { Content::Bool(b) => serializer.serialize_bool(b), Content::U8(u) => serializer.serialize_u8(u), Content::U16(u) => serializer.serialize_u16(u), Content::U32(u) => serializer.serialize_u32(u), Content::U64(u) => serializer.serialize_u64(u), Content::U128(u) => serializer.serialize_u128(u), Content::I8(i) => serializer.serialize_i8(i), Content::I16(i) => serializer.serialize_i16(i), Content::I32(i) => serializer.serialize_i32(i), Content::I64(i) => serializer.serialize_i64(i), Content::I128(i) => serializer.serialize_i128(i), Content::F32(f) => serializer.serialize_f32(f), Content::F64(f) => serializer.serialize_f64(f), Content::Char(c) => serializer.serialize_char(c), Content::String(ref s) => serializer.serialize_str(s), Content::Bytes(ref b) => serializer.serialize_bytes(b), Content::None => serializer.serialize_none(), Content::Some(ref c) => serializer.serialize_some(&**c), Content::Unit => serializer.serialize_unit(), Content::UnitStruct(n) => serializer.serialize_unit_struct(n), Content::UnitVariant(n, i, v) => serializer.serialize_unit_variant(n, i, v), Content::NewtypeStruct(n, ref c) => serializer.serialize_newtype_struct(n, &**c), Content::NewtypeVariant(n, i, v, ref c) => { serializer.serialize_newtype_variant(n, i, v, &**c) } Content::Seq(ref elements) => elements.serialize(serializer), Content::Tuple(ref elements) => { use serde::ser::SerializeTuple; let mut tuple = serializer.serialize_tuple(elements.len())?; for e in elements { tuple.serialize_element(e)?; } tuple.end() } Content::TupleStruct(n, ref fields) => { use serde::ser::SerializeTupleStruct; let mut ts = serializer.serialize_tuple_struct(n, 
fields.len())?; for f in fields { ts.serialize_field(f)?; } ts.end() } Content::TupleVariant(n, i, v, ref fields) => { use serde::ser::SerializeTupleVariant; let mut tv = serializer.serialize_tuple_variant(n, i, v, fields.len())?; for f in fields { tv.serialize_field(f)?; } tv.end() } Content::Map(ref entries) => { use serde::ser::SerializeMap; let mut map = serializer.serialize_map(Some(entries.len()))?; for (k, v) in entries { map.serialize_entry(k, v)?; } map.end() } Content::Struct(n, ref fields) => { use serde::ser::SerializeStruct; let mut s = serializer.serialize_struct(n, fields.len())?; for &(k, ref v) in fields { s.serialize_field(k, v)?; } s.end() } Content::StructVariant(n, i, v, ref fields) => { use serde::ser::SerializeStructVariant; let mut sv = serializer.serialize_struct_variant(n, i, v, fields.len())?; for &(k, ref v) in fields { sv.serialize_field(k, v)?; } sv.end() } } } } pub struct ContentSerializer { error: PhantomData, } impl ContentSerializer { pub fn new() -> Self { ContentSerializer { error: PhantomData } } } impl Serializer for ContentSerializer where E: ser::Error, { type Ok = Content; type Error = E; type SerializeSeq = SerializeSeq; type SerializeTuple = SerializeTuple; type SerializeTupleStruct = SerializeTupleStruct; type SerializeTupleVariant = SerializeTupleVariant; type SerializeMap = SerializeMap; type SerializeStruct = SerializeStruct; type SerializeStructVariant = SerializeStructVariant; fn serialize_bool(self, v: bool) -> Result { Ok(Content::Bool(v)) } fn serialize_i8(self, v: i8) -> Result { Ok(Content::I8(v)) } fn serialize_i16(self, v: i16) -> Result { Ok(Content::I16(v)) } fn serialize_i32(self, v: i32) -> Result { Ok(Content::I32(v)) } fn serialize_i64(self, v: i64) -> Result { Ok(Content::I64(v)) } fn serialize_i128(self, v: i128) -> Result { Ok(Content::I128(v)) } fn serialize_u8(self, v: u8) -> Result { Ok(Content::U8(v)) } fn serialize_u16(self, v: u16) -> Result { Ok(Content::U16(v)) } fn serialize_u32(self, v: 
u32) -> Result { Ok(Content::U32(v)) } fn serialize_u64(self, v: u64) -> Result { Ok(Content::U64(v)) } fn serialize_u128(self, v: u128) -> Result { Ok(Content::U128(v)) } fn serialize_f32(self, v: f32) -> Result { Ok(Content::F32(v)) } fn serialize_f64(self, v: f64) -> Result { Ok(Content::F64(v)) } fn serialize_char(self, v: char) -> Result { Ok(Content::Char(v)) } fn serialize_str(self, value: &str) -> Result { Ok(Content::String(value.to_owned())) } fn serialize_bytes(self, value: &[u8]) -> Result { Ok(Content::Bytes(value.to_owned())) } fn serialize_none(self) -> Result { Ok(Content::None) } fn serialize_some(self, value: &T) -> Result where T: Serialize + ?Sized, { Ok(Content::Some(Box::new(value.serialize(self)?))) } fn serialize_unit(self) -> Result { Ok(Content::Unit) } fn serialize_unit_struct(self, name: &'static str) -> Result { Ok(Content::UnitStruct(name)) } fn serialize_unit_variant( self, name: &'static str, variant_index: u32, variant: &'static str, ) -> Result { Ok(Content::UnitVariant(name, variant_index, variant)) } fn serialize_newtype_struct(self, name: &'static str, value: &T) -> Result where T: Serialize + ?Sized, { Ok(Content::NewtypeStruct( name, Box::new(value.serialize(self)?), )) } fn serialize_newtype_variant( self, name: &'static str, variant_index: u32, variant: &'static str, value: &T, ) -> Result where T: Serialize + ?Sized, { Ok(Content::NewtypeVariant( name, variant_index, variant, Box::new(value.serialize(self)?), )) } fn serialize_seq(self, len: Option) -> Result { Ok(SerializeSeq { elements: Vec::with_capacity(len.unwrap_or(0)), error: PhantomData, }) } fn serialize_tuple(self, len: usize) -> Result { Ok(SerializeTuple { elements: Vec::with_capacity(len), error: PhantomData, }) } fn serialize_tuple_struct( self, name: &'static str, len: usize, ) -> Result { Ok(SerializeTupleStruct { name, fields: Vec::with_capacity(len), error: PhantomData, }) } fn serialize_tuple_variant( self, name: &'static str, variant_index: u32, variant: 
&'static str, len: usize, ) -> Result { Ok(SerializeTupleVariant { name, variant_index, variant, fields: Vec::with_capacity(len), error: PhantomData, }) } fn serialize_map(self, len: Option) -> Result { Ok(SerializeMap { entries: Vec::with_capacity(len.unwrap_or(0)), key: None, error: PhantomData, }) } fn serialize_struct(self, name: &'static str, len: usize) -> Result { Ok(SerializeStruct { name, fields: Vec::with_capacity(len), error: PhantomData, }) } fn serialize_struct_variant( self, name: &'static str, variant_index: u32, variant: &'static str, len: usize, ) -> Result { Ok(SerializeStructVariant { name, variant_index, variant, fields: Vec::with_capacity(len), error: PhantomData, }) } } pub struct SerializeSeq { elements: Vec, error: PhantomData, } impl ser::SerializeSeq for SerializeSeq where E: ser::Error, { type Ok = Content; type Error = E; fn serialize_element(&mut self, value: &T) -> Result<(), E> where T: Serialize + ?Sized, { let value = value.serialize(ContentSerializer::::new())?; self.elements.push(value); Ok(()) } fn end(self) -> Result { Ok(Content::Seq(self.elements)) } } pub struct SerializeTuple { elements: Vec, error: PhantomData, } impl ser::SerializeTuple for SerializeTuple where E: ser::Error, { type Ok = Content; type Error = E; fn serialize_element(&mut self, value: &T) -> Result<(), E> where T: Serialize + ?Sized, { let value = value.serialize(ContentSerializer::::new())?; self.elements.push(value); Ok(()) } fn end(self) -> Result { Ok(Content::Tuple(self.elements)) } } pub struct SerializeTupleStruct { name: &'static str, fields: Vec, error: PhantomData, } impl ser::SerializeTupleStruct for SerializeTupleStruct where E: ser::Error, { type Ok = Content; type Error = E; fn serialize_field(&mut self, value: &T) -> Result<(), E> where T: Serialize + ?Sized, { let value = value.serialize(ContentSerializer::::new())?; self.fields.push(value); Ok(()) } fn end(self) -> Result { Ok(Content::TupleStruct(self.name, self.fields)) } } pub struct 
SerializeTupleVariant<E> {
    name: &'static str,
    variant_index: u32,
    variant: &'static str,
    fields: Vec<Content>,
    // Ties the serializer's error type to the struct without storing one.
    error: PhantomData<E>,
}

// NOTE(review): the `<E>`/`<T>`/`<K, V>` generic parameter lists and the
// `Result<Content, E>` return types in this span were stripped by an
// HTML-tag filter during extraction; they are restored here.
impl<E> ser::SerializeTupleVariant for SerializeTupleVariant<E>
where
    E: ser::Error,
{
    type Ok = Content;
    type Error = E;

    fn serialize_field<T>(&mut self, value: &T) -> Result<(), E>
    where
        T: Serialize + ?Sized,
    {
        let value = value.serialize(ContentSerializer::<E>::new())?;
        self.fields.push(value);
        Ok(())
    }

    fn end(self) -> Result<Content, E> {
        Ok(Content::TupleVariant(
            self.name,
            self.variant_index,
            self.variant,
            self.fields,
        ))
    }
}

/// Collects map entries; `key` buffers the pending key between
/// `serialize_key` and `serialize_value` calls.
pub struct SerializeMap<E> {
    entries: Vec<(Content, Content)>,
    key: Option<Content>,
    error: PhantomData<E>,
}

impl<E> ser::SerializeMap for SerializeMap<E>
where
    E: ser::Error,
{
    type Ok = Content;
    type Error = E;

    fn serialize_key<T>(&mut self, key: &T) -> Result<(), E>
    where
        T: Serialize + ?Sized,
    {
        let key = key.serialize(ContentSerializer::<E>::new())?;
        self.key = Some(key);
        Ok(())
    }

    fn serialize_value<T>(&mut self, value: &T) -> Result<(), E>
    where
        T: Serialize + ?Sized,
    {
        // Protocol violation by the caller is a bug, hence expect().
        let key = self
            .key
            .take()
            .expect("serialize_value called before serialize_key");
        let value = value.serialize(ContentSerializer::<E>::new())?;
        self.entries.push((key, value));
        Ok(())
    }

    fn end(self) -> Result<Content, E> {
        Ok(Content::Map(self.entries))
    }

    fn serialize_entry<K, V>(&mut self, key: &K, value: &V) -> Result<(), E>
    where
        K: Serialize + ?Sized,
        V: Serialize + ?Sized,
    {
        let key = key.serialize(ContentSerializer::<E>::new())?;
        let value = value.serialize(ContentSerializer::<E>::new())?;
        self.entries.push((key, value));
        Ok(())
    }
}

/// Collects named struct fields into a [`Content::Struct`].
pub struct SerializeStruct<E> {
    name: &'static str,
    fields: Vec<(&'static str, Content)>,
    error: PhantomData<E>,
}

impl<E> ser::SerializeStruct for SerializeStruct<E>
where
    E: ser::Error,
{
    type Ok = Content;
    type Error = E;

    fn serialize_field<T>(&mut self, key: &'static str, value: &T) -> Result<(), E>
    where
        T: Serialize + ?Sized,
    {
        let value = value.serialize(ContentSerializer::<E>::new())?;
        self.fields.push((key, value));
        Ok(())
    }

    fn end(self) -> Result<Content, E> {
        Ok(Content::Struct(self.name, self.fields))
    }
}
pub struct SerializeStructVariant { name: &'static str, variant_index: u32, variant: &'static str, fields: Vec<(&'static str, Content)>, error: PhantomData, } impl ser::SerializeStructVariant for SerializeStructVariant where E: ser::Error, { type Ok = Content; type Error = E; fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), E> where T: Serialize + ?Sized, { let value = value.serialize(ContentSerializer::::new())?; self.fields.push((key, value)); Ok(()) } fn end(self) -> Result { Ok(Content::StructVariant( self.name, self.variant_index, self.variant, self.fields, )) } } insta-1.46.1/src/content/yaml/mod.rs000064400000000000000000000113311046102023000153620ustar 00000000000000pub mod vendored; use std::path::Path; use crate::content::{Content, Error}; use crate::content::yaml::vendored::Yaml as YamlValue; pub fn parse_str(s: &str, filename: &Path) -> Result { let mut blobs = crate::content::yaml::vendored::yaml::YamlLoader::load_from_str(s) .map_err(|_| Error::FailedParsingYaml(filename.to_path_buf()))?; match (blobs.pop(), blobs.pop()) { (Some(blob), None) => from_yaml_blob(blob, filename), _ => Err(Error::FailedParsingYaml(filename.to_path_buf())), } } fn from_yaml_blob(blob: YamlValue, filename: &Path) -> Result { match blob { YamlValue::Null => Ok(Content::None), YamlValue::Boolean(b) => Ok(Content::from(b)), YamlValue::Integer(num) => Ok(Content::from(num)), YamlValue::Real(real_str) => { let real: f64 = real_str.parse().unwrap(); Ok(Content::from(real)) } YamlValue::String(s) => Ok(Content::from(s)), YamlValue::Array(seq) => { let seq = seq .into_iter() .map(|x| from_yaml_blob(x, filename)) .collect::>()?; Ok(Content::Seq(seq)) } YamlValue::Hash(obj) => { let obj = obj .into_iter() .map(|(k, v)| Ok((from_yaml_blob(k, filename)?, from_yaml_blob(v, filename)?))) .collect::>()?; Ok(Content::Map(obj)) } YamlValue::BadValue => Err(Error::FailedParsingYaml(filename.to_path_buf())), } } pub fn to_string(content: &Content) -> String { let 
yaml_blob = to_yaml_value(content); let mut buf = String::new(); let mut emitter = crate::content::yaml::vendored::emitter::YamlEmitter::new(&mut buf); emitter.dump(&yaml_blob).unwrap(); if !buf.ends_with('\n') { buf.push('\n'); } buf } fn to_yaml_value(content: &Content) -> YamlValue { fn translate_seq(seq: &[Content]) -> YamlValue { let seq = seq.iter().map(to_yaml_value).collect(); YamlValue::Array(seq) } fn translate_fields(fields: &[(&str, Content)]) -> YamlValue { let fields = fields .iter() .map(|(k, v)| (YamlValue::String(k.to_string()), to_yaml_value(v))) .collect(); YamlValue::Hash(fields) } match content { Content::Bool(b) => YamlValue::Boolean(*b), Content::U8(n) => YamlValue::Integer(i64::from(*n)), Content::U16(n) => YamlValue::Integer(i64::from(*n)), Content::U32(n) => YamlValue::Integer(i64::from(*n)), Content::U64(n) => YamlValue::Real(n.to_string()), Content::U128(n) => YamlValue::Real(n.to_string()), Content::I8(n) => YamlValue::Integer(i64::from(*n)), Content::I16(n) => YamlValue::Integer(i64::from(*n)), Content::I32(n) => YamlValue::Integer(i64::from(*n)), Content::I64(n) => YamlValue::Integer(*n), Content::I128(n) => YamlValue::Real(n.to_string()), Content::F32(f) => YamlValue::Real(f.to_string()), Content::F64(f) => YamlValue::Real(f.to_string()), Content::Char(c) => YamlValue::String(c.to_string()), Content::String(s) => YamlValue::String(s.to_owned()), Content::Bytes(bytes) => { let bytes = bytes .iter() .map(|b| YamlValue::Integer(i64::from(*b))) .collect(); YamlValue::Array(bytes) } Content::None | Content::Unit | Content::UnitStruct(_) => YamlValue::Null, Content::Some(content) => to_yaml_value(content), Content::UnitVariant(_, _, variant) => YamlValue::String(variant.to_string()), Content::NewtypeStruct(_, content) => to_yaml_value(content), Content::NewtypeVariant(_, _, variant, content) => YamlValue::Hash(vec![( YamlValue::String(variant.to_string()), to_yaml_value(content), )]), Content::Seq(seq) => translate_seq(seq), 
Content::Tuple(seq) => translate_seq(seq), Content::TupleStruct(_, seq) => translate_seq(seq), Content::TupleVariant(_, _, variant, seq) => YamlValue::Hash(vec![( YamlValue::String(variant.to_string()), translate_seq(seq), )]), Content::Map(map) => { let map = map .iter() .map(|(k, v)| (to_yaml_value(k), to_yaml_value(v))) .collect(); YamlValue::Hash(map) } Content::Struct(_name, fields) => translate_fields(fields), Content::StructVariant(_, _, variant, fields) => YamlValue::Hash(vec![( YamlValue::String(variant.to_string()), translate_fields(fields), )]), } } insta-1.46.1/src/content/yaml/vendored/emitter.rs000064400000000000000000000361471046102023000200760ustar 00000000000000use crate::content::yaml::vendored::yaml::{Hash, Yaml}; use std::error::Error; use std::fmt::{self, Display}; #[derive(Copy, Clone, Debug)] pub enum EmitError { FmtError(fmt::Error), } impl Error for EmitError { fn cause(&self) -> Option<&dyn Error> { None } } impl Display for EmitError { fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { match *self { EmitError::FmtError(ref err) => Display::fmt(err, formatter), } } } impl From for EmitError { fn from(f: fmt::Error) -> Self { EmitError::FmtError(f) } } pub struct YamlEmitter<'a> { writer: &'a mut dyn fmt::Write, best_indent: usize, compact: bool, level: isize, } pub type EmitResult = Result<(), EmitError>; /// From [`serialize::json`] fn escape_str(wr: &mut dyn fmt::Write, v: &str) -> Result<(), fmt::Error> { wr.write_str("\"")?; let mut start = 0; for (i, byte) in v.bytes().enumerate() { let escaped = match byte { b'"' => "\\\"", b'\\' => "\\\\", b'\x00' => "\\u0000", b'\x01' => "\\u0001", b'\x02' => "\\u0002", b'\x03' => "\\u0003", b'\x04' => "\\u0004", b'\x05' => "\\u0005", b'\x06' => "\\u0006", b'\x07' => "\\u0007", b'\x08' => "\\b", b'\t' => "\\t", b'\n' => "\\n", b'\x0b' => "\\u000b", b'\x0c' => "\\f", b'\r' => "\\r", b'\x0e' => "\\u000e", b'\x0f' => "\\u000f", b'\x10' => "\\u0010", b'\x11' => "\\u0011", b'\x12' => 
"\\u0012", b'\x13' => "\\u0013", b'\x14' => "\\u0014", b'\x15' => "\\u0015", b'\x16' => "\\u0016", b'\x17' => "\\u0017", b'\x18' => "\\u0018", b'\x19' => "\\u0019", b'\x1a' => "\\u001a", b'\x1b' => "\\u001b", b'\x1c' => "\\u001c", b'\x1d' => "\\u001d", b'\x1e' => "\\u001e", b'\x1f' => "\\u001f", b'\x7f' => "\\u007f", _ => continue, }; if start < i { wr.write_str(&v[start..i])?; } wr.write_str(escaped)?; start = i + 1; } if start != v.len() { wr.write_str(&v[start..])?; } wr.write_str("\"")?; Ok(()) } impl<'a> YamlEmitter<'a> { pub fn new(writer: &'a mut dyn fmt::Write) -> YamlEmitter<'a> { YamlEmitter { writer, best_indent: 2, compact: true, level: -1, } } pub fn dump(&mut self, doc: &Yaml) -> EmitResult { // write DocumentStart writeln!(self.writer, "---")?; self.level = -1; self.emit_node(doc) } fn write_indent(&mut self) -> EmitResult { if self.level <= 0 { return Ok(()); } for _ in 0..self.level { for _ in 0..self.best_indent { write!(self.writer, " ")?; } } Ok(()) } fn emit_node(&mut self, node: &Yaml) -> EmitResult { match *node { Yaml::Array(ref v) => self.emit_array(v), Yaml::Hash(ref h) => self.emit_hash(h), Yaml::String(ref v) => { if need_quotes(v) { escape_str(self.writer, v)?; } else { write!(self.writer, "{v}")?; } Ok(()) } Yaml::Boolean(v) => { if v { self.writer.write_str("true")?; } else { self.writer.write_str("false")?; } Ok(()) } Yaml::Integer(v) => { write!(self.writer, "{v}")?; Ok(()) } Yaml::Real(ref v) => { write!(self.writer, "{v}")?; Ok(()) } Yaml::Null | Yaml::BadValue => { write!(self.writer, "~")?; Ok(()) } } } fn emit_array(&mut self, v: &[Yaml]) -> EmitResult { if v.is_empty() { write!(self.writer, "[]")?; } else { self.level += 1; for (cnt, x) in v.iter().enumerate() { if cnt > 0 { writeln!(self.writer)?; self.write_indent()?; } write!(self.writer, "-")?; self.emit_val(true, x)?; } self.level -= 1; } Ok(()) } fn emit_hash(&mut self, h: &Hash) -> EmitResult { if h.is_empty() { self.writer.write_str("{}")?; } else { self.level += 1; 
for (cnt, (k, v)) in h.iter().enumerate() { let complex_key = matches!(*k, Yaml::Hash(_) | Yaml::Array(_)); if cnt > 0 { writeln!(self.writer)?; self.write_indent()?; } if complex_key { write!(self.writer, "?")?; self.emit_val(true, k)?; writeln!(self.writer)?; self.write_indent()?; write!(self.writer, ":")?; self.emit_val(true, v)?; } else { self.emit_node(k)?; write!(self.writer, ":")?; self.emit_val(false, v)?; } } self.level -= 1; } Ok(()) } /// Emit a yaml as a hash or array value: i.e., which should appear /// following a ":" or "-", either after a space, or on a new line. /// If `inline` is true, then the preceding characters are distinct /// and short enough to respect the compact flag. fn emit_val(&mut self, inline: bool, val: &Yaml) -> EmitResult { match *val { Yaml::Array(ref v) => { if (inline && self.compact) || v.is_empty() { write!(self.writer, " ")?; } else { writeln!(self.writer)?; self.level += 1; self.write_indent()?; self.level -= 1; } self.emit_array(v) } Yaml::Hash(ref h) => { if (inline && self.compact) || h.is_empty() { write!(self.writer, " ")?; } else { writeln!(self.writer)?; self.level += 1; self.write_indent()?; self.level -= 1; } self.emit_hash(h) } _ => { write!(self.writer, " ")?; self.emit_node(val) } } } } #[allow(clippy::doc_markdown)] // \` is recognised as unbalanced backticks /// Check if the string requires quoting. /// /// Strings starting with any of the following characters must be quoted. /// `:`, `&`, `*`, `?`, `|`, `-`, `<`, `>`, `=`, `!`, `%`, `@` /// Strings containing any of the following characters must be quoted. 
/// `{`, `}`, `\[`, `\]`, `,`, `#`, `\`` /// /// If the string contains any of the following control characters, it must be escaped with double quotes: /// `\0`, `\x01`, `\x02`, `\x03`, `\x04`, `\x05`, `\x06`, `\a`, `\b`, `\t`, `\n, `\v, `\f`, `\r`, `\x0e`, `\x0f`, `\x10`, `\x11`, `\x12`, `\x13`, `\x14`, `\x15`, `\x16`, `\x17`, `\x18`, `\x19`, `\x1a`, `\e`, `\x1c`, `\x1d`, `\x1e`, `\x1f`, `\N`, `\_`, `\L`, `\P` /// /// Finally, there are other cases when the strings must be quoted, no matter if you're using single or double quotes: /// * When the string is `true` or `false` (otherwise, it would be treated as a boolean value); /// * When the string is `null` or `~` (otherwise, it would be considered as a null value); /// * When the string looks like a number, such as integers (e.g. `2`, `14`, etc.), floats (e.g. `2.6`, `14.9`) and exponential numbers (e.g. `12e7`, etc.) (otherwise, it would be treated as a numeric value); /// * When the string looks like a date (e.g. `2014-12-31`) (otherwise it would be automatically converted into a Unix timestamp). fn need_quotes(string: &str) -> bool { fn need_quotes_spaces(string: &str) -> bool { string.starts_with(' ') || string.ends_with(' ') } string.is_empty() || need_quotes_spaces(string) || string.starts_with(|character: char| { matches!( character, '&' | '*' | '?' | '|' | '-' | '<' | '>' | '=' | '!' | '%' | '@' ) }) || string.contains(|character: char| { matches!(character, ':' | '{' | '}' | '[' | ']' | ',' | '#' | '`' | '\"' | '\'' | '\\' | '\0'..='\x06' | '\t' | '\n' | '\r' | '\x0e'..='\x1a' | '\x1c'..='\x1f') }) || [ // http://yaml.org/type/bool.html // Note: 'y', 'Y', 'n', 'N', is not quoted deliberately, as in libyaml. PyYAML also parse // them as string, not booleans, although it is violating the YAML 1.1 specification. // See https://github.com/dtolnay/serde-yaml/pull/83#discussion_r152628088. 
"yes", "Yes", "YES", "no", "No", "NO", "True", "TRUE", "true", "False", "FALSE", "false", "on", "On", "ON", "off", "Off", "OFF", // http://yaml.org/type/null.html "null", "Null", "NULL", "~", ] .contains(&string) || string.starts_with('.') || string.starts_with("0x") || string.parse::().is_ok() || string.parse::().is_ok() } #[cfg(test)] mod test { use super::*; use crate::content::yaml::vendored::yaml::YamlLoader; #[test] fn test_emit_simple() { let s = " # comment a0 bb: val a1: b1: 4 b2: d a2: 4 # i'm comment a3: [1, 2, 3] a4: - [a1, a2] - 2 "; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } println!("original:\n{s}"); println!("emitted:\n{writer}"); let docs_new = match YamlLoader::load_from_str(&writer) { Ok(y) => y, Err(e) => panic!("{}", e), }; let doc_new = &docs_new[0]; assert_eq!(doc, doc_new); } #[test] fn test_emit_complex() { let s = r#" catalogue: product: &coffee { name: Coffee, price: 2.5 , unit: 1l } product: &cookies { name: Cookies!, price: 3.40 , unit: 400g} products: *coffee: amount: 4 *cookies: amount: 4 [1,2,3,4]: array key 2.4: real key true: bool key {}: empty hash key "#; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } let docs_new = match YamlLoader::load_from_str(&writer) { Ok(y) => y, Err(e) => panic!("{}", e), }; let doc_new = &docs_new[0]; assert_eq!(doc, doc_new); } #[test] fn test_emit_avoid_quotes() { let s = r#"--- a7: 你好 boolean: "true" boolean2: "false" date: 2014-12-31 empty_string: "" empty_string1: " " empty_string2: " a" empty_string3: " a " exp: "12e7" field: ":" field2: "{" field3: "\\" field4: "\n" field5: "can't avoid quote" float: "2.6" int: "4" nullable: "null" nullable2: "~" products: "*coffee": amount: 4 "*cookies": amount: 4 ".milk": amount: 1 "2.4": 
real key "[1,2,3,4]": array key "true": bool key "{}": empty hash key x: test y: avoid quoting here z: string with spaces"#; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } assert_eq!(s, writer, "actual:\n\n{writer}\n"); } #[test] fn emit_quoted_bools() { let input = r#"--- string0: yes string1: no string2: "true" string3: "false" string4: "~" null0: ~ [true, false]: real_bools [True, TRUE, False, FALSE, y,Y,yes,Yes,YES,n,N,no,No,NO,on,On,ON,off,Off,OFF]: false_bools bool0: true bool1: false"#; let expected = r#"--- string0: "yes" string1: "no" string2: "true" string3: "false" string4: "~" null0: ~ ? - true - false : real_bools ? - "True" - "TRUE" - "False" - "FALSE" - y - Y - "yes" - "Yes" - "YES" - n - N - "no" - "No" - "NO" - "on" - "On" - "ON" - "off" - "Off" - "OFF" : false_bools bool0: true bool1: false"#; let docs = YamlLoader::load_from_str(input).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } assert_eq!( expected, writer, "expected:\n{expected}\nactual:\n{writer}\n" ); } #[test] fn test_empty_and_nested_compact() { let s = r#"--- a: b: c: hello d: {} e: - f - g - h: []"#; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } assert_eq!(s, writer); } #[test] fn test_nested_arrays() { let s = r#"--- a: - b - - c - d - - e - f"#; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } println!("original:\n{s}"); println!("emitted:\n{writer}"); assert_eq!(s, writer); } #[test] fn test_deeply_nested_arrays() { let s = r#"--- a: - b - - c - d - - e - - f - - e"#; let docs = 
YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } println!("original:\n{s}"); println!("emitted:\n{writer}"); assert_eq!(s, writer); } #[test] fn test_nested_hashes() { let s = r#"--- a: b: c: d: e: f"#; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } println!("original:\n{s}"); println!("emitted:\n{writer}"); assert_eq!(s, writer); } } insta-1.46.1/src/content/yaml/vendored/mod.rs000064400000000000000000000034431046102023000171750ustar 00000000000000//! Copyright 2015, Yuheng Chen. Apache 2 licensed. //! //! This vendored code used to be yaml-rust. It's intended to be replaced in //! the next major version with a yaml-rust2 which is an actively maintained //! version of this. Is it has different snapshot formats and different //! MSRV requirements, we vendor it temporarily. 
#![allow(unused)] #![allow(clippy::needless_raw_strings)] pub mod emitter; pub mod parser; pub mod scanner; pub mod yaml; pub use self::yaml::Yaml; #[cfg(test)] mod tests { use super::*; use crate::content::yaml::vendored::emitter::YamlEmitter; use crate::content::yaml::vendored::scanner::ScanError; use crate::content::yaml::vendored::yaml::YamlLoader; #[test] fn test_api() { let s = " # from yaml-cpp example - name: Ogre position: [0, 5, 0] powers: - name: Club damage: 10 - name: Fist damage: 8 - name: Dragon position: [1, 0, 10] powers: - name: Fire Breath damage: 25 - name: Claws damage: 15 - name: Wizard position: [5, -3, 0] powers: - name: Acid Rain damage: 50 - name: Staff damage: 3 "; let docs = YamlLoader::load_from_str(s).unwrap(); let doc = &docs[0]; assert_eq!(doc[0]["name"].as_str().unwrap(), "Ogre"); let mut writer = String::new(); { let mut emitter = YamlEmitter::new(&mut writer); emitter.dump(doc).unwrap(); } assert!(!writer.is_empty()); } fn try_fail(s: &str) -> Result, ScanError> { let t = YamlLoader::load_from_str(s)?; Ok(t) } #[test] fn test_fail() { let s = " # syntax error scalar key: [1, 2]] key1:a2 "; assert!(YamlLoader::load_from_str(s).is_err()); assert!(try_fail(s).is_err()); } } insta-1.46.1/src/content/yaml/vendored/parser.rs000064400000000000000000000672441046102023000177230ustar 00000000000000use crate::content::yaml::vendored::scanner::*; use std::collections::HashMap; #[derive(Clone, Copy, PartialEq, Debug, Eq)] enum State { StreamStart, ImplicitDocumentStart, DocumentStart, DocumentContent, DocumentEnd, BlockNode, // BlockNodeOrIndentlessSequence, // FlowNode, BlockSequenceFirstEntry, BlockSequenceEntry, IndentlessSequenceEntry, BlockMappingFirstKey, BlockMappingKey, BlockMappingValue, FlowSequenceFirstEntry, FlowSequenceEntry, FlowSequenceEntryMappingKey, FlowSequenceEntryMappingValue, FlowSequenceEntryMappingEnd, FlowMappingFirstKey, FlowMappingKey, FlowMappingValue, FlowMappingEmptyValue, End, } /// [`Event`] is used with the 
low-level event base parsing API, /// see [`EventReceiver`] trait. #[derive(Clone, PartialEq, Debug, Eq)] pub enum Event { /// Reserved for internal use StreamStart, StreamEnd, DocumentStart, DocumentEnd, /// Refer to an anchor ID Alias(usize), /// Value, style, anchor ID, tag Scalar(String, TScalarStyle, usize, Option), /// Anchor ID SequenceStart(usize), SequenceEnd, /// Anchor ID MappingStart(usize), MappingEnd, } impl Event { fn empty_scalar() -> Event { // a null scalar Event::Scalar("~".to_owned(), TScalarStyle::Plain, 0, None) } fn empty_scalar_with_anchor(anchor: usize, tag: Option) -> Event { Event::Scalar("".to_owned(), TScalarStyle::Plain, anchor, tag) } } #[derive(Debug)] pub struct Parser { scanner: Scanner, states: Vec, state: State, token: Option, current: Option<(Event, Marker)>, anchors: HashMap, anchor_id: usize, } pub trait EventReceiver { fn on_event(&mut self, ev: Event); } pub trait MarkedEventReceiver { fn on_event(&mut self, ev: Event, _mark: Marker); } impl MarkedEventReceiver for R { fn on_event(&mut self, ev: Event, _mark: Marker) { self.on_event(ev) } } pub type ParseResult = Result<(Event, Marker), ScanError>; impl> Parser { pub fn new(src: T) -> Parser { Parser { scanner: Scanner::new(src), states: Vec::new(), state: State::StreamStart, token: None, current: None, anchors: HashMap::new(), // valid anchor_id starts from 1 anchor_id: 1, } } pub fn next(&mut self) -> ParseResult { match self.current { None => self.parse(), Some(_) => Ok(self.current.take().unwrap()), } } fn peek_token(&mut self) -> Result<&Token, ScanError> { match self.token { None => { self.token = Some(self.scan_next_token()?); Ok(self.token.as_ref().unwrap()) } Some(ref tok) => Ok(tok), } } fn scan_next_token(&mut self) -> Result { let token = self.scanner.next(); match token { None => match self.scanner.get_error() { None => Err(ScanError::new(self.scanner.mark(), "unexpected eof")), Some(e) => Err(e), }, Some(tok) => Ok(tok), } } fn fetch_token(&mut self) -> Token { 
self.token .take() .expect("fetch_token needs to be preceded by peek_token") } fn skip(&mut self) { self.token = None; //self.peek_token(); } fn pop_state(&mut self) { self.state = self.states.pop().unwrap() } fn push_state(&mut self, state: State) { self.states.push(state); } fn parse(&mut self) -> ParseResult { if self.state == State::End { return Ok((Event::StreamEnd, self.scanner.mark())); } let (ev, mark) = self.state_machine()?; // println!("EV {:?}", ev); Ok((ev, mark)) } pub fn load( &mut self, recv: &mut R, multi: bool, ) -> Result<(), ScanError> { if !self.scanner.stream_started() { let (ev, mark) = self.next()?; assert_eq!(ev, Event::StreamStart); recv.on_event(ev, mark); } if self.scanner.stream_ended() { // XXX has parsed? recv.on_event(Event::StreamEnd, self.scanner.mark()); return Ok(()); } loop { let (ev, mark) = self.next()?; if ev == Event::StreamEnd { recv.on_event(ev, mark); return Ok(()); } // clear anchors before a new document self.anchors.clear(); self.load_document(ev, mark, recv)?; if !multi { break; } } Ok(()) } fn load_document( &mut self, first_ev: Event, mark: Marker, recv: &mut R, ) -> Result<(), ScanError> { assert_eq!(first_ev, Event::DocumentStart); recv.on_event(first_ev, mark); let (ev, mark) = self.next()?; self.load_node(ev, mark, recv)?; // DOCUMENT-END is expected. let (ev, mark) = self.next()?; assert_eq!(ev, Event::DocumentEnd); recv.on_event(ev, mark); Ok(()) } fn load_node( &mut self, first_ev: Event, mark: Marker, recv: &mut R, ) -> Result<(), ScanError> { match first_ev { Event::Alias(..) | Event::Scalar(..) 
=> { recv.on_event(first_ev, mark); Ok(()) } Event::SequenceStart(_) => { recv.on_event(first_ev, mark); self.load_sequence(recv) } Event::MappingStart(_) => { recv.on_event(first_ev, mark); self.load_mapping(recv) } _ => { println!("UNREACHABLE EVENT: {first_ev:?}"); unreachable!(); } } } fn load_mapping(&mut self, recv: &mut R) -> Result<(), ScanError> { let (mut key_ev, mut key_mark) = self.next()?; while key_ev != Event::MappingEnd { // key self.load_node(key_ev, key_mark, recv)?; // value let (ev, mark) = self.next()?; self.load_node(ev, mark, recv)?; // next event let (ev, mark) = self.next()?; key_ev = ev; key_mark = mark; } recv.on_event(key_ev, key_mark); Ok(()) } fn load_sequence(&mut self, recv: &mut R) -> Result<(), ScanError> { let (mut ev, mut mark) = self.next()?; while ev != Event::SequenceEnd { self.load_node(ev, mark, recv)?; // next event let (next_ev, next_mark) = self.next()?; ev = next_ev; mark = next_mark; } recv.on_event(ev, mark); Ok(()) } fn state_machine(&mut self) -> ParseResult { // let next_tok = self.peek_token()?; // println!("cur_state {:?}, next tok: {:?}", self.state, next_tok); match self.state { State::StreamStart => self.stream_start(), State::ImplicitDocumentStart => self.document_start(true), State::DocumentStart => self.document_start(false), State::DocumentContent => self.document_content(), State::DocumentEnd => self.document_end(), State::BlockNode => self.parse_node(true, false), // State::BlockNodeOrIndentlessSequence => self.parse_node(true, true), // State::FlowNode => self.parse_node(false, false), State::BlockMappingFirstKey => self.block_mapping_key(true), State::BlockMappingKey => self.block_mapping_key(false), State::BlockMappingValue => self.block_mapping_value(), State::BlockSequenceFirstEntry => self.block_sequence_entry(true), State::BlockSequenceEntry => self.block_sequence_entry(false), State::FlowSequenceFirstEntry => self.flow_sequence_entry(true), State::FlowSequenceEntry => 
self.flow_sequence_entry(false), State::FlowMappingFirstKey => self.flow_mapping_key(true), State::FlowMappingKey => self.flow_mapping_key(false), State::FlowMappingValue => self.flow_mapping_value(false), State::IndentlessSequenceEntry => self.indentless_sequence_entry(), State::FlowSequenceEntryMappingKey => self.flow_sequence_entry_mapping_key(), State::FlowSequenceEntryMappingValue => self.flow_sequence_entry_mapping_value(), State::FlowSequenceEntryMappingEnd => self.flow_sequence_entry_mapping_end(), State::FlowMappingEmptyValue => self.flow_mapping_value(true), /* impossible */ State::End => unreachable!(), } } fn stream_start(&mut self) -> ParseResult { match *self.peek_token()? { Token(mark, TokenType::StreamStart(_)) => { self.state = State::ImplicitDocumentStart; self.skip(); Ok((Event::StreamStart, mark)) } Token(mark, _) => Err(ScanError::new(mark, "did not find expected ")), } } fn document_start(&mut self, implicit: bool) -> ParseResult { if !implicit { while let TokenType::DocumentEnd = self.peek_token()?.1 { self.skip(); } } match *self.peek_token()? { Token(mark, TokenType::StreamEnd) => { self.state = State::End; self.skip(); Ok((Event::StreamEnd, mark)) } Token(_, TokenType::VersionDirective(..)) | Token(_, TokenType::TagDirective(..)) | Token(_, TokenType::DocumentStart) => { // explicit document self._explicit_document_start() } Token(mark, _) if implicit => { self.parser_process_directives()?; self.push_state(State::DocumentEnd); self.state = State::BlockNode; Ok((Event::DocumentStart, mark)) } _ => { // explicit document self._explicit_document_start() } } } fn parser_process_directives(&mut self) -> Result<(), ScanError> { loop { match self.peek_token()?.1 { TokenType::VersionDirective(_, _) => { // XXX parsing with warning according to spec //if major != 1 || minor > 2 { // return Err(ScanError::new(tok.0, // "found incompatible YAML document")); //} } TokenType::TagDirective(..) 
=> { // TODO add tag directive } _ => break, } self.skip(); } // TODO tag directive Ok(()) } fn _explicit_document_start(&mut self) -> ParseResult { self.parser_process_directives()?; match *self.peek_token()? { Token(mark, TokenType::DocumentStart) => { self.push_state(State::DocumentEnd); self.state = State::DocumentContent; self.skip(); Ok((Event::DocumentStart, mark)) } Token(mark, _) => Err(ScanError::new( mark, "did not find expected ", )), } } fn document_content(&mut self) -> ParseResult { match *self.peek_token()? { Token(mark, TokenType::VersionDirective(..)) | Token(mark, TokenType::TagDirective(..)) | Token(mark, TokenType::DocumentStart) | Token(mark, TokenType::DocumentEnd) | Token(mark, TokenType::StreamEnd) => { self.pop_state(); // empty scalar Ok((Event::empty_scalar(), mark)) } _ => self.parse_node(true, false), } } fn document_end(&mut self) -> ParseResult { let mut _implicit = true; let marker: Marker = match *self.peek_token()? { Token(mark, TokenType::DocumentEnd) => { self.skip(); _implicit = false; mark } Token(mark, _) => mark, }; // TODO tag handling self.state = State::DocumentStart; Ok((Event::DocumentEnd, marker)) } fn register_anchor(&mut self, name: String, _: &Marker) -> Result { // anchors can be overridden/reused // if self.anchors.contains_key(name) { // return Err(ScanError::new(*mark, // "while parsing anchor, found duplicated anchor")); // } let new_id = self.anchor_id; self.anchor_id += 1; self.anchors.insert(name, new_id); Ok(new_id) } fn parse_node(&mut self, block: bool, indentless_sequence: bool) -> ParseResult { let mut anchor_id = 0; let mut tag = None; match *self.peek_token()? 
{ Token(_, TokenType::Alias(_)) => { self.pop_state(); if let Token(mark, TokenType::Alias(name)) = self.fetch_token() { match self.anchors.get(&name) { None => { return Err(ScanError::new( mark, "while parsing node, found unknown anchor", )) } Some(id) => return Ok((Event::Alias(*id), mark)), } } else { unreachable!() } } Token(_, TokenType::Anchor(_)) => { if let Token(mark, TokenType::Anchor(name)) = self.fetch_token() { anchor_id = self.register_anchor(name, &mark)?; if let TokenType::Tag(..) = self.peek_token()?.1 { if let tg @ TokenType::Tag(..) = self.fetch_token().1 { tag = Some(tg); } else { unreachable!() } } } else { unreachable!() } } Token(_, TokenType::Tag(..)) => { if let tg @ TokenType::Tag(..) = self.fetch_token().1 { tag = Some(tg); if let TokenType::Anchor(_) = self.peek_token()?.1 { if let Token(mark, TokenType::Anchor(name)) = self.fetch_token() { anchor_id = self.register_anchor(name, &mark)?; } else { unreachable!() } } } else { unreachable!() } } _ => {} } match *self.peek_token()? 
{ Token(mark, TokenType::BlockEntry) if indentless_sequence => { self.state = State::IndentlessSequenceEntry; Ok((Event::SequenceStart(anchor_id), mark)) } Token(_, TokenType::Scalar(..)) => { self.pop_state(); if let Token(mark, TokenType::Scalar(style, v)) = self.fetch_token() { Ok((Event::Scalar(v, style, anchor_id, tag), mark)) } else { unreachable!() } } Token(mark, TokenType::FlowSequenceStart) => { self.state = State::FlowSequenceFirstEntry; Ok((Event::SequenceStart(anchor_id), mark)) } Token(mark, TokenType::FlowMappingStart) => { self.state = State::FlowMappingFirstKey; Ok((Event::MappingStart(anchor_id), mark)) } Token(mark, TokenType::BlockSequenceStart) if block => { self.state = State::BlockSequenceFirstEntry; Ok((Event::SequenceStart(anchor_id), mark)) } Token(mark, TokenType::BlockMappingStart) if block => { self.state = State::BlockMappingFirstKey; Ok((Event::MappingStart(anchor_id), mark)) } // ex 7.2, an empty scalar can follow a secondary tag Token(mark, _) if tag.is_some() || anchor_id > 0 => { self.pop_state(); Ok((Event::empty_scalar_with_anchor(anchor_id, tag), mark)) } Token(mark, _) => Err(ScanError::new( mark, "while parsing a node, did not find expected node content", )), } } fn block_mapping_key(&mut self, first: bool) -> ParseResult { // skip BlockMappingStart if first { let _ = self.peek_token()?; //self.marks.push(tok.0); self.skip(); } match *self.peek_token()? { Token(_, TokenType::Key) => { self.skip(); match *self.peek_token()? 
{ Token(mark, TokenType::Key) | Token(mark, TokenType::Value) | Token(mark, TokenType::BlockEnd) => { self.state = State::BlockMappingValue; // empty scalar Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::BlockMappingValue); self.parse_node(true, true) } } } // XXX(chenyh): libyaml failed to parse spec 1.2, ex8.18 Token(mark, TokenType::Value) => { self.state = State::BlockMappingValue; Ok((Event::empty_scalar(), mark)) } Token(mark, TokenType::BlockEnd) => { self.pop_state(); self.skip(); Ok((Event::MappingEnd, mark)) } Token(mark, _) => Err(ScanError::new( mark, "while parsing a block mapping, did not find expected key", )), } } fn block_mapping_value(&mut self) -> ParseResult { match *self.peek_token()? { Token(_, TokenType::Value) => { self.skip(); match *self.peek_token()? { Token(mark, TokenType::Key) | Token(mark, TokenType::Value) | Token(mark, TokenType::BlockEnd) => { self.state = State::BlockMappingKey; // empty scalar Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::BlockMappingKey); self.parse_node(true, true) } } } Token(mark, _) => { self.state = State::BlockMappingKey; // empty scalar Ok((Event::empty_scalar(), mark)) } } } fn flow_mapping_key(&mut self, first: bool) -> ParseResult { if first { let _ = self.peek_token()?; self.skip(); } let marker: Marker = { match *self.peek_token()? { Token(mark, TokenType::FlowMappingEnd) => mark, Token(mark, _) => { if !first { match *self.peek_token()? { Token(_, TokenType::FlowEntry) => self.skip(), Token(mark, _) => return Err(ScanError::new(mark, "while parsing a flow mapping, did not find expected ',' or '}'")) } } match *self.peek_token()? { Token(_, TokenType::Key) => { self.skip(); match *self.peek_token()? 
{ Token(mark, TokenType::Value) | Token(mark, TokenType::FlowEntry) | Token(mark, TokenType::FlowMappingEnd) => { self.state = State::FlowMappingValue; return Ok((Event::empty_scalar(), mark)); } _ => { self.push_state(State::FlowMappingValue); return self.parse_node(false, false); } } } Token(marker, TokenType::Value) => { self.state = State::FlowMappingValue; return Ok((Event::empty_scalar(), marker)); } Token(_, TokenType::FlowMappingEnd) => (), _ => { self.push_state(State::FlowMappingEmptyValue); return self.parse_node(false, false); } } mark } } }; self.pop_state(); self.skip(); Ok((Event::MappingEnd, marker)) } fn flow_mapping_value(&mut self, empty: bool) -> ParseResult { let mark: Marker = { if empty { let Token(mark, _) = *self.peek_token()?; self.state = State::FlowMappingKey; return Ok((Event::empty_scalar(), mark)); } else { match *self.peek_token()? { Token(marker, TokenType::Value) => { self.skip(); match self.peek_token()?.1 { TokenType::FlowEntry | TokenType::FlowMappingEnd => {} _ => { self.push_state(State::FlowMappingKey); return self.parse_node(false, false); } } marker } Token(marker, _) => marker, } } }; self.state = State::FlowMappingKey; Ok((Event::empty_scalar(), mark)) } fn flow_sequence_entry(&mut self, first: bool) -> ParseResult { // skip FlowMappingStart if first { let _ = self.peek_token()?; //self.marks.push(tok.0); self.skip(); } match *self.peek_token()? { Token(mark, TokenType::FlowSequenceEnd) => { self.pop_state(); self.skip(); return Ok((Event::SequenceEnd, mark)); } Token(_, TokenType::FlowEntry) if !first => { self.skip(); } Token(mark, _) if !first => { return Err(ScanError::new( mark, "while parsing a flow sequence, expected ',' or ']'", )); } _ => { /* next */ } } match *self.peek_token()? 
{ Token(mark, TokenType::FlowSequenceEnd) => { self.pop_state(); self.skip(); Ok((Event::SequenceEnd, mark)) } Token(mark, TokenType::Key) => { self.state = State::FlowSequenceEntryMappingKey; self.skip(); Ok((Event::MappingStart(0), mark)) } _ => { self.push_state(State::FlowSequenceEntry); self.parse_node(false, false) } } } fn indentless_sequence_entry(&mut self) -> ParseResult { match *self.peek_token()? { Token(_, TokenType::BlockEntry) => (), Token(mark, _) => { self.pop_state(); return Ok((Event::SequenceEnd, mark)); } } self.skip(); match *self.peek_token()? { Token(mark, TokenType::BlockEntry) | Token(mark, TokenType::Key) | Token(mark, TokenType::Value) | Token(mark, TokenType::BlockEnd) => { self.state = State::IndentlessSequenceEntry; Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::IndentlessSequenceEntry); self.parse_node(true, false) } } } fn block_sequence_entry(&mut self, first: bool) -> ParseResult { // BLOCK-SEQUENCE-START if first { let _ = self.peek_token()?; //self.marks.push(tok.0); self.skip(); } match *self.peek_token()? { Token(mark, TokenType::BlockEnd) => { self.pop_state(); self.skip(); Ok((Event::SequenceEnd, mark)) } Token(_, TokenType::BlockEntry) => { self.skip(); match *self.peek_token()? { Token(mark, TokenType::BlockEntry) | Token(mark, TokenType::BlockEnd) => { self.state = State::BlockSequenceEntry; Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::BlockSequenceEntry); self.parse_node(true, false) } } } Token(mark, _) => Err(ScanError::new( mark, "while parsing a block collection, did not find expected '-' indicator", )), } } fn flow_sequence_entry_mapping_key(&mut self) -> ParseResult { match *self.peek_token()? 
{ Token(mark, TokenType::Value) | Token(mark, TokenType::FlowEntry) | Token(mark, TokenType::FlowSequenceEnd) => { self.skip(); self.state = State::FlowSequenceEntryMappingValue; Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::FlowSequenceEntryMappingValue); self.parse_node(false, false) } } } fn flow_sequence_entry_mapping_value(&mut self) -> ParseResult { match *self.peek_token()? { Token(_, TokenType::Value) => { self.skip(); self.state = State::FlowSequenceEntryMappingValue; match *self.peek_token()? { Token(mark, TokenType::FlowEntry) | Token(mark, TokenType::FlowSequenceEnd) => { self.state = State::FlowSequenceEntryMappingEnd; Ok((Event::empty_scalar(), mark)) } _ => { self.push_state(State::FlowSequenceEntryMappingEnd); self.parse_node(false, false) } } } Token(mark, _) => { self.state = State::FlowSequenceEntryMappingEnd; Ok((Event::empty_scalar(), mark)) } } } fn flow_sequence_entry_mapping_end(&mut self) -> ParseResult { self.state = State::FlowSequenceEntry; Ok((Event::MappingEnd, self.scanner.mark())) } } insta-1.46.1/src/content/yaml/vendored/scanner.rs000064400000000000000000001753501046102023000200560ustar 00000000000000use std::collections::VecDeque; use std::error::Error; use std::{char, fmt}; #[derive(Clone, Copy, PartialEq, Debug, Eq)] pub enum TEncoding { Utf8, } #[derive(Clone, Copy, PartialEq, Debug, Eq)] pub enum TScalarStyle { Plain, SingleQuoted, DoubleQuoted, Literal, Foled, } #[derive(Clone, Copy, PartialEq, Debug, Eq)] pub struct Marker { index: usize, line: usize, col: usize, } impl Marker { fn new(index: usize, line: usize, col: usize) -> Marker { Marker { index, line, col } } } #[derive(Clone, PartialEq, Debug, Eq)] pub struct ScanError { mark: Marker, info: String, } impl ScanError { pub fn new(loc: Marker, info: &str) -> ScanError { ScanError { mark: loc, info: info.to_owned(), } } } impl Error for ScanError { fn description(&self) -> &str { self.info.as_ref() } fn cause(&self) -> Option<&dyn Error> { None } } 
impl fmt::Display for ScanError { // col starts from 0 fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, "{} at line {} column {}", self.info, self.mark.line, self.mark.col + 1 ) } } #[derive(Clone, PartialEq, Debug, Eq)] pub enum TokenType { StreamStart(TEncoding), StreamEnd, /// major, minor VersionDirective(u32, u32), /// handle, prefix TagDirective(String, String), DocumentStart, DocumentEnd, BlockSequenceStart, BlockMappingStart, BlockEnd, FlowSequenceStart, FlowSequenceEnd, FlowMappingStart, FlowMappingEnd, BlockEntry, FlowEntry, Key, Value, Alias(String), Anchor(String), /// handle, suffix Tag(String, String), Scalar(TScalarStyle, String), } #[derive(Clone, PartialEq, Debug, Eq)] pub struct Token(pub Marker, pub TokenType); #[derive(Clone, PartialEq, Debug, Eq)] struct SimpleKey { possible: bool, required: bool, token_number: usize, mark: Marker, } impl SimpleKey { fn new(mark: Marker) -> SimpleKey { SimpleKey { possible: false, required: false, token_number: 0, mark, } } } #[derive(Debug)] pub struct Scanner { rdr: T, mark: Marker, tokens: VecDeque, buffer: VecDeque, error: Option, stream_start_produced: bool, stream_end_produced: bool, adjacent_value_allowed_at: usize, simple_key_allowed: bool, simple_keys: Vec, indent: isize, indents: Vec, flow_level: u8, tokens_parsed: usize, token_available: bool, } impl> Iterator for Scanner { type Item = Token; fn next(&mut self) -> Option { if self.error.is_some() { return None; } match self.next_token() { Ok(tok) => tok, Err(e) => { self.error = Some(e); None } } } } #[inline] fn is_z(c: char) -> bool { c == '\0' } #[inline] fn is_break(c: char) -> bool { c == '\n' || c == '\r' } #[inline] fn is_breakz(c: char) -> bool { is_break(c) || is_z(c) } #[inline] fn is_blank(c: char) -> bool { c == ' ' || c == '\t' } #[inline] fn is_blankz(c: char) -> bool { is_blank(c) || is_breakz(c) } #[inline] fn is_digit(c: char) -> bool { c.is_ascii_digit() } #[inline] fn is_alpha(c: char) -> bool { 
matches!(c, '0'..='9' | 'a'..='z' | 'A'..='Z' | '_' | '-') } #[inline] fn is_hex(c: char) -> bool { c.is_ascii_digit() || ('a'..='f').contains(&c) || ('A'..='F').contains(&c) } #[inline] fn as_hex(c: char) -> u32 { match c { '0'..='9' => (c as u32) - ('0' as u32), 'a'..='f' => (c as u32) - ('a' as u32) + 10, 'A'..='F' => (c as u32) - ('A' as u32) + 10, _ => unreachable!(), } } #[inline] fn is_flow(c: char) -> bool { matches!(c, ',' | '[' | ']' | '{' | '}') } pub type ScanResult = Result<(), ScanError>; impl> Scanner { /// Creates the YAML tokenizer. pub fn new(rdr: T) -> Scanner { Scanner { rdr, buffer: VecDeque::new(), mark: Marker::new(0, 1, 0), tokens: VecDeque::new(), error: None, stream_start_produced: false, stream_end_produced: false, adjacent_value_allowed_at: 0, simple_key_allowed: true, simple_keys: Vec::new(), indent: -1, indents: Vec::new(), flow_level: 0, tokens_parsed: 0, token_available: false, } } #[inline] pub fn get_error(&self) -> Option { self.error.clone() } #[inline] fn lookahead(&mut self, count: usize) { if self.buffer.len() >= count { return; } for _ in 0..(count - self.buffer.len()) { self.buffer.push_back(self.rdr.next().unwrap_or('\0')); } } #[inline] fn skip(&mut self) { let c = self.buffer.pop_front().unwrap(); self.mark.index += 1; if c == '\n' { self.mark.line += 1; self.mark.col = 0; } else { self.mark.col += 1; } } #[inline] fn skip_line(&mut self) { if self.buffer[0] == '\r' && self.buffer[1] == '\n' { self.skip(); self.skip(); } else if is_break(self.buffer[0]) { self.skip(); } } #[inline] fn ch(&self) -> char { self.buffer[0] } #[inline] fn ch_is(&self, c: char) -> bool { self.buffer[0] == c } #[allow(dead_code)] #[inline] fn eof(&self) -> bool { self.ch_is('\0') } #[inline] pub fn stream_started(&self) -> bool { self.stream_start_produced } #[inline] pub fn stream_ended(&self) -> bool { self.stream_end_produced } #[inline] pub fn mark(&self) -> Marker { self.mark } #[inline] fn read_break(&mut self, s: &mut String) { if 
self.buffer[0] == '\r' && self.buffer[1] == '\n' { s.push('\n'); self.skip(); self.skip(); } else if self.buffer[0] == '\r' || self.buffer[0] == '\n' { s.push('\n'); self.skip(); } else { unreachable!(); } } fn insert_token(&mut self, pos: usize, tok: Token) { let old_len = self.tokens.len(); assert!(pos <= old_len); self.tokens.push_back(tok); for i in 0..old_len - pos { self.tokens.swap(old_len - i, old_len - i - 1); } } fn allow_simple_key(&mut self) { self.simple_key_allowed = true; } fn disallow_simple_key(&mut self) { self.simple_key_allowed = false; } pub fn fetch_next_token(&mut self) -> ScanResult { self.lookahead(1); // println!("--> fetch_next_token Cur {:?} {:?}", self.mark, self.ch()); if !self.stream_start_produced { self.fetch_stream_start(); return Ok(()); } self.skip_to_next_token(); self.stale_simple_keys()?; let mark = self.mark; self.unroll_indent(mark.col as isize); self.lookahead(4); if is_z(self.ch()) { self.fetch_stream_end()?; return Ok(()); } // Is it a directive? if self.mark.col == 0 && self.ch_is('%') { return self.fetch_directive(); } if self.mark.col == 0 && self.buffer[0] == '-' && self.buffer[1] == '-' && self.buffer[2] == '-' && is_blankz(self.buffer[3]) { self.fetch_document_indicator(TokenType::DocumentStart)?; return Ok(()); } if self.mark.col == 0 && self.buffer[0] == '.' && self.buffer[1] == '.' && self.buffer[2] == '.' && is_blankz(self.buffer[3]) { self.fetch_document_indicator(TokenType::DocumentEnd)?; return Ok(()); } let c = self.buffer[0]; let nc = self.buffer[1]; match c { '[' => self.fetch_flow_collection_start(TokenType::FlowSequenceStart), '{' => self.fetch_flow_collection_start(TokenType::FlowMappingStart), ']' => self.fetch_flow_collection_end(TokenType::FlowSequenceEnd), '}' => self.fetch_flow_collection_end(TokenType::FlowMappingEnd), ',' => self.fetch_flow_entry(), '-' if is_blankz(nc) => self.fetch_block_entry(), '?' 
if is_blankz(nc) => self.fetch_key(), ':' if is_blankz(nc) || (self.flow_level > 0 && (is_flow(nc) || self.mark.index == self.adjacent_value_allowed_at)) => { self.fetch_value() } // Is it an alias? '*' => self.fetch_anchor(true), // Is it an anchor? '&' => self.fetch_anchor(false), '!' => self.fetch_tag(), // Is it a literal scalar? '|' if self.flow_level == 0 => self.fetch_block_scalar(true), // Is it a folded scalar? '>' if self.flow_level == 0 => self.fetch_block_scalar(false), '\'' => self.fetch_flow_scalar(true), '"' => self.fetch_flow_scalar(false), // plain scalar '-' if !is_blankz(nc) => self.fetch_plain_scalar(), ':' | '?' if !is_blankz(nc) && self.flow_level == 0 => self.fetch_plain_scalar(), '%' | '@' | '`' => Err(ScanError::new( self.mark, &format!("unexpected character: `{c}'"), )), _ => self.fetch_plain_scalar(), } } pub fn next_token(&mut self) -> Result, ScanError> { if self.stream_end_produced { return Ok(None); } if !self.token_available { self.fetch_more_tokens()?; } let t = self.tokens.pop_front().unwrap(); self.token_available = false; self.tokens_parsed += 1; if let TokenType::StreamEnd = t.1 { self.stream_end_produced = true; } Ok(Some(t)) } pub fn fetch_more_tokens(&mut self) -> ScanResult { let mut need_more; loop { need_more = false; if self.tokens.is_empty() { need_more = true; } else { self.stale_simple_keys()?; for sk in &self.simple_keys { if sk.possible && sk.token_number == self.tokens_parsed { need_more = true; break; } } } if !need_more { break; } self.fetch_next_token()?; } self.token_available = true; Ok(()) } fn stale_simple_keys(&mut self) -> ScanResult { for sk in &mut self.simple_keys { if sk.possible && (sk.mark.line < self.mark.line || sk.mark.index + 1024 < self.mark.index) { if sk.required { return Err(ScanError::new(self.mark, "simple key expect ':'")); } sk.possible = false; } } Ok(()) } fn skip_to_next_token(&mut self) { loop { self.lookahead(1); // TODO(chenyh) BOM match self.ch() { ' ' => self.skip(), '\t' if 
self.flow_level > 0 || !self.simple_key_allowed => self.skip(), '\n' | '\r' => { self.lookahead(2); self.skip_line(); if self.flow_level == 0 { self.allow_simple_key(); } } '#' => { while !is_breakz(self.ch()) { self.skip(); self.lookahead(1); } } _ => break, } } } fn fetch_stream_start(&mut self) { let mark = self.mark; self.indent = -1; self.stream_start_produced = true; self.allow_simple_key(); self.tokens .push_back(Token(mark, TokenType::StreamStart(TEncoding::Utf8))); self.simple_keys.push(SimpleKey::new(Marker::new(0, 0, 0))); } fn fetch_stream_end(&mut self) -> ScanResult { // force new line if self.mark.col != 0 { self.mark.col = 0; self.mark.line += 1; } self.unroll_indent(-1); self.remove_simple_key()?; self.disallow_simple_key(); self.tokens .push_back(Token(self.mark, TokenType::StreamEnd)); Ok(()) } fn fetch_directive(&mut self) -> ScanResult { self.unroll_indent(-1); self.remove_simple_key()?; self.disallow_simple_key(); let tok = self.scan_directive()?; self.tokens.push_back(tok); Ok(()) } fn scan_directive(&mut self) -> Result { let start_mark = self.mark; self.skip(); let name = self.scan_directive_name()?; let tok = match name.as_ref() { "YAML" => self.scan_version_directive_value(&start_mark)?, "TAG" => self.scan_tag_directive_value(&start_mark)?, // XXX This should be a warning instead of an error _ => { // skip current line self.lookahead(1); while !is_breakz(self.ch()) { self.skip(); self.lookahead(1); } // XXX return an empty TagDirective token Token( start_mark, TokenType::TagDirective(String::new(), String::new()), ) // return Err(ScanError::new(start_mark, // "while scanning a directive, found unknown directive name")) } }; self.lookahead(1); while is_blank(self.ch()) { self.skip(); self.lookahead(1); } if self.ch() == '#' { while !is_breakz(self.ch()) { self.skip(); self.lookahead(1); } } if !is_breakz(self.ch()) { return Err(ScanError::new( start_mark, "while scanning a directive, did not find expected comment or line break", )); } // 
Eat a line break if is_break(self.ch()) { self.lookahead(2); self.skip_line(); } Ok(tok) } fn scan_version_directive_value(&mut self, mark: &Marker) -> Result { self.lookahead(1); while is_blank(self.ch()) { self.skip(); self.lookahead(1); } let major = self.scan_version_directive_number(mark)?; if self.ch() != '.' { return Err(ScanError::new( *mark, "while scanning a YAML directive, did not find expected digit or '.' character", )); } self.skip(); let minor = self.scan_version_directive_number(mark)?; Ok(Token(*mark, TokenType::VersionDirective(major, minor))) } fn scan_directive_name(&mut self) -> Result { let start_mark = self.mark; let mut string = String::new(); self.lookahead(1); while is_alpha(self.ch()) { string.push(self.ch()); self.skip(); self.lookahead(1); } if string.is_empty() { return Err(ScanError::new( start_mark, "while scanning a directive, could not find expected directive name", )); } if !is_blankz(self.ch()) { return Err(ScanError::new( start_mark, "while scanning a directive, found unexpected non-alphabetical character", )); } Ok(string) } fn scan_version_directive_number(&mut self, mark: &Marker) -> Result { let mut val = 0u32; let mut length = 0usize; self.lookahead(1); while is_digit(self.ch()) { if length + 1 > 9 { return Err(ScanError::new( *mark, "while scanning a YAML directive, found extremely long version number", )); } length += 1; val = val * 10 + ((self.ch() as u32) - ('0' as u32)); self.skip(); self.lookahead(1); } if length == 0 { return Err(ScanError::new( *mark, "while scanning a YAML directive, did not find expected version number", )); } Ok(val) } fn scan_tag_directive_value(&mut self, mark: &Marker) -> Result { self.lookahead(1); /* Eat whitespaces. */ while is_blank(self.ch()) { self.skip(); self.lookahead(1); } let handle = self.scan_tag_handle(true, mark)?; self.lookahead(1); /* Eat whitespaces. 
*/ while is_blank(self.ch()) { self.skip(); self.lookahead(1); } let is_secondary = handle == "!!"; let prefix = self.scan_tag_uri(true, is_secondary, "", mark)?; self.lookahead(1); if is_blankz(self.ch()) { Ok(Token(*mark, TokenType::TagDirective(handle, prefix))) } else { Err(ScanError::new( *mark, "while scanning TAG, did not find expected whitespace or line break", )) } } fn fetch_tag(&mut self) -> ScanResult { self.save_simple_key()?; self.disallow_simple_key(); let tok = self.scan_tag()?; self.tokens.push_back(tok); Ok(()) } fn scan_tag(&mut self) -> Result { let start_mark = self.mark; let mut handle = String::new(); let mut suffix; let mut secondary = false; // Check if the tag is in the canonical form (verbatim). self.lookahead(2); if self.buffer[1] == '<' { // Eat '!<' self.skip(); self.skip(); suffix = self.scan_tag_uri(false, false, "", &start_mark)?; if self.ch() != '>' { return Err(ScanError::new( start_mark, "while scanning a tag, did not find the expected '>'", )); } self.skip(); } else { // The tag has either the '!suffix' or the '!handle!suffix' handle = self.scan_tag_handle(false, &start_mark)?; // Check if it is, indeed, handle. if handle.len() >= 2 && handle.starts_with('!') && handle.ends_with('!') { if handle == "!!" { secondary = true; } suffix = self.scan_tag_uri(false, secondary, "", &start_mark)?; } else { suffix = self.scan_tag_uri(false, false, &handle, &start_mark)?; handle = "!".to_string(); // A special case: the '!' tag. Set the handle to '' and the // suffix to '!'. 
if suffix.is_empty() { handle.clear(); suffix = "!".to_owned(); } } } self.lookahead(1); if is_blankz(self.ch()) { // XXX: ex 7.2, an empty scalar can follow a secondary tag Ok(Token(start_mark, TokenType::Tag(handle, suffix))) } else { Err(ScanError::new( start_mark, "while scanning a tag, did not find expected whitespace or line break", )) } } fn scan_tag_handle(&mut self, directive: bool, mark: &Marker) -> Result { let mut string = String::new(); self.lookahead(1); if self.ch() != '!' { return Err(ScanError::new( *mark, "while scanning a tag, did not find expected '!'", )); } string.push(self.ch()); self.skip(); self.lookahead(1); while is_alpha(self.ch()) { string.push(self.ch()); self.skip(); self.lookahead(1); } // Check if the trailing character is '!' and copy it. if self.ch() == '!' { string.push(self.ch()); self.skip(); } else if directive && string != "!" { // It's either the '!' tag or not really a tag handle. If it's a %TAG // directive, it's an error. If it's a tag token, it must be a part of // URI. return Err(ScanError::new( *mark, "while parsing a tag directive, did not find expected '!'", )); } Ok(string) } fn scan_tag_uri( &mut self, directive: bool, _is_secondary: bool, head: &str, mark: &Marker, ) -> Result { let mut length = head.len(); let mut string = String::new(); // Copy the head if needed. // Note that we don't copy the leading '!' character. if length > 1 { string.extend(head.chars().skip(1)); } self.lookahead(1); /* * The set of characters that may appear in URI is as follows: * * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', * '%'. */ while match self.ch() { ';' | '/' | '?' | ':' | '@' | '&' => true, '=' | '+' | '$' | ',' | '.' | '!' | '~' | '*' | '\'' | '(' | ')' | '[' | ']' => true, '%' => true, c if is_alpha(c) => true, _ => false, } { // Check if it is a URI-escape sequence. 
if self.ch() == '%' { string.push(self.scan_uri_escapes(directive, mark)?); } else { string.push(self.ch()); self.skip(); } length += 1; self.lookahead(1); } if length == 0 { return Err(ScanError::new( *mark, "while parsing a tag, did not find expected tag URI", )); } Ok(string) } fn scan_uri_escapes(&mut self, _directive: bool, mark: &Marker) -> Result { let mut width = 0usize; let mut code = 0u32; loop { self.lookahead(3); if !(self.ch() == '%' && is_hex(self.buffer[1]) && is_hex(self.buffer[2])) { return Err(ScanError::new( *mark, "while parsing a tag, did not find URI escaped octet", )); } let octet = (as_hex(self.buffer[1]) << 4) + as_hex(self.buffer[2]); if width == 0 { width = match octet { _ if octet & 0x80 == 0x00 => 1, _ if octet & 0xE0 == 0xC0 => 2, _ if octet & 0xF0 == 0xE0 => 3, _ if octet & 0xF8 == 0xF0 => 4, _ => { return Err(ScanError::new( *mark, "while parsing a tag, found an incorrect leading UTF-8 octet", )); } }; code = octet; } else { if octet & 0xc0 != 0x80 { return Err(ScanError::new( *mark, "while parsing a tag, found an incorrect trailing UTF-8 octet", )); } code = (code << 8) + octet; } self.skip(); self.skip(); self.skip(); width -= 1; if width == 0 { break; } } match char::from_u32(code) { Some(ch) => Ok(ch), None => Err(ScanError::new( *mark, "while parsing a tag, found an invalid UTF-8 codepoint", )), } } fn fetch_anchor(&mut self, alias: bool) -> ScanResult { self.save_simple_key()?; self.disallow_simple_key(); let tok = self.scan_anchor(alias)?; self.tokens.push_back(tok); Ok(()) } fn scan_anchor(&mut self, alias: bool) -> Result { let mut string = String::new(); let start_mark = self.mark; self.skip(); self.lookahead(1); while is_alpha(self.ch()) { string.push(self.ch()); self.skip(); self.lookahead(1); } if string.is_empty() || match self.ch() { c if is_blankz(c) => false, '?' 
| ':' | ',' | ']' | '}' | '%' | '@' | '`' => false, _ => true, } { return Err(ScanError::new(start_mark, "while scanning an anchor or alias, did not find expected alphabetic or numeric character")); } if alias { Ok(Token(start_mark, TokenType::Alias(string))) } else { Ok(Token(start_mark, TokenType::Anchor(string))) } } fn fetch_flow_collection_start(&mut self, tok: TokenType) -> ScanResult { // The indicators '[' and '{' may start a simple key. self.save_simple_key()?; self.increase_flow_level()?; self.allow_simple_key(); let start_mark = self.mark; self.skip(); self.tokens.push_back(Token(start_mark, tok)); Ok(()) } fn fetch_flow_collection_end(&mut self, tok: TokenType) -> ScanResult { self.remove_simple_key()?; self.decrease_flow_level(); self.disallow_simple_key(); let start_mark = self.mark; self.skip(); self.tokens.push_back(Token(start_mark, tok)); Ok(()) } fn fetch_flow_entry(&mut self) -> ScanResult { self.remove_simple_key()?; self.allow_simple_key(); let start_mark = self.mark; self.skip(); self.tokens .push_back(Token(start_mark, TokenType::FlowEntry)); Ok(()) } fn increase_flow_level(&mut self) -> ScanResult { self.simple_keys.push(SimpleKey::new(Marker::new(0, 0, 0))); self.flow_level = self .flow_level .checked_add(1) .ok_or_else(|| ScanError::new(self.mark, "recursion limit exceeded"))?; Ok(()) } fn decrease_flow_level(&mut self) { if self.flow_level > 0 { self.flow_level -= 1; self.simple_keys.pop().unwrap(); } } fn fetch_block_entry(&mut self) -> ScanResult { if self.flow_level == 0 { // Check if we are allowed to start a new entry. 
if !self.simple_key_allowed { return Err(ScanError::new( self.mark, "block sequence entries are not allowed in this context", )); } let mark = self.mark; // generate BLOCK-SEQUENCE-START if indented self.roll_indent(mark.col, None, TokenType::BlockSequenceStart, mark); } else { // - * only allowed in block return Err(ScanError::new( self.mark, r#""-" is only valid inside a block"#, )); } self.remove_simple_key()?; self.allow_simple_key(); let start_mark = self.mark; self.skip(); self.tokens .push_back(Token(start_mark, TokenType::BlockEntry)); Ok(()) } fn fetch_document_indicator(&mut self, t: TokenType) -> ScanResult { self.unroll_indent(-1); self.remove_simple_key()?; self.disallow_simple_key(); let mark = self.mark; self.skip(); self.skip(); self.skip(); self.tokens.push_back(Token(mark, t)); Ok(()) } fn fetch_block_scalar(&mut self, literal: bool) -> ScanResult { self.save_simple_key()?; self.allow_simple_key(); let tok = self.scan_block_scalar(literal)?; self.tokens.push_back(tok); Ok(()) } fn scan_block_scalar(&mut self, literal: bool) -> Result { let start_mark = self.mark; let mut chomping: i32 = 0; let mut increment: usize = 0; let mut indent: usize = 0; let mut trailing_blank: bool; let mut leading_blank: bool = false; let mut string = String::new(); let mut leading_break = String::new(); let mut trailing_breaks = String::new(); // skip '|' or '>' self.skip(); self.lookahead(1); if self.ch() == '+' || self.ch() == '-' { if self.ch() == '+' { chomping = 1; } else { chomping = -1; } self.skip(); self.lookahead(1); if is_digit(self.ch()) { if self.ch() == '0' { return Err(ScanError::new( start_mark, "while scanning a block scalar, found an indentation indicator equal to 0", )); } increment = (self.ch() as usize) - ('0' as usize); self.skip(); } } else if is_digit(self.ch()) { if self.ch() == '0' { return Err(ScanError::new( start_mark, "while scanning a block scalar, found an indentation indicator equal to 0", )); } increment = (self.ch() as usize) - ('0' as 
usize); self.skip(); self.lookahead(1); if self.ch() == '+' || self.ch() == '-' { if self.ch() == '+' { chomping = 1; } else { chomping = -1; } self.skip(); } } // Eat whitespaces and comments to the end of the line. self.lookahead(1); while is_blank(self.ch()) { self.skip(); self.lookahead(1); } if self.ch() == '#' { while !is_breakz(self.ch()) { self.skip(); self.lookahead(1); } } // Check if we are at the end of the line. if !is_breakz(self.ch()) { return Err(ScanError::new( start_mark, "while scanning a block scalar, did not find expected comment or line break", )); } if is_break(self.ch()) { self.lookahead(2); self.skip_line(); } if increment > 0 { indent = if self.indent >= 0 { (self.indent + increment as isize) as usize } else { increment } } // Scan the leading line breaks and determine the indentation level if needed. self.block_scalar_breaks(&mut indent, &mut trailing_breaks)?; self.lookahead(1); let start_mark = self.mark; while self.mark.col == indent && !is_z(self.ch()) { // We are at the beginning of a non-empty line. trailing_blank = is_blank(self.ch()); if !literal && !leading_break.is_empty() && !leading_blank && !trailing_blank { if trailing_breaks.is_empty() { string.push(' '); } leading_break.clear(); } else { string.push_str(&leading_break); leading_break.clear(); } string.push_str(&trailing_breaks); trailing_breaks.clear(); leading_blank = is_blank(self.ch()); while !is_breakz(self.ch()) { string.push(self.ch()); self.skip(); self.lookahead(1); } // break on EOF if is_z(self.ch()) { break; } self.lookahead(2); self.read_break(&mut leading_break); // Eat the following indentation spaces and line breaks. self.block_scalar_breaks(&mut indent, &mut trailing_breaks)?; } // Chomp the tail. 
if chomping != -1 { string.push_str(&leading_break); } if chomping == 1 { string.push_str(&trailing_breaks); } if literal { Ok(Token( start_mark, TokenType::Scalar(TScalarStyle::Literal, string), )) } else { Ok(Token( start_mark, TokenType::Scalar(TScalarStyle::Foled, string), )) } } fn block_scalar_breaks(&mut self, indent: &mut usize, breaks: &mut String) -> ScanResult { let mut max_indent = 0; loop { self.lookahead(1); while (*indent == 0 || self.mark.col < *indent) && self.buffer[0] == ' ' { self.skip(); self.lookahead(1); } if self.mark.col > max_indent { max_indent = self.mark.col; } // Check for a tab character messing the indentation. if (*indent == 0 || self.mark.col < *indent) && self.buffer[0] == '\t' { return Err(ScanError::new(self.mark, "while scanning a block scalar, found a tab character where an indentation space is expected")); } if !is_break(self.ch()) { break; } self.lookahead(2); // Consume the line break. self.read_break(breaks); } if *indent == 0 { *indent = max_indent; if *indent < (self.indent + 1) as usize { *indent = (self.indent + 1) as usize; } if *indent < 1 { *indent = 1; } } Ok(()) } fn fetch_flow_scalar(&mut self, single: bool) -> ScanResult { self.save_simple_key()?; self.disallow_simple_key(); let tok = self.scan_flow_scalar(single)?; // From spec: To ensure JSON compatibility, if a key inside a flow mapping is JSON-like, // YAML allows the following value to be specified adjacent to the “:”. self.adjacent_value_allowed_at = self.mark.index; self.tokens.push_back(tok); Ok(()) } fn scan_flow_scalar(&mut self, single: bool) -> Result { let start_mark = self.mark; let mut string = String::new(); let mut leading_break = String::new(); let mut trailing_breaks = String::new(); let mut whitespaces = String::new(); let mut leading_blanks; /* Eat the left quote. */ self.skip(); loop { /* Check for a document indicator. 
*/ self.lookahead(4); if self.mark.col == 0 && (((self.buffer[0] == '-') && (self.buffer[1] == '-') && (self.buffer[2] == '-')) || ((self.buffer[0] == '.') && (self.buffer[1] == '.') && (self.buffer[2] == '.'))) && is_blankz(self.buffer[3]) { return Err(ScanError::new( start_mark, "while scanning a quoted scalar, found unexpected document indicator", )); } if is_z(self.ch()) { return Err(ScanError::new( start_mark, "while scanning a quoted scalar, found unexpected end of stream", )); } self.lookahead(2); leading_blanks = false; // Consume non-blank characters. while !is_blankz(self.ch()) { match self.ch() { // Check for an escaped single quote. '\'' if self.buffer[1] == '\'' && single => { string.push('\''); self.skip(); self.skip(); } // Check for the right quote. '\'' if single => break, '"' if !single => break, // Check for an escaped line break. '\\' if !single && is_break(self.buffer[1]) => { self.lookahead(3); self.skip(); self.skip_line(); leading_blanks = true; break; } // Check for an escape sequence. '\\' if !single => { let mut code_length = 0usize; match self.buffer[1] { '0' => string.push('\0'), 'a' => string.push('\x07'), 'b' => string.push('\x08'), 't' | '\t' => string.push('\t'), 'n' => string.push('\n'), 'v' => string.push('\x0b'), 'f' => string.push('\x0c'), 'r' => string.push('\x0d'), 'e' => string.push('\x1b'), ' ' => string.push('\x20'), '"' => string.push('"'), '\'' => string.push('\''), '\\' => string.push('\\'), // NEL (#x85) 'N' => string.push(char::from_u32(0x85).unwrap()), // #xA0 '_' => string.push(char::from_u32(0xA0).unwrap()), // LS (#x2028) 'L' => string.push(char::from_u32(0x2028).unwrap()), // PS (#x2029) 'P' => string.push(char::from_u32(0x2029).unwrap()), 'x' => code_length = 2, 'u' => code_length = 4, 'U' => code_length = 8, _ => { return Err(ScanError::new( start_mark, "while parsing a quoted scalar, found unknown escape character", )) } } self.skip(); self.skip(); // Consume an arbitrary escape code. 
if code_length > 0 { self.lookahead(code_length); let mut value = 0u32; for i in 0..code_length { if !is_hex(self.buffer[i]) { return Err(ScanError::new(start_mark, "while parsing a quoted scalar, did not find expected hexadecimal number")); } value = (value << 4) + as_hex(self.buffer[i]); } let ch = match char::from_u32(value) { Some(v) => v, None => { return Err(ScanError::new(start_mark, "while parsing a quoted scalar, found invalid Unicode character escape code")); } }; string.push(ch); for _ in 0..code_length { self.skip(); } } } c => { string.push(c); self.skip(); } } self.lookahead(2); } self.lookahead(1); match self.ch() { '\'' if single => break, '"' if !single => break, _ => {} } // Consume blank characters. while is_blank(self.ch()) || is_break(self.ch()) { if is_blank(self.ch()) { // Consume a space or a tab character. if leading_blanks { self.skip(); } else { whitespaces.push(self.ch()); self.skip(); } } else { self.lookahead(2); // Check if it is a first line break. if leading_blanks { self.read_break(&mut trailing_breaks); } else { whitespaces.clear(); self.read_break(&mut leading_break); leading_blanks = true; } } self.lookahead(1); } // Join the whitespaces or fold line breaks. if leading_blanks { if leading_break.is_empty() { string.push_str(&leading_break); string.push_str(&trailing_breaks); trailing_breaks.clear(); leading_break.clear(); } else { if trailing_breaks.is_empty() { string.push(' '); } else { string.push_str(&trailing_breaks); trailing_breaks.clear(); } leading_break.clear(); } } else { string.push_str(&whitespaces); whitespaces.clear(); } } // loop // Eat the right quote. 
self.skip(); if single { Ok(Token( start_mark, TokenType::Scalar(TScalarStyle::SingleQuoted, string), )) } else { Ok(Token( start_mark, TokenType::Scalar(TScalarStyle::DoubleQuoted, string), )) } } fn fetch_plain_scalar(&mut self) -> ScanResult { self.save_simple_key()?; self.disallow_simple_key(); let tok = self.scan_plain_scalar()?; self.tokens.push_back(tok); Ok(()) } fn scan_plain_scalar(&mut self) -> Result { let indent = self.indent + 1; let start_mark = self.mark; let mut string = String::new(); let mut leading_break = String::new(); let mut trailing_breaks = String::new(); let mut whitespaces = String::new(); let mut leading_blanks = false; loop { /* Check for a document indicator. */ self.lookahead(4); if self.mark.col == 0 && (((self.buffer[0] == '-') && (self.buffer[1] == '-') && (self.buffer[2] == '-')) || ((self.buffer[0] == '.') && (self.buffer[1] == '.') && (self.buffer[2] == '.'))) && is_blankz(self.buffer[3]) { break; } if self.ch() == '#' { break; } while !is_blankz(self.ch()) { // indicators can end a plain scalar, see 7.3.3. Plain Style match self.ch() { ':' if is_blankz(self.buffer[1]) || (self.flow_level > 0 && is_flow(self.buffer[1])) => { break; } ',' | '[' | ']' | '{' | '}' if self.flow_level > 0 => break, _ => {} } if leading_blanks || !whitespaces.is_empty() { if leading_blanks { if leading_break.is_empty() { string.push_str(&leading_break); string.push_str(&trailing_breaks); trailing_breaks.clear(); leading_break.clear(); } else { if trailing_breaks.is_empty() { string.push(' '); } else { string.push_str(&trailing_breaks); trailing_breaks.clear(); } leading_break.clear(); } leading_blanks = false; } else { string.push_str(&whitespaces); whitespaces.clear(); } } string.push(self.ch()); self.skip(); self.lookahead(2); } // is the end? 
if !(is_blank(self.ch()) || is_break(self.ch())) { break; } self.lookahead(1); while is_blank(self.ch()) || is_break(self.ch()) { if is_blank(self.ch()) { if leading_blanks && (self.mark.col as isize) < indent && self.ch() == '\t' { return Err(ScanError::new( start_mark, "while scanning a plain scalar, found a tab", )); } if leading_blanks { self.skip(); } else { whitespaces.push(self.ch()); self.skip(); } } else { self.lookahead(2); // Check if it is a first line break if leading_blanks { self.read_break(&mut trailing_breaks); } else { whitespaces.clear(); self.read_break(&mut leading_break); leading_blanks = true; } } self.lookahead(1); } // check indentation level if self.flow_level == 0 && (self.mark.col as isize) < indent { break; } } if leading_blanks { self.allow_simple_key(); } Ok(Token( start_mark, TokenType::Scalar(TScalarStyle::Plain, string), )) } fn fetch_key(&mut self) -> ScanResult { let start_mark = self.mark; if self.flow_level == 0 { // Check if we are allowed to start a new key (not necessarily simple). if !self.simple_key_allowed { return Err(ScanError::new( self.mark, "mapping keys are not allowed in this context", )); } self.roll_indent( start_mark.col, None, TokenType::BlockMappingStart, start_mark, ); } self.remove_simple_key()?; if self.flow_level == 0 { self.allow_simple_key(); } else { self.disallow_simple_key(); } self.skip(); self.tokens.push_back(Token(start_mark, TokenType::Key)); Ok(()) } fn fetch_value(&mut self) -> ScanResult { let sk = self.simple_keys.last().unwrap().clone(); let start_mark = self.mark; if sk.possible { // insert simple key let tok = Token(sk.mark, TokenType::Key); let tokens_parsed = self.tokens_parsed; self.insert_token(sk.token_number - tokens_parsed, tok); // Add the BLOCK-MAPPING-START token if needed. 
self.roll_indent( sk.mark.col, Some(sk.token_number), TokenType::BlockMappingStart, start_mark, ); self.simple_keys.last_mut().unwrap().possible = false; self.disallow_simple_key(); } else { // The ':' indicator follows a complex key. if self.flow_level == 0 { if !self.simple_key_allowed { return Err(ScanError::new( start_mark, "mapping values are not allowed in this context", )); } self.roll_indent( start_mark.col, None, TokenType::BlockMappingStart, start_mark, ); } if self.flow_level == 0 { self.allow_simple_key(); } else { self.disallow_simple_key(); } } self.skip(); self.tokens.push_back(Token(start_mark, TokenType::Value)); Ok(()) } fn roll_indent(&mut self, col: usize, number: Option, tok: TokenType, mark: Marker) { if self.flow_level > 0 { return; } if self.indent < col as isize { self.indents.push(self.indent); self.indent = col as isize; let tokens_parsed = self.tokens_parsed; match number { Some(n) => self.insert_token(n - tokens_parsed, Token(mark, tok)), None => self.tokens.push_back(Token(mark, tok)), } } } fn unroll_indent(&mut self, col: isize) { if self.flow_level > 0 { return; } while self.indent > col { self.tokens.push_back(Token(self.mark, TokenType::BlockEnd)); self.indent = self.indents.pop().unwrap(); } } fn save_simple_key(&mut self) -> Result<(), ScanError> { let required = self.flow_level > 0 && self.indent == (self.mark.col as isize); if self.simple_key_allowed { let mut sk = SimpleKey::new(self.mark); sk.possible = true; sk.required = required; sk.token_number = self.tokens_parsed + self.tokens.len(); self.remove_simple_key()?; self.simple_keys.pop(); self.simple_keys.push(sk); } Ok(()) } fn remove_simple_key(&mut self) -> ScanResult { let last = self.simple_keys.last_mut().unwrap(); if last.possible && last.required { return Err(ScanError::new(self.mark, "simple key expected")); } last.possible = false; Ok(()) } } #[cfg(test)] mod test { use super::TokenType::*; use super::*; macro_rules! 
next { ($p:ident, $tk:pat) => {{ let tok = $p.next().unwrap(); match tok.1 { $tk => {} _ => panic!("unexpected token: {:?}", tok), } }}; } macro_rules! next_scalar { ($p:ident, $tk:expr, $v:expr) => {{ let tok = $p.next().unwrap(); match tok.1 { Scalar(style, ref v) => { assert_eq!(style, $tk); assert_eq!(v, $v); } _ => panic!("unexpected token: {:?}", tok), } }}; } macro_rules! end { ($p:ident) => {{ assert_eq!($p.next(), None); }}; } /// test cases in libyaml scanner.c #[test] fn test_empty() { let s = ""; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, StreamEnd); end!(p); } #[test] fn test_scalar() { let s = "a scalar"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, StreamEnd); end!(p); } #[test] fn test_explicit_scalar() { let s = "--- 'a scalar' ... "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, DocumentStart); next!(p, Scalar(TScalarStyle::SingleQuoted, _)); next!(p, DocumentEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_multiple_documents() { let s = " 'a scalar' --- 'a scalar' --- 'a scalar' "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, Scalar(TScalarStyle::SingleQuoted, _)); next!(p, DocumentStart); next!(p, Scalar(TScalarStyle::SingleQuoted, _)); next!(p, DocumentStart); next!(p, Scalar(TScalarStyle::SingleQuoted, _)); next!(p, StreamEnd); end!(p); } #[test] fn test_a_flow_sequence() { let s = "[item 1, item 2, item 3]"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, FlowSequenceStart); next_scalar!(p, TScalarStyle::Plain, "item 1"); next!(p, FlowEntry); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, FlowEntry); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, FlowSequenceEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_a_flow_mapping() { let s = " { a simple key: a value, # Note that the KEY token is produced. ? 
a complex key: another value, } "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, FlowMappingStart); next!(p, Key); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, Value); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, FlowEntry); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "a complex key"); next!(p, Value); next!(p, Scalar(TScalarStyle::Plain, _)); next!(p, FlowEntry); next!(p, FlowMappingEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_block_sequences() { let s = " - item 1 - item 2 - - item 3.1 - item 3.2 - key 1: value 1 key 2: value 2 "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, BlockSequenceStart); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 2"); next!(p, BlockEntry); next!(p, BlockSequenceStart); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 3.1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 3.2"); next!(p, BlockEnd); next!(p, BlockEntry); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 1"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 1"); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 2"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 2"); next!(p, BlockEnd); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_block_mappings() { let s = " a simple key: a value # The KEY token is produced here. ? 
a complex key : another value a mapping: key 1: value 1 key 2: value 2 a sequence: - item 1 - item 2 "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, BlockMappingStart); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); next!(p, Scalar(_, _)); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); next!(p, Scalar(_, _)); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); // libyaml comment seems to be wrong next!(p, BlockMappingStart); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); next!(p, Scalar(_, _)); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); next!(p, Scalar(_, _)); next!(p, BlockEnd); next!(p, Key); next!(p, Scalar(_, _)); next!(p, Value); next!(p, BlockSequenceStart); next!(p, BlockEntry); next!(p, Scalar(_, _)); next!(p, BlockEntry); next!(p, Scalar(_, _)); next!(p, BlockEnd); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_no_block_sequence_start() { let s = " key: - item 1 - item 2 "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key"); next!(p, Value); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 2"); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_collections_in_sequence() { let s = " - - item 1 - item 2 - key 1: value 1 key 2: value 2 - ? 
complex key : complex value "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, BlockSequenceStart); next!(p, BlockEntry); next!(p, BlockSequenceStart); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 2"); next!(p, BlockEnd); next!(p, BlockEntry); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 1"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 1"); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 2"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 2"); next!(p, BlockEnd); next!(p, BlockEntry); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "complex key"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "complex value"); next!(p, BlockEnd); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_collections_in_mapping() { let s = " ? a sequence : - item 1 - item 2 ? a mapping : key 1: value 1 key 2: value 2 "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "a sequence"); next!(p, Value); next!(p, BlockSequenceStart); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "item 2"); next!(p, BlockEnd); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "a mapping"); next!(p, Value); next!(p, BlockMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 1"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 1"); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "key 2"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "value 2"); next!(p, BlockEnd); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_spec_ex7_3() { let s = " { ? 
foo :, : bar, } "; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, FlowMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "foo"); next!(p, Value); next!(p, FlowEntry); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "bar"); next!(p, FlowEntry); next!(p, FlowMappingEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_plain_scalar_starting_with_indicators_in_flow() { // "Plain scalars must not begin with most indicators, as this would cause ambiguity with // other YAML constructs. However, the “:”, “?” and “-” indicators may be used as the first // character if followed by a non-space “safe” character, as this causes no ambiguity." let s = "{a: :b}"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, FlowMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "a"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, ":b"); next!(p, FlowMappingEnd); next!(p, StreamEnd); end!(p); let s = "{a: ?b}"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, FlowMappingStart); next!(p, Key); next_scalar!(p, TScalarStyle::Plain, "a"); next!(p, Value); next_scalar!(p, TScalarStyle::Plain, "?b"); next!(p, FlowMappingEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_plain_scalar_starting_with_indicators_in_block() { let s = ":a"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next_scalar!(p, TScalarStyle::Plain, ":a"); next!(p, StreamEnd); end!(p); let s = "?a"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next_scalar!(p, TScalarStyle::Plain, "?a"); next!(p, StreamEnd); end!(p); } #[test] fn test_plain_scalar_containing_indicators_in_block() { let s = "a:,b"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next_scalar!(p, TScalarStyle::Plain, "a:,b"); next!(p, StreamEnd); end!(p); let s = ":,b"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next_scalar!(p, TScalarStyle::Plain, ":,b"); next!(p, StreamEnd); 
end!(p); } #[test] fn test_scanner_cr() { let s = "---\r\n- tok1\r\n- tok2"; let mut p = Scanner::new(s.chars()); next!(p, StreamStart(..)); next!(p, DocumentStart); next!(p, BlockSequenceStart); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "tok1"); next!(p, BlockEntry); next_scalar!(p, TScalarStyle::Plain, "tok2"); next!(p, BlockEnd); next!(p, StreamEnd); end!(p); } #[test] fn test_uri() { // TODO } #[test] fn test_uri_escapes() { // TODO } } insta-1.46.1/src/content/yaml/vendored/yaml.rs000064400000000000000000000461521046102023000173640ustar 00000000000000use crate::content::yaml::vendored::parser::*; use crate::content::yaml::vendored::scanner::{Marker, ScanError, TScalarStyle, TokenType}; use std::collections::BTreeMap; use std::f64; use std::mem; use std::ops::Index; use std::string; use std::vec; /// A YAML node is stored as this `Yaml` enumeration, which provides an easy way to /// access your YAML document. #[derive(Clone, PartialEq, PartialOrd, Debug, Eq, Ord, Hash)] pub enum Yaml { /// Float types are stored as [`String`] and parsed on demand. /// Note that [`f64'] does NOT implement [`Eq'] trait and can NOT be stored in [`BTreeMap`]. Real(string::String), /// YAML int is stored as i64. Integer(i64), /// YAML scalar. String(string::String), /// YAML bool, e.g. `true` or `false`. Boolean(bool), /// YAML array, can be accessed as a `Vec`. Array(self::Array), /// YAML hash, can be accessed as a sorted vector of key/value pairs. /// /// Insertion order will match the order of insertion into the map. Hash(self::Hash), /// YAML null, e.g. `null` or `~`. Null, /// Accessing a nonexistent node via the Index trait returns `BadValue`. This /// simplifies error handling in the calling code. Invalid type conversion also /// returns `BadValue`. 
BadValue, } pub type Array = Vec; pub type Hash = Vec<(Yaml, Yaml)>; // parse f64 as Core schema // See: https://github.com/chyh1990/yaml-rust/issues/51 fn parse_f64(v: &str) -> Option { match v { ".inf" | ".Inf" | ".INF" | "+.inf" | "+.Inf" | "+.INF" => Some(f64::INFINITY), "-.inf" | "-.Inf" | "-.INF" => Some(f64::NEG_INFINITY), ".nan" | "NaN" | ".NAN" => Some(f64::NAN), _ => v.parse::().ok(), } } pub struct YamlLoader { docs: Vec, // states // (current node, anchor_id) tuple doc_stack: Vec<(Yaml, usize)>, key_stack: Vec, anchor_map: BTreeMap, } impl MarkedEventReceiver for YamlLoader { fn on_event(&mut self, ev: Event, _: Marker) { // println!("EV {:?}", ev); match ev { Event::DocumentStart => { // do nothing } Event::DocumentEnd => { match self.doc_stack.len() { // empty document 0 => self.docs.push(Yaml::BadValue), 1 => self.docs.push(self.doc_stack.pop().unwrap().0), _ => unreachable!(), } } Event::SequenceStart(aid) => { self.doc_stack.push((Yaml::Array(Vec::new()), aid)); } Event::SequenceEnd => { let node = self.doc_stack.pop().unwrap(); self.insert_new_node(node); } Event::MappingStart(aid) => { self.doc_stack.push((Yaml::Hash(Hash::new()), aid)); self.key_stack.push(Yaml::BadValue); } Event::MappingEnd => { self.key_stack.pop().unwrap(); let node = self.doc_stack.pop().unwrap(); self.insert_new_node(node); } Event::Scalar(v, style, aid, tag) => { let node = if style != TScalarStyle::Plain { Yaml::String(v) } else if let Some(TokenType::Tag(ref handle, ref suffix)) = tag { // XXX tag:yaml.org,2002: if handle == "!!" 
{ match suffix.as_ref() { "bool" => { // "true" or "false" match v.parse::() { Err(_) => Yaml::BadValue, Ok(v) => Yaml::Boolean(v), } } "int" => match v.parse::() { Err(_) => Yaml::BadValue, Ok(v) => Yaml::Integer(v), }, "float" => match parse_f64(&v) { Some(_) => Yaml::Real(v), None => Yaml::BadValue, }, "null" => match v.as_ref() { "~" | "null" => Yaml::Null, _ => Yaml::BadValue, }, _ => Yaml::String(v), } } else { Yaml::String(v) } } else { // Datatype is not specified, or unrecognized Yaml::from_str(&v) }; self.insert_new_node((node, aid)); } _ => { /* ignore */ } } // println!("DOC {:?}", self.doc_stack); } } impl YamlLoader { fn insert_new_node(&mut self, node: (Yaml, usize)) { // valid anchor id starts from 1 if node.1 > 0 { self.anchor_map.insert(node.1, node.0.clone()); } if self.doc_stack.is_empty() { self.doc_stack.push(node); } else { let parent = self.doc_stack.last_mut().unwrap(); match *parent { (Yaml::Array(ref mut v), _) => v.push(node.0), (Yaml::Hash(ref mut h), _) => { let cur_key = self.key_stack.last_mut().unwrap(); // current node is a key if cur_key.is_badvalue() { *cur_key = node.0; // current node is a value } else { let mut newkey = Yaml::BadValue; mem::swap(&mut newkey, cur_key); h.push((newkey, node.0)); } } _ => unreachable!(), } } } pub fn load_from_str(source: &str) -> Result, ScanError> { let mut loader = YamlLoader { docs: Vec::new(), doc_stack: Vec::new(), key_stack: Vec::new(), anchor_map: BTreeMap::new(), }; let mut parser = Parser::new(source.chars()); parser.load(&mut loader, true)?; Ok(loader.docs) } } macro_rules! define_as ( ($name:ident, $t:ident, $yt:ident) => ( pub fn $name(&self) -> Option<$t> { match *self { Yaml::$yt(v) => Some(v), _ => None } } ); ); macro_rules! define_as_ref ( ($name:ident, $t:ty, $yt:ident) => ( pub fn $name(&self) -> Option<$t> { match *self { Yaml::$yt(ref v) => Some(v), _ => None } } ); ); macro_rules! 
define_into ( ($name:ident, $t:ty, $yt:ident) => ( pub fn $name(self) -> Option<$t> { match self { Yaml::$yt(v) => Some(v), _ => None } } ); ); impl Yaml { define_as!(as_bool, bool, Boolean); define_as!(as_i64, i64, Integer); define_as_ref!(as_str, &str, String); define_as_ref!(as_hash, &Hash, Hash); define_as_ref!(as_vec, &Array, Array); define_into!(into_bool, bool, Boolean); define_into!(into_i64, i64, Integer); define_into!(into_string, String, String); define_into!(into_hash, Hash, Hash); define_into!(into_vec, Array, Array); pub fn is_null(&self) -> bool { matches!(*self, Yaml::Null) } pub fn is_badvalue(&self) -> bool { matches!(*self, Yaml::BadValue) } pub fn is_array(&self) -> bool { matches!(*self, Yaml::Array(_)) } pub fn as_f64(&self) -> Option { match *self { Yaml::Real(ref v) => parse_f64(v), _ => None, } } pub fn into_f64(self) -> Option { match self { Yaml::Real(ref v) => parse_f64(v), _ => None, } } } impl Yaml { // Not implementing FromStr because there is no possibility of Error. // This function falls back to Yaml::String if nothing else matches. 
pub fn from_str(v: &str) -> Yaml { if let Some(rest) = v.strip_prefix("0x") { if let Ok(i) = i64::from_str_radix(rest, 16) { return Yaml::Integer(i); } } if let Some(rest) = v.strip_prefix("0o") { if let Ok(i) = i64::from_str_radix(rest, 8) { return Yaml::Integer(i); } } if let Some(rest) = v.strip_prefix('+') { if let Ok(i) = rest.parse::() { return Yaml::Integer(i); } } match v { "~" | "null" => Yaml::Null, "true" => Yaml::Boolean(true), "false" => Yaml::Boolean(false), _ if v.parse::().is_ok() => Yaml::Integer(v.parse::().unwrap()), // try parsing as f64 _ if parse_f64(v).is_some() => Yaml::Real(v.to_owned()), _ => Yaml::String(v.to_owned()), } } } static BAD_VALUE: Yaml = Yaml::BadValue; impl<'a> Index<&'a str> for Yaml { type Output = Yaml; fn index(&self, idx: &'a str) -> &Yaml { let key = Yaml::String(idx.to_owned()); match self.as_hash() { Some(h) => h.iter().find(|x| x.0 == key).map_or(&BAD_VALUE, |x| &x.1), None => &BAD_VALUE, } } } impl Index for Yaml { type Output = Yaml; fn index(&self, idx: usize) -> &Yaml { if let Some(v) = self.as_vec() { v.get(idx).unwrap_or(&BAD_VALUE) } else if let Some(v) = self.as_hash() { let key = Yaml::Integer(idx as i64); v.iter().find(|x| x.0 == key).map_or(&BAD_VALUE, |x| &x.1) } else { &BAD_VALUE } } } impl IntoIterator for Yaml { type Item = Yaml; type IntoIter = YamlIter; fn into_iter(self) -> Self::IntoIter { YamlIter { yaml: self.into_vec().unwrap_or_default().into_iter(), } } } pub struct YamlIter { yaml: vec::IntoIter, } impl Iterator for YamlIter { type Item = Yaml; fn next(&mut self) -> Option { self.yaml.next() } } #[cfg(test)] mod test { use crate::content::yaml::vendored::yaml::*; use std::f64; #[test] fn test_coerce() { let s = "--- a: 1 b: 2.2 c: [1, 2] "; let out = YamlLoader::load_from_str(s).unwrap(); let doc = &out[0]; assert_eq!(doc["a"].as_i64().unwrap(), 1i64); assert_eq!(doc["b"].as_f64().unwrap(), 2.2f64); assert_eq!(doc["c"][1].as_i64().unwrap(), 2i64); assert!(doc["d"][0].is_badvalue()); } #[test] 
fn test_empty_doc() { let s: String = "".to_owned(); YamlLoader::load_from_str(&s).unwrap(); let s: String = "---".to_owned(); assert_eq!(YamlLoader::load_from_str(&s).unwrap()[0], Yaml::Null); } #[test] fn test_parser() { let s: String = " # comment a0 bb: val a1: b1: 4 b2: d a2: 4 # i'm comment a3: [1, 2, 3] a4: - - a1 - a2 - 2 a5: 'single_quoted' a6: \"double_quoted\" a7: 你好 " .to_owned(); let out = YamlLoader::load_from_str(&s).unwrap(); let doc = &out[0]; assert_eq!(doc["a7"].as_str().unwrap(), "你好"); } #[test] fn test_multi_doc() { let s = " 'a scalar' --- 'a scalar' --- 'a scalar' "; let out = YamlLoader::load_from_str(s).unwrap(); assert_eq!(out.len(), 3); } #[test] fn test_bad_anchor() { let s = " a1: &DEFAULT b1: 4 b2: *DEFAULT "; let out = YamlLoader::load_from_str(s).unwrap(); let doc = &out[0]; assert_eq!(doc["a1"]["b2"], Yaml::BadValue); } #[test] fn test_github_27() { // https://github.com/chyh1990/yaml-rust/issues/27 let s = "&a"; let out = YamlLoader::load_from_str(s).unwrap(); let doc = &out[0]; assert_eq!(doc.as_str().unwrap(), ""); } #[test] fn test_plain_datatype() { let s = " - 'string' - \"string\" - string - 123 - -321 - 1.23 - -1e4 - ~ - null - true - false - !!str 0 - !!int 100 - !!float 2 - !!null ~ - !!bool true - !!bool false - 0xFF # bad values - !!int string - !!float string - !!bool null - !!null val - 0o77 - [ 0xF, 0xF ] - +12345 - [ true, false ] "; let out = YamlLoader::load_from_str(s).unwrap(); let doc = &out[0]; assert_eq!(doc[0].as_str().unwrap(), "string"); assert_eq!(doc[1].as_str().unwrap(), "string"); assert_eq!(doc[2].as_str().unwrap(), "string"); assert_eq!(doc[3].as_i64().unwrap(), 123); assert_eq!(doc[4].as_i64().unwrap(), -321); assert_eq!(doc[5].as_f64().unwrap(), 1.23); assert_eq!(doc[6].as_f64().unwrap(), -1e4); assert!(doc[7].is_null()); assert!(doc[8].is_null()); assert!(doc[9].as_bool().unwrap()); assert!(!doc[10].as_bool().unwrap()); assert_eq!(doc[11].as_str().unwrap(), "0"); 
assert_eq!(doc[12].as_i64().unwrap(), 100); assert_eq!(doc[13].as_f64().unwrap(), 2.0); assert!(doc[14].is_null()); assert!(doc[15].as_bool().unwrap()); assert!(!doc[16].as_bool().unwrap()); assert_eq!(doc[17].as_i64().unwrap(), 255); assert!(doc[18].is_badvalue()); assert!(doc[19].is_badvalue()); assert!(doc[20].is_badvalue()); assert!(doc[21].is_badvalue()); assert_eq!(doc[22].as_i64().unwrap(), 63); assert_eq!(doc[23][0].as_i64().unwrap(), 15); assert_eq!(doc[23][1].as_i64().unwrap(), 15); assert_eq!(doc[24].as_i64().unwrap(), 12345); assert!(doc[25][0].as_bool().unwrap()); assert!(!doc[25][1].as_bool().unwrap()); } #[test] fn test_bad_hyphen() { // See: https://github.com/chyh1990/yaml-rust/issues/23 let s = "{-"; assert!(YamlLoader::load_from_str(s).is_err()); } #[test] fn test_issue_65() { // See: https://github.com/chyh1990/yaml-rust/issues/65 let b = "\n\"ll\\\"ll\\\r\n\"ll\\\"ll\\\r\r\r\rU\r\r\rU"; assert!(YamlLoader::load_from_str(b).is_err()); } #[test] fn test_bad_docstart() { assert!(YamlLoader::load_from_str("---This used to cause an infinite loop").is_ok()); assert_eq!( YamlLoader::load_from_str("----"), Ok(vec![Yaml::String(String::from("----"))]) ); assert_eq!( YamlLoader::load_from_str("--- #here goes a comment"), Ok(vec![Yaml::Null]) ); assert_eq!( YamlLoader::load_from_str("---- #here goes a comment"), Ok(vec![Yaml::String(String::from("----"))]) ); } #[test] fn test_plain_datatype_with_into_methods() { let s = " - 'string' - \"string\" - string - 123 - -321 - 1.23 - -1e4 - true - false - !!str 0 - !!int 100 - !!float 2 - !!bool true - !!bool false - 0xFF - 0o77 - +12345 - -.INF - .NAN - !!float .INF "; let mut out = YamlLoader::load_from_str(s).unwrap().into_iter(); let mut doc = out.next().unwrap().into_iter(); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); assert_eq!(doc.next().unwrap().into_string().unwrap(), "string"); 
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 123); assert_eq!(doc.next().unwrap().into_i64().unwrap(), -321); assert_eq!(doc.next().unwrap().into_f64().unwrap(), 1.23); assert_eq!(doc.next().unwrap().into_f64().unwrap(), -1e4); assert!(doc.next().unwrap().into_bool().unwrap()); assert!(!doc.next().unwrap().into_bool().unwrap()); assert_eq!(doc.next().unwrap().into_string().unwrap(), "0"); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 100); assert_eq!(doc.next().unwrap().into_f64().unwrap(), 2.0); assert!(doc.next().unwrap().into_bool().unwrap()); assert!(!doc.next().unwrap().into_bool().unwrap()); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 255); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 63); assert_eq!(doc.next().unwrap().into_i64().unwrap(), 12345); assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::NEG_INFINITY); assert!(doc.next().unwrap().into_f64().is_some()); assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::INFINITY); } #[test] fn test_hash_order() { let s = "--- b: ~ a: ~ c: ~ "; let out = YamlLoader::load_from_str(s).unwrap(); let first = out.into_iter().next().unwrap(); let mut iter = first.into_hash().unwrap().into_iter(); assert_eq!( Some((Yaml::String("b".to_owned()), Yaml::Null)), iter.next() ); assert_eq!( Some((Yaml::String("a".to_owned()), Yaml::Null)), iter.next() ); assert_eq!( Some((Yaml::String("c".to_owned()), Yaml::Null)), iter.next() ); assert_eq!(None, iter.next()); } #[test] fn test_integer_key() { let s = " 0: important: true 1: important: false "; let out = YamlLoader::load_from_str(s).unwrap(); let first = out.into_iter().next().unwrap(); assert!(first[0]["important"].as_bool().unwrap()); } #[test] fn test_indentation_equality() { let four_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let two_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let one_space = 
YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); let mixed_spaces = YamlLoader::load_from_str( r#" hash: with: indentations "#, ) .unwrap() .into_iter() .next() .unwrap(); assert_eq!(four_spaces, two_spaces); assert_eq!(two_spaces, one_space); assert_eq!(four_spaces, mixed_spaces); } #[test] fn test_two_space_indentations() { // https://github.com/kbknapp/clap-rs/issues/965 let s = r#" subcommands: - server: about: server related commands subcommands2: - server: about: server related commands subcommands3: - server: about: server related commands "#; let out = YamlLoader::load_from_str(s).unwrap(); let doc = &out.into_iter().next().unwrap(); println!("{doc:#?}"); assert_eq!(doc["subcommands"][0]["server"], Yaml::Null); assert!(doc["subcommands2"][0]["server"].as_hash().is_some()); assert!(doc["subcommands3"][0]["server"].as_hash().is_some()); } #[test] fn test_recursion_depth_check_objects() { let s = "{a:".repeat(10_000) + &"}".repeat(10_000); assert!(YamlLoader::load_from_str(&s).is_err()); } #[test] fn test_recursion_depth_check_arrays() { let s = "[".repeat(10_000) + &"]".repeat(10_000); assert!(YamlLoader::load_from_str(&s).is_err()); } } insta-1.46.1/src/env.rs000064400000000000000000000627221046102023000127710ustar 00000000000000use std::collections::BTreeMap; use std::io::Write; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use std::{env, fmt, fs}; use crate::utils::is_ci; use crate::{ content::{yaml, Content}, elog, }; use once_cell::sync::Lazy; static WORKSPACES: Lazy>>> = Lazy::new(|| Mutex::new(BTreeMap::new())); static TOOL_CONFIGS: Lazy>>> = Lazy::new(|| Mutex::new(BTreeMap::new())); pub fn get_tool_config(workspace_dir: &Path) -> Arc { TOOL_CONFIGS .lock() .unwrap() .entry(workspace_dir.to_path_buf()) .or_insert_with(|| { ToolConfig::from_workspace(workspace_dir) .unwrap_or_else(|e| panic!("Error building config from {workspace_dir:?}: {e}")) .into() }) .clone() } /// The test 
runner to use. #[cfg(feature = "_cargo_insta_internal")] #[derive(Clone, Copy, Debug, PartialEq, Eq, clap::ValueEnum)] pub enum TestRunner { Auto, CargoTest, Nextest, } #[cfg(feature = "_cargo_insta_internal")] impl TestRunner { /// Fall back to `cargo test` if `cargo nextest` isn't installed and /// `test_runner_fallback` is true pub fn resolve_fallback(&self, test_runner_fallback: bool) -> &TestRunner { use crate::utils::get_cargo; if self == &TestRunner::Nextest && test_runner_fallback && std::process::Command::new(get_cargo()) .arg("nextest") .arg("--version") .output() .map(|output| !output.status.success()) .unwrap_or(true) { &TestRunner::Auto } else { self } } } /// Controls how information is supposed to be displayed. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum OutputBehavior { /// Diff only Diff, /// Short summary Summary, /// The most minimal output Minimal, /// No output at all Nothing, } /// Unreferenced snapshots flag #[cfg(feature = "_cargo_insta_internal")] #[derive(Clone, Copy, Debug, PartialEq, Eq, clap::ValueEnum)] pub enum UnreferencedSnapshots { Auto, Reject, Delete, Warn, Ignore, } /// Snapshot update flag #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum SnapshotUpdate { Always, Auto, Unseen, New, No, Force, } #[derive(Debug)] pub enum Error { Deserialize(crate::content::Error), Env(&'static str), #[allow(unused)] Config(&'static str), } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Error::Deserialize(_) => write!(f, "failed to deserialize tool config"), Error::Env(var) => write!(f, "invalid value for env var '{var}'"), Error::Config(var) => write!(f, "invalid value for config '{var}'"), } } } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::Deserialize(ref err) => Some(err), _ => None, } } } /// Represents a tool configuration. 
#[derive(Debug, Clone)] pub struct ToolConfig { force_pass: bool, require_full_match: bool, output: OutputBehavior, snapshot_update: SnapshotUpdate, #[cfg(feature = "glob")] glob_fail_fast: bool, #[cfg(feature = "_cargo_insta_internal")] test_runner_fallback: bool, #[cfg(feature = "_cargo_insta_internal")] test_runner: TestRunner, #[cfg(feature = "_cargo_insta_internal")] test_unreferenced: UnreferencedSnapshots, #[cfg(feature = "_cargo_insta_internal")] auto_review: bool, #[cfg(feature = "_cargo_insta_internal")] auto_accept_unseen: bool, #[cfg(feature = "_cargo_insta_internal")] review_include_ignored: bool, #[cfg(feature = "_cargo_insta_internal")] review_include_hidden: bool, #[cfg(feature = "_cargo_insta_internal")] review_warn_undiscovered: bool, #[cfg(feature = "_cargo_insta_internal")] disable_nextest_doctest: bool, } impl ToolConfig { /// Loads the tool config from a cargo workspace. pub fn from_workspace(workspace_dir: &Path) -> Result { let mut cfg = None; for choice in &[".config/insta.yaml", "insta.yaml", ".insta.yaml"] { let path = workspace_dir.join(choice); match fs::read_to_string(&path) { Ok(s) => { cfg = Some(yaml::parse_str(&s, &path).map_err(Error::Deserialize)?); break; } // ideally we would not swallow all errors here but unfortunately there are // some cases where we cannot detect the error properly. // Eg we can see NotADirectory here as kind, but on stable rust it cannot // be matched on. Err(_) => continue, } } let cfg = cfg.unwrap_or_else(|| Content::Map(Default::default())); // Support for the deprecated environment variables. This is // implemented in a way that cargo-insta can support older and newer // insta versions alike. Versions of `cargo-insta` <= 1.39 will set // `INSTA_FORCE_UPDATE_SNAPSHOTS` & `INSTA_FORCE_UPDATE`. // // If `INSTA_FORCE_UPDATE_SNAPSHOTS` is the only env var present we emit // a deprecation warning, later to be expanded to `INSTA_FORCE_UPDATE`. 
// // Another approach would be to pass the version of `cargo-insta` in a // `INSTA_CARGO_INSTA_VERSION` env var, and then raise a warning unless // running under cargo-insta <= 1.39. Though it would require adding a // `semver` dependency to this crate or doing the version comparison // ourselves (a tractable task...). let force_update_old_env_vars = if let Ok("1") = env::var("INSTA_FORCE_UPDATE").as_deref() { // Don't raise a warning yet, because recent versions of // `cargo-insta` use this, so that it's compatible with older // versions of `insta`. // // elog!("INSTA_FORCE_UPDATE is deprecated, use // INSTA_UPDATE=force"); true } else if let Ok("1") = env::var("INSTA_FORCE_UPDATE_SNAPSHOTS").as_deref() { // Warn on an old envvar. // // There's some possibility that we're running from within an fairly // old version of `cargo-insta` (before we added an // `INSTA_CARGO_INSTA` env var, so we can't pick that up.) So offer // a caveat in that case. elog!("INSTA_FORCE_UPDATE_SNAPSHOTS is deprecated, use INSTA_UPDATE=force. 
(If running from `cargo insta`, no action is required; upgrading `cargo-insta` will silence this warning.)"); true } else { false }; if force_update_old_env_vars { env::set_var("INSTA_UPDATE", "force"); } Ok(ToolConfig { require_full_match: match env::var("INSTA_REQUIRE_FULL_MATCH").as_deref() { Err(_) | Ok("") => resolve(&cfg, &["behavior", "require_full_match"]) .and_then(|x| x.as_bool()) .unwrap_or(false), Ok("0") => false, Ok("1") => true, _ => return Err(Error::Env("INSTA_REQUIRE_FULL_MATCH")), }, force_pass: match env::var("INSTA_FORCE_PASS").as_deref() { Err(_) | Ok("") => resolve(&cfg, &["behavior", "force_pass"]) .and_then(|x| x.as_bool()) .unwrap_or(false), Ok("0") => false, Ok("1") => true, _ => return Err(Error::Env("INSTA_FORCE_PASS")), }, output: { let env_var = env::var("INSTA_OUTPUT"); let val = match env_var.as_deref() { Err(_) | Ok("") => resolve(&cfg, &["behavior", "output"]) .and_then(|x| x.as_str()) .unwrap_or("diff"), Ok(val) => val, }; match val { "diff" => OutputBehavior::Diff, "summary" => OutputBehavior::Summary, "minimal" => OutputBehavior::Minimal, "none" => OutputBehavior::Nothing, _ => return Err(Error::Env("INSTA_OUTPUT")), } }, snapshot_update: { let env_var = env::var("INSTA_UPDATE"); let val = match env_var.as_deref() { Err(_) | Ok("") => resolve(&cfg, &["behavior", "update"]) .and_then(|x| x.as_str()) // Legacy support for the old force update config .or(resolve(&cfg, &["behavior", "force_update"]).and_then(|x| { elog!("`force_update: true` is deprecated in insta config files, use `update: force`"); match x.as_bool() { Some(true) => Some("force"), _ => None, } })) .unwrap_or("auto"), Ok(val) => val, }; match val { "auto" => SnapshotUpdate::Auto, "always" | "1" => SnapshotUpdate::Always, "new" => SnapshotUpdate::New, "unseen" => SnapshotUpdate::Unseen, "no" => SnapshotUpdate::No, "force" => SnapshotUpdate::Force, _ => return Err(Error::Env("INSTA_UPDATE")), } }, #[cfg(feature = "glob")] glob_fail_fast: match 
env::var("INSTA_GLOB_FAIL_FAST").as_deref() { Err(_) | Ok("") => resolve(&cfg, &["behavior", "glob_fail_fast"]) .and_then(|x| x.as_bool()) .unwrap_or(false), Ok("1") => true, Ok("0") => false, _ => return Err(Error::Env("INSTA_GLOB_FAIL_FAST")), }, #[cfg(feature = "_cargo_insta_internal")] test_runner: { let env_var = env::var("INSTA_TEST_RUNNER"); match env_var.as_deref() { Err(_) | Ok("") => resolve(&cfg, &["test", "runner"]) .and_then(|x| x.as_str()) .unwrap_or("auto"), Ok(val) => val, } .parse::() .map_err(|_| Error::Env("INSTA_TEST_RUNNER"))? }, #[cfg(feature = "_cargo_insta_internal")] test_runner_fallback: match env::var("INSTA_TEST_RUNNER_FALLBACK").as_deref() { Err(_) | Ok("") => resolve(&cfg, &["test", "runner_fallback"]) .and_then(|x| x.as_bool()) .unwrap_or(false), Ok("1") => true, Ok("0") => false, _ => return Err(Error::Env("INSTA_TEST_RUNNER_FALLBACK")), }, #[cfg(feature = "_cargo_insta_internal")] test_unreferenced: { resolve(&cfg, &["test", "unreferenced"]) .and_then(|x| x.as_str()) .unwrap_or("ignore") .parse::() .map_err(|_| Error::Config("unreferenced"))? 
}, #[cfg(feature = "_cargo_insta_internal")] auto_review: resolve(&cfg, &["test", "auto_review"]) .and_then(|x| x.as_bool()) .unwrap_or(false), #[cfg(feature = "_cargo_insta_internal")] auto_accept_unseen: resolve(&cfg, &["test", "auto_accept_unseen"]) .and_then(|x| x.as_bool()) .unwrap_or(false), #[cfg(feature = "_cargo_insta_internal")] review_include_hidden: resolve(&cfg, &["review", "include_hidden"]) .and_then(|x| x.as_bool()) .unwrap_or(false), #[cfg(feature = "_cargo_insta_internal")] review_include_ignored: resolve(&cfg, &["review", "include_ignored"]) .and_then(|x| x.as_bool()) .unwrap_or(false), #[cfg(feature = "_cargo_insta_internal")] review_warn_undiscovered: resolve(&cfg, &["review", "warn_undiscovered"]) .and_then(|x| x.as_bool()) .unwrap_or(true), #[cfg(feature = "_cargo_insta_internal")] disable_nextest_doctest: resolve(&cfg, &["test", "disable_nextest_doctest"]) .and_then(|x| x.as_bool()) .unwrap_or(false), }) } // TODO: Do we want all these methods, vs. just allowing access to the fields? /// Should we fail if metadata doesn't match? pub fn require_full_match(&self) -> bool { self.require_full_match } /// Is insta instructed to fail in tests? pub fn force_pass(&self) -> bool { self.force_pass } /// Returns the intended output behavior for insta. pub fn output_behavior(&self) -> OutputBehavior { self.output } /// Returns the intended snapshot update behavior. pub fn snapshot_update(&self) -> SnapshotUpdate { self.snapshot_update } /// Returns whether the glob should fail fast, as snapshot failures within the glob macro will appear only at the end of execution unless `glob_fail_fast` is set. 
#[cfg(feature = "glob")] pub fn glob_fail_fast(&self) -> bool { self.glob_fail_fast } } #[cfg(feature = "_cargo_insta_internal")] impl ToolConfig { /// Returns the intended test runner pub fn test_runner(&self) -> TestRunner { self.test_runner } /// Whether to fallback to `cargo test` if the test runner isn't available pub fn test_runner_fallback(&self) -> bool { self.test_runner_fallback } pub fn test_unreferenced(&self) -> UnreferencedSnapshots { self.test_unreferenced } /// Returns the auto review flag. pub fn auto_review(&self) -> bool { self.auto_review } /// Returns the auto accept unseen flag. pub fn auto_accept_unseen(&self) -> bool { self.auto_accept_unseen } pub fn review_include_hidden(&self) -> bool { self.review_include_hidden } pub fn review_include_ignored(&self) -> bool { self.review_include_ignored } pub fn review_warn_undiscovered(&self) -> bool { self.review_warn_undiscovered } pub fn disable_nextest_doctest(&self) -> bool { self.disable_nextest_doctest } } /// How snapshots are supposed to be updated #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum SnapshotUpdateBehavior { /// Snapshots are updated in-place InPlace, /// Snapshots are placed in a new file with a .new suffix NewFile, /// Snapshots are not updated at all. NoUpdate, } /// Returns the intended snapshot update behavior. 
pub fn snapshot_update_behavior(tool_config: &ToolConfig, unseen: bool) -> SnapshotUpdateBehavior { match tool_config.snapshot_update() { SnapshotUpdate::Always => SnapshotUpdateBehavior::InPlace, SnapshotUpdate::Auto => { if is_ci() { SnapshotUpdateBehavior::NoUpdate } else { SnapshotUpdateBehavior::NewFile } } SnapshotUpdate::Unseen => { if unseen { SnapshotUpdateBehavior::NewFile } else { SnapshotUpdateBehavior::InPlace } } SnapshotUpdate::New => SnapshotUpdateBehavior::NewFile, SnapshotUpdate::No => SnapshotUpdateBehavior::NoUpdate, SnapshotUpdate::Force => SnapshotUpdateBehavior::InPlace, } } pub enum Workspace { DetectWithCargo(&'static str), UseAsIs(&'static str), } /// Returns the cargo workspace path for a crate manifest, like /// `/Users/janedoe/projects/insta` when passed /// `/Users/janedoe/projects/insta/insta/Cargo.toml`. /// /// If `INSTA_WORKSPACE_ROOT` environment variable is set at runtime, use the value as-is. /// If `INSTA_WORKSPACE_ROOT` environment variable is set at compile time, use the value as-is. /// If `INSTA_WORKSPACE_ROOT` environment variable is not set, use `cargo metadata` to find the workspace root. pub fn get_cargo_workspace(workspace: Workspace) -> Arc { // This is useful where CARGO_MANIFEST_DIR at compilation points to some // transient location. This can easily happen when building the test in one // directory but running it in another. if let Ok(workspace_root) = env::var("INSTA_WORKSPACE_ROOT") { return PathBuf::from(workspace_root).into(); } // Distinguish if we need to run `cargo metadata`` or if we can return the workspace // as is. // This is useful if INSTA_WORKSPACE_ROOT was set at compile time, not pointing to // the cargo manifest directory let manifest_dir = match workspace { Workspace::UseAsIs(workspace_root) => return PathBuf::from(workspace_root).into(), Workspace::DetectWithCargo(manifest_dir) => manifest_dir, }; WORKSPACES .lock() // we really do not care about poisoning here. 
.unwrap() .entry(manifest_dir.to_string()) .or_insert_with(|| { get_cargo_workspace_from_metadata(manifest_dir).unwrap_or_else(|e| { eprintln!("cargo metadata failed in {manifest_dir}: {e}"); eprintln!("will use manifest directory as fallback"); Arc::new(PathBuf::from(manifest_dir)) }) }) .clone() } fn get_cargo_workspace_from_metadata( manifest_dir: &str, ) -> Result, Box> { let output = std::process::Command::new(env::var("CARGO").unwrap_or_else(|_| "cargo".to_string())) .args(["metadata", "--format-version=1", "--no-deps"]) .current_dir(manifest_dir) .output()?; if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); return Err(format!("command failed with {}: {stderr}", output.status).into()); } let stdout = std::str::from_utf8(&output.stdout).map_err(|e| format!("invalid UTF-8 in output: {e}"))?; let docs = crate::content::yaml::vendored::yaml::YamlLoader::load_from_str(stdout) .map_err(|e| format!("failed to parse YAML: {e}"))?; let metadata = docs.into_iter().next().ok_or("no content found in YAML")?; let workspace_root = metadata["workspace_root"] .clone() .into_string() .ok_or("couldn't find 'workspace_root' in metadata")?; Ok(Arc::new(workspace_root.into())) } #[test] fn test_get_cargo_workspace_manifest_dir() { let workspace = get_cargo_workspace(Workspace::DetectWithCargo(env!("CARGO_MANIFEST_DIR"))); // The absolute path of the workspace should be a valid directory // In worktrees or other setups, the path might not end with "insta" // but should still be a parent of the manifest directory let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); assert!(manifest_dir.starts_with(&*workspace)); } #[test] fn test_get_cargo_workspace_insta_workspace() { let workspace = get_cargo_workspace(Workspace::UseAsIs("/tmp/insta_workspace_root")); // The absolute path of the workspace, like `/tmp/insta_workspace_root` assert!(workspace.ends_with("insta_workspace_root")); } #[cfg(feature = "_cargo_insta_internal")] impl 
std::str::FromStr for TestRunner { type Err = (); fn from_str(value: &str) -> Result { match value { "auto" => Ok(TestRunner::Auto), "cargo-test" => Ok(TestRunner::CargoTest), "nextest" => Ok(TestRunner::Nextest), _ => Err(()), } } } #[cfg(feature = "_cargo_insta_internal")] impl std::str::FromStr for UnreferencedSnapshots { type Err = (); fn from_str(value: &str) -> Result { match value { "auto" => Ok(UnreferencedSnapshots::Auto), "reject" | "error" => Ok(UnreferencedSnapshots::Reject), "delete" => Ok(UnreferencedSnapshots::Delete), "warn" => Ok(UnreferencedSnapshots::Warn), "ignore" => Ok(UnreferencedSnapshots::Ignore), _ => Err(()), } } } /// Memoizes a snapshot file in the reference file, as part of removing unreferenced snapshots. pub fn memoize_snapshot_file(snapshot_file: &Path) { if let Ok(path) = env::var("INSTA_SNAPSHOT_REFERENCES_FILE") { let mut f = fs::OpenOptions::new() .append(true) .create(true) .open(path) .unwrap(); f.write_all(format!("{}\n", snapshot_file.display()).as_bytes()) .unwrap(); } } /// Appends a warning to the warnings file for cargo-insta to display after tests. /// Best-effort: does nothing if `INSTA_WARNINGS_FILE` is not set or IO fails. pub fn memoize_warning(message: &str) { if let Ok(path) = env::var("INSTA_WARNINGS_FILE") { if let Ok(mut f) = fs::OpenOptions::new().append(true).create(true).open(path) { let _ = writeln!(f, "{}", message); } } } /// Returns the pending directory if `INSTA_PENDING_DIR` is set and non-empty. pub fn get_pending_dir() -> Option { env::var("INSTA_PENDING_DIR") .ok() .filter(|s| !s.is_empty()) .map(PathBuf::from) } /// Tries to strip a prefix from a path, with optional normalization fallback. /// /// First attempts direct `strip_prefix` (preserves symlinks, works for Bazel). /// If that fails, tries normalizing both paths with `canonicalize()` and /// retrying (handles Windows path format differences like `\\?\` prefix). 
fn strip_prefix_with_fallback<'a>( path: &'a Path, prefix: &Path, ) -> Result, std::path::StripPrefixError> { // Try direct strip_prefix first. This preserves symlinks which is // essential for Bazel's execroot setup where paths are symlinks. if let Ok(relative) = path.strip_prefix(prefix) { return Ok(std::borrow::Cow::Borrowed(relative)); } // Fallback: normalize both paths with canonicalize() and try again. // This handles Windows path format differences (e.g., `\\?\` prefix, // 8.3 short names). On Unix, canonicalize() follows symlinks, so we // only reach here if direct strip failed (likely not a symlink case). if let (Ok(normalized_path), Ok(normalized_prefix)) = (path.canonicalize(), prefix.canonicalize()) { if let Ok(relative) = normalized_path.strip_prefix(&normalized_prefix) { return Ok(std::borrow::Cow::Owned(relative.to_path_buf())); } } // Return the original error path.strip_prefix(prefix).map(std::borrow::Cow::Borrowed) } /// Computes the path for a pending snapshot file. /// /// If `INSTA_PENDING_DIR` is set, returns a path within that directory /// preserving the relative structure from the workspace. Otherwise returns /// the original path unchanged. /// /// # Panics /// /// Panics if `INSTA_PENDING_DIR` is set but the snapshot path is outside the /// workspace (e.g., external test paths like `../tests/lib.rs`). This is because /// such paths would escape the pending directory. pub fn pending_snapshot_path(workspace: &Path, original_path: &Path) -> PathBuf { match get_pending_dir() { Some(pending_dir) => { // Compute relative path from workspace to original_path. // Try direct strip first (for Bazel), fall back to normalized (for Windows). let relative = strip_prefix_with_fallback(original_path, workspace).unwrap_or_else(|_| { panic!( "INSTA_PENDING_DIR is set but snapshot path {:?} is outside \ workspace {:?}. 
External test paths (e.g., path = \"../tests/lib.rs\" \ in Cargo.toml) are not compatible with INSTA_PENDING_DIR because \ the relative path would escape the pending directory.", original_path, workspace ); }); // Check if the relative path contains ".." which would escape // the pending directory. This happens with external test paths // like `path = "../tests/lib.rs"` in Cargo.toml. if relative .components() .any(|c| c == std::path::Component::ParentDir) { panic!( "INSTA_PENDING_DIR is set but snapshot path {:?} would escape \ the pending directory (relative path contains \"..\"). \ External test paths (e.g., path = \"../tests/lib.rs\" in Cargo.toml) \ are not compatible with INSTA_PENDING_DIR.", original_path ); } pending_dir.join(relative) } None => original_path.to_path_buf(), } } fn resolve<'a>(value: &'a Content, path: &[&str]) -> Option<&'a Content> { path.iter() .try_fold(value, |node, segment| match node.resolve_inner() { Content::Map(fields) => fields .iter() .find(|x| x.0.as_str() == Some(segment)) .map(|x| &x.1), Content::Struct(_, fields) | Content::StructVariant(_, _, _, fields) => { fields.iter().find(|x| x.0 == *segment).map(|x| &x.1) } _ => None, }) } insta-1.46.1/src/filters.rs000064400000000000000000000040751046102023000136460ustar 00000000000000use std::borrow::Cow; use std::iter::FromIterator; use std::iter::IntoIterator; use regex::Regex; /// Represents stored filters. #[derive(Debug, Default, Clone)] #[cfg_attr(docsrs, doc(cfg(feature = "filters")))] pub struct Filters { rules: Vec<(Regex, String)>, } impl<'a, I> From for Filters where I: IntoIterator, { fn from(value: I) -> Self { Self::from_iter(value) } } impl<'a> FromIterator<(&'a str, &'a str)> for Filters { fn from_iter>(iter: I) -> Self { let mut rv = Filters::default(); for (regex, replacement) in iter { rv.add(regex, replacement); } rv } } impl Filters { /// Adds a simple regex with a replacement. 
pub(crate) fn add>(&mut self, regex: &str, replacement: S) { self.rules.push(( Regex::new(regex).expect("invalid regex for snapshot filter rule"), replacement.into(), )); } /// Clears all filters. pub(crate) fn clear(&mut self) { self.rules.clear(); } /// Applies all filters to the given snapshot. pub(crate) fn apply_to<'s>(&self, s: &'s str) -> Cow<'s, str> { let mut rv = Cow::Borrowed(s); for (regex, replacement) in &self.rules { match regex.replace_all(&rv, replacement) { Cow::Borrowed(_) => continue, Cow::Owned(value) => rv = Cow::Owned(value), }; } rv } } #[test] fn test_filters() { let mut filters = Filters::default(); filters.add("\\bhello\\b", "[NAME]"); filters.add("(a)", "[$1]"); assert_eq!( filters.apply_to("hellohello hello abc"), "hellohello [NAME] [a]bc" ); } #[test] fn test_static_str_array_conversion() { let arr: [(&'static str, &'static str); 2] = [("a1", "b1"), ("a2", "b2")]; let _ = Filters::from_iter(arr); } #[test] fn test_vec_str_conversion() { let vec: Vec<(&str, &str)> = Vec::from([("a1", "b1"), ("a2", "b2")]); let _ = Filters::from(vec); } insta-1.46.1/src/glob.rs000064400000000000000000000123521046102023000131160ustar 00000000000000use std::env; use std::path::{Path, PathBuf}; use std::sync::Mutex; use globset::{GlobBuilder, GlobMatcher}; use once_cell::sync::Lazy; use walkdir::WalkDir; use crate::env::get_tool_config; use crate::settings::Settings; use crate::utils::style; pub(crate) struct GlobCollector { pub(crate) fail_fast: bool, pub(crate) failed: usize, pub(crate) show_insta_hint: bool, } /// the glob stack holds failure count and an indication if `cargo insta review` /// should be run. 
pub(crate) static GLOB_STACK: Lazy>> = Lazy::new(Mutex::default); static GLOB_FILTER: Lazy> = Lazy::new(|| { env::var("INSTA_GLOB_FILTER") .unwrap_or_default() .split(';') .filter(|x| !x.is_empty()) .filter_map(|filter| { GlobBuilder::new(filter) .case_insensitive(true) .build() .ok() .map(|x| x.compile_matcher()) }) .collect() }); pub fn glob_exec(workspace_dir: &Path, base: &Path, pattern: &str, mut f: F) { // Check if the pattern contains parent directory traversal (../) if pattern.contains("../") || pattern.starts_with("..") { panic!("Parent directory traversal is not supported in glob patterns. Use the three-argument form of glob! with an explicit base directory instead."); } // If settings.allow_empty_glob() == true and `base` doesn't exist, skip // everything. This is necessary as `base` is user-controlled via `glob!/3` // and may not exist. let mut settings = Settings::clone_current(); if settings.allow_empty_glob() && !base.exists() { return; } let glob = GlobBuilder::new(pattern) .case_insensitive(true) .literal_separator(true) .build() .unwrap() .compile_matcher(); let walker = WalkDir::new(base).follow_links(true); let mut glob_found_matches = false; GLOB_STACK.lock().unwrap().push(GlobCollector { failed: 0, show_insta_hint: false, fail_fast: get_tool_config(workspace_dir).glob_fail_fast(), }); // step 1: collect all matching files let mut all_matching_files = vec![]; let mut filtered_files = vec![]; for file in walker { let file = file.unwrap(); let path = file.path(); let stripped_path = path.strip_prefix(base).unwrap_or(path); if !glob.is_match(stripped_path) { continue; } glob_found_matches = true; all_matching_files.push(path.to_path_buf()); // if there is a glob filter, skip if it does not match this path if !GLOB_FILTER.is_empty() && !GLOB_FILTER.iter().any(|x| x.is_match(stripped_path)) { eprintln!("Skipping {} due to glob filter", stripped_path.display()); continue; } filtered_files.push(path.to_path_buf()); } // step 2: sort, determine common 
prefix and run assertions all_matching_files.sort(); filtered_files.sort(); // Use the common prefix from ALL matching files, not just filtered ones // This preserves the original snapshot naming when filtering let common_prefix = find_common_prefix(&all_matching_files); let matching_files = filtered_files; for path in &matching_files { settings.set_input_file(path); // if there is a common prefix, use that stirp down the input file. That way we // can ensure that a glob like inputs/*/*.txt with a/file.txt and b/file.txt // does not create two identical snapshot suffixes. Instead of file.txt for both // it would end up as a/file.txt and b/file.txt. let snapshot_suffix = if let Some(prefix) = common_prefix { path.strip_prefix(prefix).unwrap().as_os_str() } else { path.file_name().unwrap() }; settings.set_snapshot_suffix(snapshot_suffix.to_str().unwrap()); settings.bind(|| { f(path); }); } let top = GLOB_STACK.lock().unwrap().pop().unwrap(); if !glob_found_matches && !settings.allow_empty_glob() { panic!("the glob! macro did not match any files."); } if top.failed > 0 { if top.show_insta_hint { println!( "{hint}", hint = style("To update snapshots run `cargo insta review`").dim(), ); } if top.failed > 1 { println!( "{hint}", hint = style("To enable fast failing for glob! export INSTA_GLOB_FAIL_FAST=1 as environment variable.").dim() ); } panic!( "glob! 
resulted in {} snapshot assertion failure{}", top.failed, if top.failed == 1 { "" } else { "s" }, ); } } fn find_common_prefix(sorted_paths: &[PathBuf]) -> Option<&Path> { let first = sorted_paths.first()?; let last = sorted_paths.last()?; let prefix_len = first .components() .zip(last.components()) .take_while(|(a, b)| a == b) .count(); if prefix_len == 0 { None } else { let mut prefix = first.components(); for _ in 0..first.components().count() - prefix_len { prefix.next_back(); } Some(prefix.as_path()) } } insta-1.46.1/src/lib.rs000064400000000000000000000333531046102023000127450ustar 00000000000000#![warn(clippy::doc_markdown)] #![warn(clippy::needless_raw_strings)] #![warn(rustdoc::all)] //!
//! //!

insta: a snapshot testing library for Rust

//!
//! //! # What are snapshot tests //! //! Snapshots tests (also sometimes called approval tests) are tests that //! assert values against a reference value (the snapshot). This is similar //! to how [`assert_eq!`] lets you compare a value against a reference value but //! unlike simple string assertions, snapshot tests let you test against complex //! values and come with comprehensive tools to review changes. //! //! Snapshot tests are particularly useful if your reference values are very //! large or change often. //! //! # What it looks like: //! //! ```no_run //! #[test] //! fn test_hello_world() { //! insta::assert_debug_snapshot!(vec![1, 2, 3]); //! } //! ``` //! //! Where are the snapshots stored? Right next to your test in a folder //! called `snapshots` as individual [`.snap` files](https://insta.rs/docs/snapshot-files/). //! //! Got curious? //! //! * [Read the introduction](https://insta.rs/docs/quickstart/) //! * [Read the main documentation](https://insta.rs/docs/) which does not just //! cover the API of the crate but also many of the details of how it works. //! * There is a screencast that shows the entire workflow: [watch the insta //! introduction screencast](https://www.youtube.com/watch?v=rCHrMqE4JOY&feature=youtu.be). //! //! # Writing Tests //! //! ``` //! use insta::assert_debug_snapshot; //! //! # #[allow(clippy::test_attr_in_doctest)] //! #[test] //! fn test_snapshots() { //! assert_debug_snapshot!(vec![1, 2, 3]); //! } //! ``` //! //! The recommended flow is to run the tests once, have them fail and check //! if the result is okay. //! By default, the new snapshots are stored next //! to the old ones with the extra `.new` extension. Once you are satisfied //! move the new files over. To simplify this workflow you can use //! `cargo insta review` (requires //! [`cargo-insta`](https://crates.io/crates/cargo-insta)) which will let you //! interactively review them: //! //! ```text //! $ cargo test //! $ cargo insta review //! ``` //! //! 
# Use Without `cargo-insta` //! //! Note that `cargo-insta` is entirely optional. You can also just use insta //! directly from `cargo test` and control it via the `INSTA_UPDATE` environment //! variable — see [Updating snapshots](#updating-snapshots) for details. //! //! You can for instance first run the tests and not write any new snapshots, and //! if you like them run the tests again and update them: //! //! ```text //! INSTA_UPDATE=no cargo test //! INSTA_UPDATE=always cargo test //! ``` //! //! # Assertion Macros //! //! This crate exports multiple macros for snapshot testing: //! //! - [`assert_snapshot!`] for comparing basic snapshots of //! [`Display`](std::fmt::Display) outputs, often strings. //! - [`assert_debug_snapshot!`] for comparing [`Debug`] outputs of values. //! //! The following macros require the use of [`serde::Serialize`]: //! #![cfg_attr( feature = "csv", doc = "- [`assert_csv_snapshot!`] for comparing CSV serialized output. (requires the `csv` feature)" )] #![cfg_attr( feature = "toml", doc = "- [`assert_toml_snapshot!`] for comparing TOML serialized output. (requires the `toml` feature)" )] #![cfg_attr( feature = "yaml", doc = "- [`assert_yaml_snapshot!`] for comparing YAML serialized output. (requires the `yaml` feature)" )] #![cfg_attr( feature = "ron", doc = "- [`assert_ron_snapshot!`] for comparing RON serialized output. (requires the `ron` feature)" )] #![cfg_attr( feature = "json", doc = "- [`assert_json_snapshot!`] for comparing JSON serialized output. (requires the `json` feature)" )] #![cfg_attr( feature = "json", doc = "- [`assert_compact_json_snapshot!`] for comparing JSON serialized output while preferring single-line formatting. (requires the `json` feature)" )] //! //! For macros that work with [`serde`] this crate also permits redacting of //! partial values. See [redactions in the //! documentation](https://insta.rs/docs/redactions/) for more information. //! //! # Updating snapshots //! //! 
During test runs snapshots will be updated according to the `INSTA_UPDATE` //! environment variable. The default is `auto` which will write snapshots for //! any failing tests into `.snap.new` files (if no CI is detected) so that //! [`cargo-insta`](https://crates.io/crates/cargo-insta) can pick them up for //! review. Normally you don't have to change this variable. //! //! `INSTA_UPDATE` modes: //! //! - `auto`: the default. `no` for CI environments or `new` otherwise //! - `new`: writes snapshots for any failing tests into `.snap.new` files, //! pending review //! - `always`: writes snapshots for any failing tests into `.snap` files, //! bypassing review //! - `unseen`: `always` for previously unseen snapshots or `new` for existing //! snapshots //! - `no`: does not write to snapshot files at all; just runs tests //! - `force`: forcibly updates snapshot files, even if assertions pass //! //! When `new`, `auto` or `unseen` is used, the //! [`cargo-insta`](https://crates.io/crates/cargo-insta) command can be used to //! review the snapshots conveniently: //! //! ```text //! $ cargo insta review //! ``` //! //! "enter" or "a" accepts a new snapshot, "escape" or "r" rejects, "space" or //! "s" skips the snapshot for now. //! //! For more information [read the cargo insta //! docs](https://insta.rs/docs/cli/). //! //! # Inline Snapshots //! //! Additionally snapshots can also be stored inline. In that case the format //! for the snapshot macros is `assert_snapshot!(reference_value, @"snapshot")`. //! The leading at sign (`@`) indicates that the following string is the //! reference value. On review, `cargo-insta` will update the string with the //! new value. //! //! Example: //! //! ```no_run //! # use insta::assert_snapshot; //! assert_snapshot!(2 + 2, @""); //! ``` //! //! Like with normal snapshots, an initial test failure will write the proposed //! value into a draft file (note that inline snapshots use `.pending-snap` //! files rather than `.snap.new` files). 
Running `cargo insta review` will //! review the proposed changes and update the source files on acceptance //! automatically. //! //! # Features //! //! The following features exist: //! //! * `csv`: enables CSV support (via [`serde`]) //! * `json`: enables JSON support (via [`serde`]) //! * `ron`: enables RON support (via [`serde`]) //! * `toml`: enables TOML support (via [`serde`]) //! * `yaml`: enables YAML support (via [`serde`]) //! * `redactions`: enables support for redactions //! * `filters`: enables support for filters //! * `glob`: enables support for globbing ([`glob!`]) //! * `colors`: enables color output (enabled by default) //! //! For legacy reasons the `json` and `yaml` features are enabled by default in //! limited capacity. You will receive a deprecation warning if you are not //! opting into them but for now the macros will continue to function. //! //! Enabling any of the [`serde`] based formats enables the hidden `serde` feature //! which gates some [`serde`] specific APIs such as [`Settings::set_info`]. //! //! # Dependencies //! //! [`insta`] tries to be light in dependencies but this is tricky to accomplish //! given what it tries to do. //! By default, it currently depends on [`serde`] for //! the [`assert_toml_snapshot!`] and [`assert_yaml_snapshot!`] macros. In the //! future this default dependencies will be removed. To already benefit from //! this optimization you can disable the default features and manually opt into //! what you want. //! //! # Settings //! //! There are some settings that can be changed on a per-thread (and thus //! per-test) basis. For more information see [Settings]. //! //! Additionally, Insta will load a YAML config file with settings that change //! the behavior of insta between runs. It's loaded from any of the following //! locations: `.config/insta.yaml`, `insta.yaml` and `.insta.yaml` from the //! workspace root. The following config options exist: //! //! ```yaml //! behavior: //! 
# also set by INSTA_REQUIRE_FULL_MATCH //! require_full_match: true/false //! # also set by INSTA_FORCE_PASS //! force_pass: true/false //! # also set by INSTA_OUTPUT //! output: "diff" | "summary" | "minimal" | "none" //! # also set by INSTA_UPDATE //! update: "auto" | "new" | "always" | "no" | "unseen" | "force" //! # also set by INSTA_GLOB_FAIL_FAST //! glob_fail_fast: true/false //! //! # these are used by cargo insta test //! test: //! # also set by INSTA_TEST_RUNNER //! # cargo-nextest binary path can be explicitly set by INSTA_CARGO_NEXTEST_BIN //! runner: "auto" | "cargo-test" | "nextest" //! # whether to fallback to `cargo-test` if `nextest` is not available, //! # also set by INSTA_TEST_RUNNER_FALLBACK, default false //! runner_fallback: true/false //! # disable running doctests separately when using nextest //! disable_nextest_doctest: true/false //! # automatically assume --review was passed to cargo insta test //! auto_review: true/false //! # automatically assume --accept-unseen was passed to cargo insta test //! auto_accept_unseen: true/false //! //! # these are used by cargo insta review //! review: //! # also look for snapshots in ignored folders //! include_ignored: true / false //! # also look for snapshots in hidden folders //! include_hidden: true / false //! # show a warning if undiscovered (ignored or hidden) snapshots are found. //! # defaults to true but creates a performance hit. //! warn_undiscovered: true / false //! ``` //! //! # External Diff Tools //! //! By default, insta displays diffs inline in unified format. You can configure //! an external diff tool via the `INSTA_DIFF_TOOL` environment variable. When set, //! insta writes the old and new snapshot contents to temporary files and invokes //! your diff tool with those files as arguments. //! //! ```bash //! # Use delta for syntax-highlighted diffs //! export INSTA_DIFF_TOOL=delta //! //! # With arguments //! export INSTA_DIFF_TOOL="delta --side-by-side" //! //! 
# Or any other diff tool //! export INSTA_DIFF_TOOL=difftastic //! ``` //! //! This is a user-level setting (not project-level) since diff tool preference //! varies by developer. The tool is invoked as ` [args...] `. //! If the tool fails to run, insta falls back to the built-in diff. //! //! # Optional: Faster Runs //! //! Insta benefits from being compiled in release mode, even as dev dependency. //! It will compile slightly slower once, but use less memory, have faster diffs //! and just generally be more fun to use. To achieve that, opt [`insta`] and //! [`similar`] (the diffing library) into higher optimization in your //! `Cargo.toml`: //! //! ```yaml //! [profile.dev.package.insta] //! opt-level = 3 //! //! [profile.dev.package.similar] //! opt-level = 3 //! ``` //! //! You can also disable the default features of [`insta`] which will cut down on //! the compile time a bit by removing some quality of life features. //! //! [`insta`]: https://docs.rs/insta #![cfg_attr(docsrs, feature(doc_cfg))] #[macro_use] mod macros; mod content; mod env; #[doc(hidden)] pub mod output; mod runtime; #[cfg(feature = "serde")] mod serialization; mod settings; mod snapshot; mod utils; #[cfg(feature = "redactions")] mod redaction; #[cfg(feature = "filters")] mod filters; #[cfg(feature = "glob")] mod glob; #[cfg(test)] mod test; pub use crate::settings::Settings; pub use crate::snapshot::{MetaData, Snapshot, TextSnapshotKind}; /// Exposes some library internals. /// /// You're unlikely to want to work with these objects but they /// are exposed for documentation primarily. /// /// This module does not follow the same stability guarantees as the rest of the crate and is not /// guaranteed to be compatible between minor versions. 
pub mod internals { pub use crate::content::Content; #[cfg(feature = "filters")] pub use crate::filters::Filters; pub use crate::runtime::AutoName; pub use crate::settings::SettingsBindDropGuard; pub use crate::snapshot::{MetaData, SnapshotContents}; #[cfg(feature = "redactions")] pub use crate::{ redaction::{ContentPath, Redaction}, settings::Redactions, }; } // exported for cargo-insta only #[doc(hidden)] #[cfg(feature = "_cargo_insta_internal")] pub mod _cargo_insta_support { pub use crate::{ content::Error as ContentError, env::{ get_pending_dir, Error as ToolConfigError, OutputBehavior, SnapshotUpdate, TestRunner, ToolConfig, UnreferencedSnapshots, }, output::SnapshotPrinter, snapshot::PendingInlineSnapshot, snapshot::SnapshotContents, snapshot::TextSnapshotContents, utils::get_cargo, utils::is_ci, }; } // useful for redactions #[cfg(feature = "redactions")] pub use crate::redaction::{dynamic_redaction, rounded_redaction, sorted_redaction}; // these are here to make the macros work #[doc(hidden)] pub mod _macro_support { pub use crate::content::Content; pub use crate::env::{get_cargo_workspace, Workspace}; pub use crate::runtime::{ assert_snapshot, with_allow_duplicates, AutoName, BinarySnapshotValue, InlineValue, SnapshotValue, }; pub use core::{file, line, module_path}; pub use std::{any, env, format, option_env, path, vec}; #[cfg(feature = "serde")] pub use crate::serialization::{serialize_value, SerializationFormat, SnapshotLocation}; #[cfg(feature = "glob")] pub use crate::glob::glob_exec; #[cfg(feature = "redactions")] pub use crate::{ redaction::Redaction, redaction::Selector, serialization::serialize_value_redacted, }; } insta-1.46.1/src/macros.rs000064400000000000000000000541221046102023000134600ustar 00000000000000/// Utility macro to return the name of the current function. #[doc(hidden)] #[macro_export] macro_rules! 
_function_name { () => {{ fn f() {} fn type_name_of_val(_: T) -> &'static str { $crate::_macro_support::any::type_name::() } let mut name = type_name_of_val(f).strip_suffix("::f").unwrap_or(""); while let Some(rest) = name.strip_suffix("::{{closure}}") { name = rest; } name }}; } #[doc(hidden)] #[macro_export] macro_rules! _get_workspace_root { () => {{ use $crate::_macro_support::{env, option_env}; // Note the `env!("CARGO_MANIFEST_DIR")` needs to be in the macro (in // contrast to a function in insta) because the macro needs to capture // the value in the caller library, an exclusive property of macros. // By default the `CARGO_MANIFEST_DIR` environment variable is used as the workspace root. // If the `INSTA_WORKSPACE_ROOT` environment variable is set at compile time it will override the default. // This can be useful to avoid including local paths in the binary. const WORKSPACE_ROOT: $crate::_macro_support::Workspace = if let Some(root) = option_env!("INSTA_WORKSPACE_ROOT") { $crate::_macro_support::Workspace::UseAsIs(root) } else { $crate::_macro_support::Workspace::DetectWithCargo(env!("CARGO_MANIFEST_DIR")) }; $crate::_macro_support::get_cargo_workspace(WORKSPACE_ROOT) }}; } /// Asserts a [`serde::Serialize`] snapshot in CSV format. /// /// **Feature:** `csv` (disabled by default) /// /// This works exactly like [`assert_yaml_snapshot!`](crate::assert_yaml_snapshot!) /// but serializes in [CSV](https://github.com/burntsushi/rust-csv) format instead of /// YAML. /// /// Example: /// /// ```no_run /// insta::assert_csv_snapshot!(vec![1, 2, 3]); /// ``` /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// The snapshot name is optional but can be provided as first argument. 
#[cfg(feature = "csv")] #[cfg_attr(docsrs, doc(cfg(feature = "csv")))] #[macro_export] macro_rules! assert_csv_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=Csv, $($arg)*); }; } /// Asserts a [`serde::Serialize`] snapshot in TOML format. /// /// **Feature:** `toml` (disabled by default) /// /// This works exactly like [`assert_yaml_snapshot!`](crate::assert_yaml_snapshot!) /// but serializes in [TOML](https://github.com/alexcrichton/toml-rs) format instead of /// YAML. Note that TOML cannot represent all values due to limitations in the /// format. /// /// Example: /// /// ```no_run /// insta::assert_toml_snapshot!(vec![1, 2, 3]); /// ``` /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// The snapshot name is optional but can be provided as first argument. #[cfg(feature = "toml")] #[cfg_attr(docsrs, doc(cfg(feature = "toml")))] #[macro_export] macro_rules! assert_toml_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=Toml, $($arg)*); }; } /// Asserts a [`serde::Serialize`] snapshot in YAML format. /// /// **Feature:** `yaml` /// /// The value needs to implement the [`serde::Serialize`] trait and the snapshot /// will be serialized in YAML format. This does mean that unlike the debug /// snapshot variant the type of the value does not appear in the output. /// You can however use the [`assert_ron_snapshot!`](crate::assert_ron_snapshot!) macro to dump out /// the value in [RON](https://github.com/ron-rs/ron/) format which retains some /// type information for more accurate comparisons. 
/// /// Example: /// /// ```no_run /// # use insta::*; /// assert_yaml_snapshot!(vec![1, 2, 3]); /// ``` /// /// Unlike the [`assert_debug_snapshot!`](crate::assert_debug_snapshot!) /// macro, this one has a secondary mode where redactions can be defined. /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// Example: /// #[cfg_attr(feature = "redactions", doc = " ```no_run")] #[cfg_attr(not(feature = "redactions"), doc = " ```ignore")] /// # use insta::*; use serde::Serialize; /// # #[derive(Serialize)] struct Value; let value = Value; /// assert_yaml_snapshot!(value, { /// ".key.to.redact" => "[replacement value]", /// ".another.key.*.to.redact" => 42 /// }); /// ``` /// /// The replacement value can be a string, integer or any other primitive value. /// /// For inline usage the format is `(expression, @reference_value)` where the /// reference value must be a string literal. If you make the initial snapshot /// just use an empty string (`@""`). /// /// The snapshot name is optional but can be provided as first argument. #[cfg(feature = "yaml")] #[cfg_attr(docsrs, doc(cfg(feature = "yaml")))] #[macro_export] macro_rules! assert_yaml_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=Yaml, $($arg)*); }; } /// Asserts a [`serde::Serialize`] snapshot in RON format. /// /// **Feature:** `ron` (disabled by default) /// /// This works exactly like [`assert_yaml_snapshot!`](crate::assert_yaml_snapshot!) /// but serializes in [RON](https://github.com/ron-rs/ron/) format instead of /// YAML which retains some type information for more accurate comparisons. 
/// /// Example: /// /// ```no_run /// # use insta::*; /// assert_ron_snapshot!(vec![1, 2, 3]); /// ``` /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// The snapshot name is optional but can be provided as first argument. #[cfg(feature = "ron")] #[cfg_attr(docsrs, doc(cfg(feature = "ron")))] #[macro_export] macro_rules! assert_ron_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=Ron, $($arg)*); }; } /// Asserts a [`serde::Serialize`] snapshot in JSON format. /// /// **Feature:** `json` /// /// This works exactly like [`assert_yaml_snapshot!`](crate::assert_yaml_snapshot!) but serializes in JSON format. /// This is normally not recommended because it makes diffs less reliable, but it can /// be useful for certain specialized situations. /// /// Example: /// /// ```no_run /// # use insta::*; /// assert_json_snapshot!(vec![1, 2, 3]); /// ``` /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// The snapshot name is optional but can be provided as first argument. #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] #[macro_export] macro_rules! assert_json_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=Json, $($arg)*); }; } /// Asserts a [`serde::Serialize`] snapshot in compact JSON format. /// /// **Feature:** `json` /// /// This works exactly like [`assert_json_snapshot!`](crate::assert_json_snapshot!) 
but serializes into a single /// line for as long as the output is less than 120 characters. This can be useful /// in cases where you are working with small result outputs but comes at the cost /// of slightly worse diffing behavior. /// /// Example: /// /// ```no_run /// # use insta::*; /// assert_compact_json_snapshot!(vec![1, 2, 3]); /// ``` /// /// The third argument to the macro can be an object expression for redaction. /// It's in the form `{ selector => replacement }` or `match .. { selector => replacement }`. /// For more information about redactions refer to the [redactions feature in /// the guide](https://insta.rs/docs/redactions/). /// /// The snapshot name is optional but can be provided as first argument. #[cfg(feature = "json")] #[cfg_attr(docsrs, doc(cfg(feature = "json")))] #[macro_export] macro_rules! assert_compact_json_snapshot { ($($arg:tt)*) => { $crate::_assert_serialized_snapshot!(format=JsonCompact, $($arg)*); }; } // This macro handles optional trailing commas. #[doc(hidden)] #[macro_export] macro_rules! _assert_serialized_snapshot { // If there are redaction expressions, capture the redactions expressions // and pass to `_assert_snapshot_base` (format=$format:ident, $value:expr, $(match ..)? {$($k:expr => $v:expr),* $(,)?} $($arg:tt)*) => {{ let transform = |value| { $crate::_prepare_snapshot_for_redaction!(value, {$($k => $v),*}, $format) }; $crate::_assert_snapshot_base!(transform=transform, $value $($arg)*); }}; // If there's a name, redaction expressions, and debug_expr, capture and pass all to `_assert_snapshot_base` (format=$format:ident, $name:expr, $value:expr, $(match ..)? {$($k:expr => $v:expr),* $(,)?}, $debug_expr:expr $(,)?) 
=> {{ let transform = |value| { $crate::_prepare_snapshot_for_redaction!(value, {$($k => $v),*}, $format) }; $crate::_assert_snapshot_base!(transform=transform, $name, $value, $debug_expr); }}; // If there's a name and redaction expressions, capture and pass to `_assert_snapshot_base` (format=$format:ident, $name:expr, $value:expr, $(match ..)? {$($k:expr => $v:expr),* $(,)?} $(,)?) => {{ let transform = |value| { $crate::_prepare_snapshot_for_redaction!(value, {$($k => $v),*}, $format) }; $crate::_assert_snapshot_base!(transform=transform, $name, $value); }}; // Capture serialization function and pass to `_assert_snapshot_base` // (format=$format:ident, $($arg:tt)*) => {{ let transform = |value| {$crate::_macro_support::serialize_value( &value, $crate::_macro_support::SerializationFormat::$format, )}; $crate::_assert_snapshot_base!(transform=transform, $($arg)*); }}; } #[cfg(feature = "redactions")] #[doc(hidden)] #[macro_export] macro_rules! _prepare_snapshot_for_redaction { ($value:expr, {$($k:expr => $v:expr),*}, $format:ident) => { { let vec = $crate::_macro_support::vec![ $(( $crate::_macro_support::Selector::parse($k).unwrap(), $crate::_macro_support::Redaction::from($v) ),)* ]; $crate::_macro_support::serialize_value_redacted( &$value, &vec, $crate::_macro_support::SerializationFormat::$format, ) } } } #[cfg(not(feature = "redactions"))] #[doc(hidden)] #[macro_export] macro_rules! _prepare_snapshot_for_redaction { ($value:expr, {$($k:expr => $v:expr),*}, $format:ident) => { compile_error!( "insta was compiled without redactions support. Enable the `redactions` feature." ) }; } /// Asserts a [`Debug`] snapshot. /// /// The value needs to implement the [`Debug`] trait. This is useful for /// simple values that do not implement the [`serde::Serialize`] trait, but does not /// permit redactions. /// /// Debug is called with `"{:#?}"`, which means this uses pretty-print. #[macro_export] macro_rules! 
assert_debug_snapshot { ($($arg:tt)*) => { $crate::_assert_snapshot_base!(transform=|v| $crate::_macro_support::format!("{:#?}", v), $($arg)*) }; } /// Asserts a [`Debug`] snapshot in compact format. /// /// The value needs to implement the [`Debug`] trait. This is useful for /// simple values that do not implement the [`serde::Serialize`] trait, but does not /// permit redactions. /// /// Debug is called with `"{:?}"`, which means this does not use pretty-print. #[macro_export] macro_rules! assert_compact_debug_snapshot { ($($arg:tt)*) => { $crate::_assert_snapshot_base!(transform=|v| $crate::_macro_support::format!("{:?}", v), $($arg)*) }; } // A helper macro which takes a closure as `transform`, and runs the closure on // the value. This allows us to implement other macros with a small wrapper. All // snapshot macros eventually call this macro. // // This macro handles optional trailing commas. #[doc(hidden)] #[macro_export] macro_rules! _assert_snapshot_base { // If there's an inline literal value, wrap the literal in a // `ReferenceValue::Inline`, call self. (transform=$transform:expr, $($arg:expr),*, @$snapshot:literal $(,)?) => { $crate::_assert_snapshot_base!( transform = $transform, #[allow(clippy::needless_raw_string_hashes)] $crate::_macro_support::InlineValue($snapshot), $($arg),* ) }; // If there's no debug_expr, use the stringified value, call self. (transform=$transform:expr, $name:expr, $value:expr $(,)?) => { $crate::_assert_snapshot_base!(transform = $transform, $name, $value, stringify!($value)) }; // If there's no name (and necessarily no debug expr), auto generate the // name, call self. (transform=$transform:expr, $value:expr $(,)?) => { $crate::_assert_snapshot_base!( transform = $transform, $crate::_macro_support::AutoName, $value ) }; // The main macro body — every call to this macro should end up here. (transform=$transform:expr, $name:expr, $value:expr, $debug_expr:expr $(,)?) 
=> { $crate::_macro_support::assert_snapshot( ( $name, #[allow(clippy::redundant_closure_call)] $transform(&$value).as_str(), ).into(), $crate::_get_workspace_root!().as_path(), $crate::_function_name!(), $crate::_macro_support::module_path!(), $crate::_macro_support::file!(), $crate::_macro_support::line!(), $debug_expr, ) .unwrap() }; } /// (Experimental) /// Asserts a binary snapshot in the form of a [`Vec`]. /// /// The contents get stored in a separate file next to the metadata file. The extension for this /// file must be passed as part of the name. For an implicit snapshot name just an extension can be /// passed starting with a `.`. /// /// This feature is considered experimental: we may make incompatible changes for the next couple /// of versions after 1.41. /// /// Examples: /// /// ```no_run /// // implicit name: /// insta::assert_binary_snapshot!(".txt", b"abcd".to_vec()); /// /// // named: /// insta::assert_binary_snapshot!("my_snapshot.bin", [0, 1, 2, 3].to_vec()); /// ``` #[macro_export] macro_rules! assert_binary_snapshot { ($name_and_extension:expr, $value:expr $(,)?) => { $crate::assert_binary_snapshot!($name_and_extension, $value, stringify!($value)); }; ($name_and_extension:expr, $value:expr, $debug_expr:expr $(,)?) => { $crate::_macro_support::assert_snapshot( $crate::_macro_support::BinarySnapshotValue { name_and_extension: $name_and_extension, content: $value, } .into(), $crate::_get_workspace_root!().as_path(), $crate::_function_name!(), $crate::_macro_support::module_path!(), $crate::_macro_support::file!(), $crate::_macro_support::line!(), $debug_expr, ) .unwrap() }; } /// Asserts a [`Display`](std::fmt::Display) snapshot. /// /// This is now deprecated, replaced by the more generic [`assert_snapshot!`](crate::assert_snapshot!) #[macro_export] #[deprecated = "use assert_snapshot!() instead"] macro_rules! assert_display_snapshot { ($($arg:tt)*) => { $crate::assert_snapshot!($($arg)*) }; } /// Asserts a [`String`] snapshot. 
/// /// This is the simplest of all assertion methods. /// It accepts any value that implements [`Display`](std::fmt::Display). /// /// ```no_run /// # use insta::*; /// // implicitly named /// assert_snapshot!("reference value to snapshot"); /// // named /// assert_snapshot!("snapshot_name", "reference value to snapshot"); /// // inline /// assert_snapshot!("reference value", @"reference value"); /// ``` /// /// Optionally a third argument can be given as an expression to be stringified /// as the debug expression. For more information on this, check out /// . #[macro_export] macro_rules! assert_snapshot { ($($arg:tt)*) => { $crate::_assert_snapshot_base!(transform=|v| $crate::_macro_support::format!("{}", v), $($arg)*) }; } /// Settings configuration macro. /// /// This macro lets you bind some [`Settings`](crate::Settings) temporarily. The first argument /// takes key value pairs that should be set, and the second is the block to /// execute. All settings can be set (`sort_maps => value` maps to `set_sort_maps(value)`). /// The exception are redactions, which can only be set to a vector this way. /// /// This example: /// /// ```rust /// insta::with_settings!({sort_maps => true}, { /// // run snapshot test here /// }); /// ``` /// /// Is equivalent to the following: /// /// ```rust /// # use insta::Settings; /// let mut settings = Settings::clone_current(); /// settings.set_sort_maps(true); /// settings.bind(|| { /// // run snapshot test here /// }); /// ``` /// /// Note: before insta 0.17, this macro used /// [`Settings::new`](crate::Settings::new) which meant that original settings /// were always reset rather than extended. #[macro_export] macro_rules! with_settings { ({$($k:ident => $v:expr),*$(,)?}, $body:block) => {{ let mut settings = $crate::Settings::clone_current(); $( settings._private_inner_mut().$k($v); )* settings.bind(|| $body) }} } /// Executes a closure for all input files matching a glob. /// /// The closure is passed the path to the file. 
You can use [`std::fs::read_to_string`] /// or similar functions to load the file and process it. /// /// ``` /// # use insta::{assert_snapshot, glob, Settings}; /// # let mut settings = Settings::clone_current(); /// # settings.set_allow_empty_glob(true); /// # let _dropguard = settings.bind_to_scope(); /// use std::fs; /// /// glob!("inputs/*.txt", |path| { /// let input = fs::read_to_string(path).unwrap(); /// assert_snapshot!(input.to_uppercase()); /// }); /// ``` /// /// The `INSTA_GLOB_FILTER` environment variable can be set to only execute certain files. /// The format of the filter is a semicolon separated filter. For instance by setting /// `INSTA_GLOB_FILTER` to `foo-*txt;bar-*.txt` only files starting with `foo-` or `bar-` /// end ending in `.txt` will be executed. When using `cargo-insta` the `--glob-filter` /// option can be used instead. /// /// Another effect of the globbing system is that snapshot failures within the glob macro /// are deferred until the end of of it. In other words this means that each snapshot /// assertion within the `glob!` block are reported. It can be disabled by setting /// `INSTA_GLOB_FAIL_FAST` environment variable to `1`. /// /// Note: Parent directory traversal patterns (e.g., "../**/*.rs") are not supported in the /// two-argument form of this macro currently. If you need to access parent /// directories, use the three-argument version of this macro instead. /// /// A three-argument version of this macro allows specifying a base directory /// for the glob to start in. 
This allows globbing in arbitrary directories, /// including parent directories: /// /// ``` /// # use insta::{assert_snapshot, glob, Settings}; /// # let mut settings = Settings::clone_current(); /// # settings.set_allow_empty_glob(true); /// # let _dropguard = settings.bind_to_scope(); /// use std::fs; /// /// glob!("../test_data", "inputs/*.txt", |path| { /// let input = fs::read_to_string(path).unwrap(); /// assert_snapshot!(input.to_uppercase()); /// }); /// ``` #[cfg(feature = "glob")] #[cfg_attr(docsrs, doc(cfg(feature = "glob")))] #[macro_export] macro_rules! glob { // TODO: I think we could remove the three-argument version of this macro // and just support a pattern such as // `glob!("../test_data/inputs/*.txt"...`. ($base_path:expr, $glob:expr, $closure:expr) => {{ use $crate::_macro_support::path::Path; let base = $crate::_get_workspace_root!() .join(Path::new(file!()).parent().unwrap()) .join($base_path) .to_path_buf(); // we try to canonicalize but on some platforms (eg: wasm) that might not work, so // we instead silently fall back. let base = base.canonicalize().unwrap_or_else(|_| base); $crate::_macro_support::glob_exec( $crate::_get_workspace_root!().as_path(), &base, $glob, $closure, ); }}; ($glob:expr, $closure:expr) => {{ $crate::glob!(".", $glob, $closure) }}; } /// Utility macro to permit a multi-snapshot run where all snapshots match. /// /// Within this block, insta will allow an assertion to be run more than once /// (even inline) without generating another snapshot. Instead it will assert /// that snapshot expressions visited more than once are matching. /// /// ```rust /// insta::allow_duplicates! { /// for x in (0..10).step_by(2) { /// let is_even = x % 2 == 0; /// insta::assert_debug_snapshot!(is_even, @"true"); /// } /// } /// ``` /// /// The first snapshot assertion will be used as a gold master and every further /// assertion will be checked against it. If they don't match the assertion will /// fail. #[macro_export] macro_rules! 
allow_duplicates { ($($x:tt)*) => { $crate::_macro_support::with_allow_duplicates(|| { $($x)* }) } } insta-1.46.1/src/output.rs000064400000000000000000000435301046102023000135350ustar 00000000000000use std::borrow::Cow; use std::process::Command; use std::{env, path::Path, time::Duration}; use similar::{Algorithm, ChangeTag, TextDiff}; use crate::content::yaml; use crate::snapshot::{MetaData, Snapshot, SnapshotContents}; use crate::utils::{format_rust_expression, style, term_width}; /// Snapshot printer utility. pub struct SnapshotPrinter<'a> { workspace_root: &'a Path, old_snapshot: Option<&'a Snapshot>, new_snapshot: &'a Snapshot, old_snapshot_hint: &'a str, new_snapshot_hint: &'a str, show_info: bool, show_diff: bool, title: Option<&'a str>, line: Option, snapshot_file: Option<&'a Path>, } impl<'a> SnapshotPrinter<'a> { pub fn new( workspace_root: &'a Path, old_snapshot: Option<&'a Snapshot>, new_snapshot: &'a Snapshot, ) -> SnapshotPrinter<'a> { SnapshotPrinter { workspace_root, old_snapshot, new_snapshot, old_snapshot_hint: "old snapshot", new_snapshot_hint: "new results", show_info: false, show_diff: false, title: None, line: None, snapshot_file: None, } } pub fn set_snapshot_hints(&mut self, old: &'a str, new: &'a str) { self.old_snapshot_hint = old; self.new_snapshot_hint = new; } pub fn set_show_info(&mut self, yes: bool) { self.show_info = yes; } pub fn set_show_diff(&mut self, yes: bool) { self.show_diff = yes; } pub fn set_title(&mut self, title: Option<&'a str>) { self.title = title; } pub fn set_line(&mut self, line: Option) { self.line = line; } pub fn set_snapshot_file(&mut self, file: Option<&'a Path>) { self.snapshot_file = file; } pub fn print(&self) { if let Some(title) = self.title { let width = term_width(); println!( "{title:━^width$}", title = style(format!(" {title} ")).bold(), width = width ); } self.print_snapshot_diff(); } fn print_snapshot_diff(&self) { self.print_snapshot_summary(); if self.show_diff { self.print_changeset(); } else { 
self.print_snapshot(); } } fn print_snapshot_summary(&self) { print_snapshot_summary( self.workspace_root, self.new_snapshot, self.snapshot_file, self.line, ); } fn print_info(&self) { print_info(self.new_snapshot.metadata()); } fn print_snapshot(&self) { print_line(term_width()); let width = term_width(); if self.show_info { self.print_info(); } println!("Snapshot Contents:"); match self.new_snapshot.contents() { SnapshotContents::Text(new_contents) => { let new_contents = new_contents.to_string(); println!("──────┬{:─^1$}", "", width.saturating_sub(7)); for (idx, line) in new_contents.lines().enumerate() { println!("{:>5} │ {}", style(idx + 1).cyan().dim().bold(), line); } println!("──────┴{:─^1$}", "", width.saturating_sub(7)); } SnapshotContents::Binary(_) => { println!( "{}", encode_file_link_escape( &self .new_snapshot .build_binary_path( self.snapshot_file.unwrap().with_extension("snap.new") ) .unwrap() ) ); } } } fn print_changeset(&self) { let width = term_width(); print_line(width); if self.show_info { self.print_info(); } if let Some(old_snapshot) = self.old_snapshot { if old_snapshot.contents().is_binary() { println!( "{}", style(format_args!( "-{}: {}", self.old_snapshot_hint, encode_file_link_escape( &old_snapshot .build_binary_path(self.snapshot_file.unwrap()) .unwrap() ), )) .red() ); } } if self.new_snapshot.contents().is_binary() { println!( "{}", style(format_args!( "+{}: {}", self.new_snapshot_hint, encode_file_link_escape( &self .new_snapshot .build_binary_path( self.snapshot_file.unwrap().with_extension("snap.new") ) .unwrap() ), )) .green() ); } if let Some((old, new)) = match ( self.old_snapshot.as_ref().map(|o| o.contents()), self.new_snapshot.contents(), ) { (Some(SnapshotContents::Binary(_)) | None, SnapshotContents::Text(new)) => { Some((None, Some(new.to_string()))) } (Some(SnapshotContents::Text(old)), SnapshotContents::Binary { .. 
}) => { Some((Some(old.to_string()), None)) } (Some(SnapshotContents::Text(old)), SnapshotContents::Text(new)) => { Some((Some(old.to_string()), Some(new.to_string()))) } _ => None, } { let old_text = old.as_deref().unwrap_or(""); let new_text = new.as_deref().unwrap_or(""); // Check for external diff tool if let Ok(tool) = env::var("INSTA_DIFF_TOOL") { if !tool.is_empty() && invoke_external_diff_tool(&tool, old_text, new_text, self.snapshot_file) { println!(); // Add spacing after external tool output return; } } let newlines_matter = newlines_matter(old_text, new_text); let diff = TextDiff::configure() .algorithm(Algorithm::Patience) .timeout(Duration::from_millis(500)) .diff_lines(old_text, new_text); if old.is_some() { println!( "{}", style(format_args!("-{}", self.old_snapshot_hint)).red() ); } if new.is_some() { println!( "{}", style(format_args!("+{}", self.new_snapshot_hint)).green() ); } println!("────────────┬{:─^1$}", "", width.saturating_sub(13)); // This is to make sure that binary and text snapshots are never reported as being // equal (that would otherwise happen if the text snapshot is an empty string). 
let mut has_changes = old.is_none() || new.is_none(); for (idx, group) in diff.grouped_ops(4).iter().enumerate() { if idx > 0 { println!("┈┈┈┈┈┈┈┈┈┈┈┈┼{:┈^1$}", "", width.saturating_sub(13)); } for op in group { for change in diff.iter_inline_changes(op) { match change.tag() { ChangeTag::Insert => { has_changes = true; print!( "{:>5} {:>5} │{}", "", style(change.new_index().unwrap() + 1).cyan().dim().bold(), style("+").green(), ); for &(emphasized, change) in change.values() { let change = render_invisible(change, newlines_matter); if emphasized { print!("{}", style(change).green().underlined()); } else { print!("{}", style(change).green()); } } } ChangeTag::Delete => { has_changes = true; print!( "{:>5} {:>5} │{}", style(change.old_index().unwrap() + 1).cyan().dim(), "", style("-").red(), ); for &(emphasized, change) in change.values() { let change = render_invisible(change, newlines_matter); if emphasized { print!("{}", style(change).red().underlined()); } else { print!("{}", style(change).red()); } } } ChangeTag::Equal => { print!( "{:>5} {:>5} │ ", style(change.old_index().unwrap() + 1).cyan().dim(), style(change.new_index().unwrap() + 1).cyan().dim().bold(), ); for &(_, change) in change.values() { let change = render_invisible(change, newlines_matter); print!("{}", style(change).dim()); } } } if change.missing_newline() { println!(); } } } } if !has_changes { println!( "{:>5} {:>5} │{}", "", style("-").dim(), style(" snapshots are matching").cyan(), ); } println!("────────────┴{:─^1$}", "", width.saturating_sub(13)); } } } /// Prints the summary of a snapshot pub fn print_snapshot_summary( workspace_root: &Path, snapshot: &Snapshot, snapshot_file: Option<&Path>, line: Option, ) { if let Some(snapshot_file) = snapshot_file { let snapshot_file = workspace_root .join(snapshot_file) .strip_prefix(workspace_root) .ok() .map(|x| x.to_path_buf()) .unwrap_or_else(|| snapshot_file.to_path_buf()); println!( "Snapshot file: {}", 
style(snapshot_file.display()).cyan().underlined() ); } if let Some(name) = snapshot.snapshot_name() { println!("Snapshot: {}", style(name).yellow()); } else { println!("Snapshot: {}", style("").dim()); } if let Some(ref value) = snapshot.metadata().get_relative_source(workspace_root) { println!( "Source: {}{}", style(value.display()).cyan(), line.or( // default to old assertion line from snapshot. snapshot.metadata().assertion_line() ) .map(|line| format!(":{}", style(line).bold())) .unwrap_or_default() ); } if let Some(ref value) = snapshot.metadata().input_file() { println!("Input file: {}", style(value).cyan()); } } fn print_line(width: usize) { println!("{:─^1$}", "", width); } fn trailing_newline(s: &str) -> &str { if s.ends_with("\r\n") { "\r\n" } else if s.ends_with('\r') { "\r" } else if s.ends_with('\n') { "\n" } else { "" } } fn detect_newlines(s: &str) -> (bool, bool, bool) { let mut last_char = None; let mut detected_crlf = false; let mut detected_cr = false; let mut detected_lf = false; for c in s.chars() { if c == '\n' { if last_char.take() == Some('\r') { detected_crlf = true; } else { detected_lf = true; } } if last_char == Some('\r') { detected_cr = true; } last_char = Some(c); } if last_char == Some('\r') { detected_cr = true; } (detected_cr, detected_crlf, detected_lf) } fn newlines_matter(left: &str, right: &str) -> bool { if trailing_newline(left) != trailing_newline(right) { return true; } let (cr1, crlf1, lf1) = detect_newlines(left); let (cr2, crlf2, lf2) = detect_newlines(right); !matches!( (cr1 || cr2, crlf1 || crlf2, lf1 || lf2), (false, false, false) | (true, false, false) | (false, true, false) | (false, false, true) ) } fn render_invisible(s: &str, newlines_matter: bool) -> Cow<'_, str> { if newlines_matter || s.find(&['\x1b', '\x07', '\x08', '\x7f'][..]).is_some() { Cow::Owned( s.replace('\r', "␍\r") .replace('\n', "␊\n") .replace("␍\r␊\n", "␍␊\r\n") .replace('\x07', "␇") .replace('\x08', "␈") .replace('\x1b', "␛") .replace('\x7f', 
"␡"), ) } else { Cow::Borrowed(s) } } fn print_info(metadata: &MetaData) { let width = term_width(); if let Some(expr) = metadata.expression() { println!("Expression: {}", style(format_rust_expression(expr))); print_line(width); } if let Some(descr) = metadata.description() { println!("{descr}"); print_line(width); } if let Some(info) = metadata.private_info() { let out = yaml::to_string(info); // TODO: does the yaml output always start with '---'? println!("{}", out.trim().strip_prefix("---").unwrap().trim_start()); print_line(width); } } /// Encodes a path as an OSC-8 escape sequence. This makes it a clickable link in supported /// terminal emulators. fn encode_file_link_escape(path: &Path) -> String { assert!(path.is_absolute()); format!( "\x1b]8;;file://{}\x1b\\{}\x1b]8;;\x1b\\", path.display(), path.display() ) } /// Invokes an external diff tool with the old and new snapshot contents. /// /// Returns `true` if the external tool was successfully invoked, `false` if it failed /// (in which case the caller should fall back to the built-in diff). /// /// This function is public for testing purposes. #[doc(hidden)] pub fn invoke_external_diff_tool( tool: &str, old_content: &str, new_content: &str, snapshot_file: Option<&Path>, ) -> bool { let dir = match tempfile::tempdir() { Ok(dir) => dir, Err(err) => { eprintln!("warning: failed to create temp dir for diff tool: {err}"); return false; } }; // Use snapshot file stem for naming (helps diff tools with syntax detection). // Fall back to generic name - these are ephemeral temp files anyway. 
let base_name = snapshot_file .and_then(|p| p.file_stem()) .and_then(|s| s.to_str()) .unwrap_or("snapshot"); let old_path = dir.path().join(format!("{base_name}.old.snap")); let new_path = dir.path().join(format!("{base_name}.new.snap")); // Write old content if let Err(err) = std::fs::write(&old_path, old_content) { eprintln!("warning: failed to write old snapshot to temp file: {err}"); return false; } // Write new content if let Err(err) = std::fs::write(&new_path, new_content) { eprintln!("warning: failed to write new snapshot to temp file: {err}"); return false; } // Invoke the diff tool from the temp directory so paths are relative/clean. // We capture stdout/stderr and print them ourselves so the output goes through // the same capture mechanism as the built-in diff (important for cargo test). let old_filename = old_path.file_name().unwrap(); let new_filename = new_path.file_name().unwrap(); // Split tool string to support arguments (e.g., "delta --side-by-side") let mut parts = tool.split_whitespace(); let cmd = match parts.next() { Some(cmd) => cmd, None => return false, }; let mut command = Command::new(cmd); command.args(parts); command.current_dir(dir.path()); command.arg(old_filename); command.arg(new_filename); match command.output() { Ok(output) => { // Print captured output through normal channels so it gets captured // by cargo test when appropriate (just like the built-in diff) if !output.stdout.is_empty() { print!("{}", String::from_utf8_lossy(&output.stdout)); } if !output.stderr.is_empty() { eprint!("{}", String::from_utf8_lossy(&output.stderr)); } // Non-zero exit is normal for diff tools when files differ true } Err(err) => { eprintln!("warning: failed to invoke diff tool `{tool}`: {err}"); false } } // Temp dir is cleaned up when `dir` goes out of scope } #[test] fn test_invisible() { assert_eq!( render_invisible("\r\n\x1b\r\x07\x08\x7f\n", true), "␍␊\r\n␛␍\r␇␈␡␊\n" ); } 
insta-1.46.1/src/redaction.rs000064400000000000000000000453571046102023000141560ustar 00000000000000use pest::Parser; use pest_derive::Parser; use std::borrow::Cow; use std::fmt; use crate::content::Content; #[derive(Debug)] pub struct SelectorParseError(Box>); impl SelectorParseError { /// Return the column of where the error occurred. pub fn column(&self) -> usize { match self.0.line_col { pest::error::LineColLocation::Pos((_, col)) => col, pest::error::LineColLocation::Span((_, col), _) => col, } } } /// Represents a path for a callback function. /// /// This can be converted into a string with `to_string` to see a stringified /// path that the selector matched. #[derive(Clone, Debug)] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub struct ContentPath<'a>(&'a [PathItem]); impl fmt::Display for ContentPath<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for item in self.0.iter() { write!(f, ".")?; match *item { PathItem::Content(ref ctx) => { if let Some(s) = ctx.as_str() { write!(f, "{s}")?; } else { write!(f, "")?; } } PathItem::Field(name) => write!(f, "{name}")?, PathItem::Index(idx, _) => write!(f, "{idx}")?, } } Ok(()) } } /// Replaces a value with another one. /// /// Represents a redaction. #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub enum Redaction { /// Static redaction with new content. Static(Content), /// Redaction with new content. Dynamic(Box) -> Content + Sync + Send>), } macro_rules! 
impl_from { ($ty:ty) => { impl From<$ty> for Redaction { fn from(value: $ty) -> Redaction { Redaction::Static(Content::from(value)) } } }; } impl_from!(()); impl_from!(bool); impl_from!(u8); impl_from!(u16); impl_from!(u32); impl_from!(u64); impl_from!(i8); impl_from!(i16); impl_from!(i32); impl_from!(i64); impl_from!(f32); impl_from!(f64); impl_from!(char); impl_from!(String); impl_from!(Vec); impl<'a> From<&'a str> for Redaction { fn from(value: &'a str) -> Redaction { Redaction::Static(Content::from(value)) } } impl<'a> From<&'a [u8]> for Redaction { fn from(value: &'a [u8]) -> Redaction { Redaction::Static(Content::from(value)) } } /// Creates a dynamic redaction. /// /// This can be used to redact a value with a different value but instead of /// statically declaring it a dynamic value can be computed. This can also /// be used to perform assertions before replacing the value. /// /// The closure is passed two arguments: the value as [`Content`] /// and the path that was selected (as [`ContentPath`]) /// /// Example: /// /// ```rust /// # use insta::{Settings, dynamic_redaction}; /// # let mut settings = Settings::new(); /// settings.add_redaction(".id", dynamic_redaction(|value, path| { /// assert_eq!(path.to_string(), ".id"); /// assert_eq!( /// value /// .as_str() /// .unwrap() /// .chars() /// .filter(|&c| c == '-') /// .count(), /// 4 /// ); /// "[uuid]" /// })); /// ``` #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn dynamic_redaction(func: F) -> Redaction where I: Into, F: Fn(Content, ContentPath<'_>) -> I + Send + Sync + 'static, { Redaction::Dynamic(Box::new(move |c, p| func(c, p).into())) } /// Creates a dynamic redaction that sorts the value at the selector. /// /// This is useful to force something like a set or map to be ordered to make /// it deterministic. This is necessary as insta's serialization support is /// based on [`serde`] which does not have native set support. 
As a result vectors /// (which need to retain order) and sets (which should be given a stable order) /// look the same. /// /// ```rust /// # use insta::{Settings, sorted_redaction}; /// # let mut settings = Settings::new(); /// settings.add_redaction(".flags", sorted_redaction()); /// ``` #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn sorted_redaction() -> Redaction { fn sort(mut value: Content, _path: ContentPath) -> Content { match value.resolve_inner_mut() { Content::Seq(ref mut val) => { val.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) } Content::Map(ref mut val) => { val.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) } Content::Struct(_, ref mut fields) | Content::StructVariant(_, _, _, ref mut fields) => { fields.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)) } _ => {} } value } dynamic_redaction(sort) } /// Creates a redaction that rounds floating point numbers to a given /// number of decimal places. /// /// ```rust /// # use insta::{Settings, rounded_redaction}; /// # let mut settings = Settings::new(); /// settings.add_redaction(".sum", rounded_redaction(2)); /// ``` #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn rounded_redaction(decimals: usize) -> Redaction { dynamic_redaction(move |value: Content, _path: ContentPath| -> Content { let f = match value.resolve_inner() { Content::F32(f) => *f as f64, Content::F64(f) => *f, _ => return value, }; let x = 10f64.powf(decimals as f64); Content::F64((f * x).round() / x) }) } impl Redaction { /// Performs the redaction of the value at the given path. 
fn redact(&self, value: Content, path: &[PathItem]) -> Content { match *self { Redaction::Static(ref new_val) => new_val.clone(), Redaction::Dynamic(ref callback) => callback(value, ContentPath(path)), } } } #[derive(Parser)] #[grammar = "select_grammar.pest"] pub struct SelectParser; #[derive(Debug)] pub enum PathItem { Content(Content), Field(&'static str), Index(u64, u64), } impl PathItem { fn as_str(&self) -> Option<&str> { match *self { PathItem::Content(ref content) => content.as_str(), PathItem::Field(s) => Some(s), PathItem::Index(..) => None, } } fn as_u64(&self) -> Option { match *self { PathItem::Content(ref content) => content.as_u64(), PathItem::Field(_) => None, PathItem::Index(idx, _) => Some(idx), } } fn range_check(&self, start: Option, end: Option) -> bool { fn expand_range(sel: i64, len: i64) -> i64 { if sel < 0 { (len + sel).max(0) } else { sel } } let (idx, len) = match *self { PathItem::Index(idx, len) => (idx as i64, len as i64), _ => return false, }; match (start, end) { (None, None) => true, (None, Some(end)) => idx < expand_range(end, len), (Some(start), None) => idx >= expand_range(start, len), (Some(start), Some(end)) => { idx >= expand_range(start, len) && idx < expand_range(end, len) } } } } #[derive(Debug, Clone, PartialEq, Eq)] pub enum Segment<'a> { DeepWildcard, Wildcard, Key(Cow<'a, str>), Index(u64), Range(Option, Option), } #[derive(Debug, Clone)] pub struct Selector<'a> { selectors: Vec>>, } impl<'a> Selector<'a> { pub fn parse(selector: &'a str) -> Result, SelectorParseError> { let pair = SelectParser::parse(Rule::selectors, selector) .map_err(Box::new) .map_err(SelectorParseError)? 
.next() .unwrap(); let mut rv = vec![]; for selector_pair in pair.into_inner() { match selector_pair.as_rule() { Rule::EOI => break, other => assert_eq!(other, Rule::selector), } let mut segments = vec![]; let mut have_deep_wildcard = false; for segment_pair in selector_pair.into_inner() { segments.push(match segment_pair.as_rule() { Rule::identity => continue, Rule::wildcard => Segment::Wildcard, Rule::deep_wildcard => { if have_deep_wildcard { return Err(SelectorParseError(Box::new( pest::error::Error::new_from_span( pest::error::ErrorVariant::CustomError { message: "deep wildcard used twice".into(), }, segment_pair.as_span(), ), ))); } have_deep_wildcard = true; Segment::DeepWildcard } Rule::key => Segment::Key(Cow::Borrowed(&segment_pair.as_str()[1..])), Rule::subscript => { let subscript_rule = segment_pair.into_inner().next().unwrap(); match subscript_rule.as_rule() { Rule::int => Segment::Index(subscript_rule.as_str().parse().unwrap()), Rule::string => { let sq = subscript_rule.as_str(); let s = &sq[1..sq.len() - 1]; let mut was_backslash = false; Segment::Key(if s.bytes().any(|x| x == b'\\') { Cow::Owned( s.chars() .filter_map(|c| { let rv = match c { '\\' if !was_backslash => { was_backslash = true; return None; } other => other, }; was_backslash = false; Some(rv) }) .collect(), ) } else { Cow::Borrowed(s) }) } _ => unreachable!(), } } Rule::full_range => Segment::Range(None, None), Rule::range => { let mut int_rule = segment_pair .into_inner() .map(|x| x.as_str().parse().unwrap()); Segment::Range(int_rule.next(), int_rule.next()) } Rule::range_to => { let int_rule = segment_pair.into_inner().next().unwrap(); Segment::Range(None, int_rule.as_str().parse().ok()) } Rule::range_from => { let int_rule = segment_pair.into_inner().next().unwrap(); Segment::Range(int_rule.as_str().parse().ok(), None) } _ => unreachable!(), }); } rv.push(segments); } Ok(Selector { selectors: rv }) } pub fn make_static(self) -> Selector<'static> { Selector { selectors: self 
.selectors .into_iter() .map(|parts| { parts .into_iter() .map(|x| match x { Segment::Key(x) => Segment::Key(Cow::Owned(x.into_owned())), Segment::Index(x) => Segment::Index(x), Segment::Wildcard => Segment::Wildcard, Segment::DeepWildcard => Segment::DeepWildcard, Segment::Range(a, b) => Segment::Range(a, b), }) .collect() }) .collect(), } } fn segment_is_match(&self, segment: &Segment, element: &PathItem) -> bool { match *segment { Segment::Wildcard => true, Segment::DeepWildcard => true, Segment::Key(ref k) => element.as_str() == Some(k), Segment::Index(i) => element.as_u64() == Some(i), Segment::Range(start, end) => element.range_check(start, end), } } fn selector_is_match(&self, selector: &[Segment], path: &[PathItem]) -> bool { if let Some(idx) = selector.iter().position(|x| *x == Segment::DeepWildcard) { let forward_sel = &selector[..idx]; let backward_sel = &selector[idx + 1..]; if path.len() <= idx { return false; } for (segment, element) in forward_sel.iter().zip(path.iter()) { if !self.segment_is_match(segment, element) { return false; } } for (segment, element) in backward_sel.iter().rev().zip(path.iter().rev()) { if !self.segment_is_match(segment, element) { return false; } } true } else { if selector.len() != path.len() { return false; } for (segment, element) in selector.iter().zip(path.iter()) { if !self.segment_is_match(segment, element) { return false; } } true } } pub fn is_match(&self, path: &[PathItem]) -> bool { for selector in &self.selectors { if self.selector_is_match(selector, path) { return true; } } false } pub fn redact(&self, value: Content, redaction: &Redaction) -> Content { self.redact_impl(value, redaction, &mut vec![]) } fn redact_seq( &self, seq: Vec, redaction: &Redaction, path: &mut Vec, ) -> Vec { let len = seq.len(); seq.into_iter() .enumerate() .map(|(idx, value)| { path.push(PathItem::Index(idx as u64, len as u64)); let new_value = self.redact_impl(value, redaction, path); path.pop(); new_value }) .collect() } fn 
redact_struct( &self, seq: Vec<(&'static str, Content)>, redaction: &Redaction, path: &mut Vec, ) -> Vec<(&'static str, Content)> { seq.into_iter() .map(|(key, value)| { path.push(PathItem::Field(key)); let new_value = self.redact_impl(value, redaction, path); path.pop(); (key, new_value) }) .collect() } fn redact_impl( &self, value: Content, redaction: &Redaction, path: &mut Vec, ) -> Content { if self.is_match(path) { redaction.redact(value, path) } else { match value { Content::Map(map) => Content::Map( map.into_iter() .map(|(key, value)| { path.push(PathItem::Field("$key")); let new_key = self.redact_impl(key.clone(), redaction, path); path.pop(); path.push(PathItem::Content(key)); let new_value = self.redact_impl(value, redaction, path); path.pop(); (new_key, new_value) }) .collect(), ), Content::Seq(seq) => Content::Seq(self.redact_seq(seq, redaction, path)), Content::Tuple(seq) => Content::Tuple(self.redact_seq(seq, redaction, path)), Content::TupleStruct(name, seq) => { Content::TupleStruct(name, self.redact_seq(seq, redaction, path)) } Content::TupleVariant(name, variant_index, variant, seq) => Content::TupleVariant( name, variant_index, variant, self.redact_seq(seq, redaction, path), ), Content::Struct(name, seq) => { Content::Struct(name, self.redact_struct(seq, redaction, path)) } Content::StructVariant(name, variant_index, variant, seq) => { Content::StructVariant( name, variant_index, variant, self.redact_struct(seq, redaction, path), ) } Content::NewtypeStruct(name, inner) => Content::NewtypeStruct( name, Box::new(self.redact_impl(*inner, redaction, path)), ), Content::NewtypeVariant(name, index, variant_name, inner) => { Content::NewtypeVariant( name, index, variant_name, Box::new(self.redact_impl(*inner, redaction, path)), ) } Content::Some(contents) => { Content::Some(Box::new(self.redact_impl(*contents, redaction, path))) } other => other, } } } } #[test] fn test_range_checks() { use similar_asserts::assert_eq; assert_eq!(PathItem::Index(0, 
10).range_check(None, Some(-1)), true); assert_eq!(PathItem::Index(9, 10).range_check(None, Some(-1)), false); assert_eq!(PathItem::Index(0, 10).range_check(Some(1), Some(-1)), false); assert_eq!(PathItem::Index(1, 10).range_check(Some(1), Some(-1)), true); assert_eq!(PathItem::Index(9, 10).range_check(Some(1), Some(-1)), false); assert_eq!(PathItem::Index(0, 10).range_check(Some(1), None), false); assert_eq!(PathItem::Index(1, 10).range_check(Some(1), None), true); assert_eq!(PathItem::Index(9, 10).range_check(Some(1), None), true); } insta-1.46.1/src/runtime.rs000064400000000000000000001046301046102023000136570ustar 00000000000000use std::cell::RefCell; use std::collections::{BTreeMap, BTreeSet}; use std::error::Error; use std::fs; use std::io::ErrorKind; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::str; use std::sync::{Arc, Mutex}; use std::{borrow::Cow, env}; use crate::settings::Settings; use crate::snapshot::{ MetaData, PendingInlineSnapshot, Snapshot, SnapshotContents, SnapshotKind, TextSnapshotContents, }; use crate::utils::{path_to_storage, style}; use crate::{env::get_tool_config, output::SnapshotPrinter}; use crate::{ env::{ memoize_snapshot_file, pending_snapshot_path, snapshot_update_behavior, OutputBehavior, SnapshotUpdateBehavior, ToolConfig, }, snapshot::TextSnapshotKind, }; use once_cell::sync::Lazy; static TEST_NAME_COUNTERS: Lazy>> = Lazy::new(|| Mutex::new(BTreeMap::new())); static TEST_NAME_CLASH_DETECTION: Lazy>> = Lazy::new(|| Mutex::new(BTreeMap::new())); static INLINE_DUPLICATES: Lazy>> = Lazy::new(|| Mutex::new(BTreeSet::new())); thread_local! { static RECORDED_DUPLICATES: RefCell>> = RefCell::default() } // Writes to stderr and also to a warnings file (if INSTA_WARNINGS_FILE is set). // The warnings file allows cargo-insta to display warnings after tests complete, // since test runners like nextest suppress output from passing tests by default. #[macro_export] macro_rules! 
elog { ($($arg:tt)*) => {{ use std::io::Write as _; let msg = format!($($arg)*); let _ = writeln!(std::io::stderr(), "{}", msg); $crate::env::memoize_warning(&msg); }}; } #[cfg(feature = "glob")] macro_rules! print_or_panic { ($fail_fast:expr, $($tokens:tt)*) => {{ if (!$fail_fast) { eprintln!($($tokens)*); eprintln!(); } else { panic!($($tokens)*); } }} } /// Special marker to use an automatic name. /// /// This can be passed as a snapshot name in a macro to explicitly tell /// insta to use the automatic name. This is useful in ambiguous syntax /// situations. #[derive(Debug)] pub struct AutoName; pub struct InlineValue<'a>(pub &'a str); /// The name of a snapshot, from which the path is derived. type SnapshotName<'a> = Option>; pub struct BinarySnapshotValue<'a> { pub name_and_extension: &'a str, pub content: Vec, } pub enum SnapshotValue<'a> { /// A text snapshot that gets stored along with the metadata in the same file. FileText { name: SnapshotName<'a>, /// The new generated value to compare against any previously approved content. content: &'a str, }, /// An inline snapshot. InlineText { /// The reference content from the macro invocation that will be compared against. reference_content: &'a str, /// The new generated value to compare against any previously approved content. content: &'a str, }, /// A binary snapshot that gets stored as a separate file next to the metadata file. Binary { name: SnapshotName<'a>, /// The new generated value to compare against any previously approved content. content: Vec, /// The extension of the separate file. 
extension: &'a str, }, } impl<'a> From<(AutoName, &'a str)> for SnapshotValue<'a> { fn from((_, content): (AutoName, &'a str)) -> Self { SnapshotValue::FileText { name: None, content, } } } impl<'a> From<(Option, &'a str)> for SnapshotValue<'a> { fn from((name, content): (Option, &'a str)) -> Self { SnapshotValue::FileText { name: name.map(Cow::Owned), content, } } } impl<'a> From<(String, &'a str)> for SnapshotValue<'a> { fn from((name, content): (String, &'a str)) -> Self { SnapshotValue::FileText { name: Some(Cow::Owned(name)), content, } } } impl<'a> From<(Option<&'a str>, &'a str)> for SnapshotValue<'a> { fn from((name, content): (Option<&'a str>, &'a str)) -> Self { SnapshotValue::FileText { name: name.map(Cow::Borrowed), content, } } } impl<'a> From<(&'a str, &'a str)> for SnapshotValue<'a> { fn from((name, content): (&'a str, &'a str)) -> Self { SnapshotValue::FileText { name: Some(Cow::Borrowed(name)), content, } } } impl<'a> From<(InlineValue<'a>, &'a str)> for SnapshotValue<'a> { fn from((InlineValue(reference_content), content): (InlineValue<'a>, &'a str)) -> Self { SnapshotValue::InlineText { reference_content, content, } } } impl<'a> From> for SnapshotValue<'a> { fn from( BinarySnapshotValue { name_and_extension, content, }: BinarySnapshotValue<'a>, ) -> Self { let (name, extension) = name_and_extension.split_once('.').unwrap_or_else(|| { panic!("\"{name_and_extension}\" does not match the format \"name.extension\"",) }); let name = if name.is_empty() { None } else { Some(Cow::Borrowed(name)) }; SnapshotValue::Binary { name, extension, content, } } } fn is_doctest(function_name: &str) -> bool { function_name.starts_with("rust_out::main::_doctest") } fn detect_snapshot_name(function_name: &str, module_path: &str) -> Result { // clean test name first let name = function_name.rsplit("::").next().unwrap(); let (name, test_prefixed) = if let Some(stripped) = name.strip_prefix("test_") { (stripped, true) } else { (name, false) }; // next check if we need to 
add a suffix let name = add_suffix_to_snapshot_name(Cow::Borrowed(name)); let key = format!("{}::{}", module_path.replace("::", "__"), name); // because fn foo and fn test_foo end up with the same snapshot name we // make sure we detect this here and raise an error. let mut name_clash_detection = TEST_NAME_CLASH_DETECTION .lock() .unwrap_or_else(|x| x.into_inner()); match name_clash_detection.get(&key) { None => { name_clash_detection.insert(key.clone(), test_prefixed); } Some(&was_test_prefixed) => { if was_test_prefixed != test_prefixed { panic!( "Insta snapshot name clash detected between '{name}' \ and 'test_{name}' in '{module_path}'. Rename one function." ); } } } // The rest of the code just deals with duplicates, which we in some // cases do not want to guard against. if allow_duplicates() { return Ok(name.to_string()); } // if the snapshot name clashes we need to increment a counter. // we really do not care about poisoning here. let mut counters = TEST_NAME_COUNTERS.lock().unwrap_or_else(|x| x.into_inner()); let test_idx = counters.get(&key).cloned().unwrap_or(0) + 1; let rv = if test_idx == 1 { name.to_string() } else { format!("{name}-{test_idx}") }; counters.insert(key, test_idx); Ok(rv) } /// If there is a suffix on the settings, append it to the snapshot name. 
fn add_suffix_to_snapshot_name(name: Cow<'_, str>) -> Cow<'_, str> { Settings::with(|settings| { settings .snapshot_suffix() .map(|suffix| Cow::Owned(format!("{name}@{suffix}"))) .unwrap_or_else(|| name) }) } fn get_snapshot_filename( module_path: &str, assertion_file: &str, snapshot_name: &str, cargo_workspace: &Path, is_doctest: bool, ) -> PathBuf { let root = Path::new(cargo_workspace); let base = Path::new(assertion_file); Settings::with(|settings| { root.join(base.parent().unwrap()) .join(settings.snapshot_path()) .join({ use std::fmt::Write; let mut f = String::new(); if settings.prepend_module_to_snapshot() { if is_doctest { write!( &mut f, "doctest_{}__", base.file_name() .unwrap() .to_string_lossy() .replace('.', "_") ) .unwrap(); } else { write!(&mut f, "{}__", module_path.replace("::", "__")).unwrap(); } } write!( &mut f, "{}.snap", snapshot_name.replace(&['/', '\\'][..], "__") ) .unwrap(); f }) }) } /// The context around a snapshot, such as the reference value, location, etc. /// (but not including the generated value). Responsible for saving the /// snapshot. #[derive(Debug)] struct SnapshotAssertionContext<'a> { tool_config: Arc, workspace: &'a Path, module_path: &'a str, snapshot_name: Option>, snapshot_file: Option, duplication_key: Option, old_snapshot: Option, pending_snapshots_path: Option, assertion_file: &'a str, assertion_line: u32, is_doctest: bool, snapshot_kind: SnapshotKind, } impl<'a> SnapshotAssertionContext<'a> { fn prepare( new_snapshot_value: &SnapshotValue<'a>, workspace: &'a Path, function_name: &'a str, module_path: &'a str, assertion_file: &'a str, assertion_line: u32, ) -> Result, Box> { let tool_config = get_tool_config(workspace); let snapshot_name; let mut duplication_key = None; let mut snapshot_file = None; let mut old_snapshot = None; let mut pending_snapshots_path = None; let is_doctest = is_doctest(function_name); match new_snapshot_value { SnapshotValue::FileText { name, .. } | SnapshotValue::Binary { name, .. 
} => { let name = match &name { Some(name) => add_suffix_to_snapshot_name(name.clone()), None => { if is_doctest { panic!("Cannot determine reliable names for snapshot in doctests. Please use explicit names instead."); } detect_snapshot_name(function_name, module_path) .unwrap() .into() } }; if allow_duplicates() { duplication_key = Some(format!("named:{module_path}|{name}")); } let file = get_snapshot_filename( module_path, assertion_file, &name, workspace, is_doctest, ); if fs::metadata(&file).is_ok() { match Snapshot::from_file(&file) { Ok(snapshot) => { old_snapshot = Some(snapshot); } Err(err) => { // If we can't parse the snapshot (e.g., invalid YAML, // merge conflicts, truncated file), log a warning and // proceed. The test will generate a new pending snapshot. elog!( "{}: Failed to parse snapshot file; \ a new snapshot will be generated: {}\n Error: {}", style("warning").yellow().bold(), file.display(), err ); } } } snapshot_name = Some(name); snapshot_file = Some(file); } SnapshotValue::InlineText { reference_content: contents, .. } => { if allow_duplicates() { duplication_key = Some(format!( "inline:{function_name}|{assertion_file}|{assertion_line}" )); } else { prevent_inline_duplicate(function_name, assertion_file, assertion_line); } snapshot_name = detect_snapshot_name(function_name, module_path) .ok() .map(Cow::Owned); let mut pending_file = workspace.join(assertion_file); pending_file.set_file_name(format!( ".{}.pending-snap", pending_file .file_name() .expect("no filename") .to_str() .expect("non unicode filename") )); pending_snapshots_path = Some(pending_file); old_snapshot = Some(Snapshot::from_components( module_path.replace("::", "__"), None, MetaData::default(), SnapshotContents::Text(TextSnapshotContents::from_inline_literal(contents)), )); } }; let snapshot_type = match new_snapshot_value { SnapshotValue::FileText { .. } | SnapshotValue::InlineText { .. } => SnapshotKind::Text, &SnapshotValue::Binary { extension, .. 
} => SnapshotKind::Binary { extension: extension.to_string(), }, }; Ok(SnapshotAssertionContext { tool_config, workspace, module_path, snapshot_name, snapshot_file, old_snapshot, pending_snapshots_path, assertion_file, assertion_line, duplication_key, is_doctest, snapshot_kind: snapshot_type, }) } /// Given a path returns the local path within the workspace. pub fn localize_path(&self, p: &Path) -> Option { let workspace = self.workspace.canonicalize().ok()?; let p = self.workspace.join(p).canonicalize().ok()?; p.strip_prefix(&workspace).ok().map(|x| x.to_path_buf()) } /// Creates the new snapshot from input values. pub fn new_snapshot(&self, contents: SnapshotContents, expr: &str) -> Snapshot { assert_eq!( contents.is_binary(), matches!(self.snapshot_kind, SnapshotKind::Binary { .. }) ); Snapshot::from_components( self.module_path.replace("::", "__"), self.snapshot_name.as_ref().map(|x| x.to_string()), Settings::with(|settings| MetaData { source: { let source_path = Path::new(self.assertion_file); // We need to compute a relative path from the workspace to the source file. // This is necessary for workspace setups where the project is not a direct // child of the workspace root (e.g., when workspace and project are siblings). // We canonicalize paths first to properly handle symlinks. 
let canonicalized_base = self.workspace.canonicalize().ok(); let canonicalized_path = source_path.canonicalize().ok(); let relative = if let (Some(base), Some(path)) = (canonicalized_base, canonicalized_path) { path_relative_from(&path, &base) .unwrap_or_else(|| source_path.to_path_buf()) } else { // If canonicalization fails, try with original paths path_relative_from(source_path, self.workspace) .unwrap_or_else(|| source_path.to_path_buf()) }; Some(path_to_storage(&relative)) }, assertion_line: Some(self.assertion_line), description: settings.description().map(Into::into), expression: if settings.omit_expression() { None } else { Some(expr.to_string()) }, info: settings.info().map(ToOwned::to_owned), input_file: settings .input_file() .and_then(|x| self.localize_path(x)) .map(|x| path_to_storage(&x)), snapshot_kind: self.snapshot_kind.clone(), }), contents, ) } /// Cleanup logic for passing snapshots. pub fn cleanup_passing(&self) -> Result<(), Box> { // let's just make sure there are no more pending files lingering // around. if let Some(ref snapshot_file) = self.snapshot_file { let target_path = pending_snapshot_path(self.workspace, snapshot_file); let new_file = target_path.with_extension("snap.new"); fs::remove_file(new_file).ok(); } // and add a null pending snapshot to a pending snapshot file if needed if let Some(ref pending_snapshots) = self.pending_snapshots_path { let target_path = pending_snapshot_path(self.workspace, pending_snapshots); if fs::metadata(&target_path).is_ok() { PendingInlineSnapshot::new(None, None, self.assertion_line).save(&target_path)?; } } Ok(()) } /// Removes any old .snap.new.* files that belonged to previous pending snapshots. This should /// only ever remove maximum one file because we do this every time before we create a new /// pending snapshot. 
pub fn cleanup_previous_pending_binary_snapshots(&self) -> Result<(), Box> { if let Some(ref path) = self.snapshot_file { // Use pending directory if set let target_path = pending_snapshot_path(self.workspace, path); // The file name to compare against has to be valid utf-8 as it is generated by this crate // out of utf-8 strings. let file_name_prefix = format!( "{}.new.", target_path.file_name().unwrap().to_str().unwrap() ); let read_dir = target_path.parent().unwrap().read_dir(); match read_dir { Err(e) if e.kind() == ErrorKind::NotFound => return Ok(()), _ => (), } // We have to loop over where whole directory here because there is no filesystem API // for getting files by prefix. for entry in read_dir? { let entry = entry?; let entry_file_name = entry.file_name(); // We'll just skip over files with non-utf-8 names. The assumption being that those // would not have been generated by this crate. if entry_file_name .to_str() .map(|f| f.starts_with(&file_name_prefix)) .unwrap_or(false) { std::fs::remove_file(entry.path())?; } } } Ok(()) } /// Writes the changes of the snapshot back. pub fn update_snapshot( &self, new_snapshot: Snapshot, ) -> Result> { // TODO: this seems to be making `unseen` be true when there is an // existing snapshot file; which seems wrong?? let unseen = self .snapshot_file .as_ref() .map_or(false, |x| fs::metadata(x).is_ok()); let should_print = self.tool_config.output_behavior() != OutputBehavior::Nothing; let snapshot_update = snapshot_update_behavior(&self.tool_config, unseen); // If snapshot_update is `InPlace` and we have an inline snapshot, then // use `NewFile`, since we can't use `InPlace` for inline. `cargo-insta` // then accepts all snapshots at the end of the test. 
let snapshot_update = // TODO: could match on the snapshot kind instead of whether snapshot_file is None if snapshot_update == SnapshotUpdateBehavior::InPlace && self.snapshot_file.is_none() { SnapshotUpdateBehavior::NewFile } else { snapshot_update }; match snapshot_update { SnapshotUpdateBehavior::InPlace => { if let Some(ref snapshot_file) = self.snapshot_file { new_snapshot.save(snapshot_file)?; if should_print { elog!( "{} {}", style("updated snapshot").green(), style(snapshot_file.display()).cyan().underlined(), ); } } else { // Checked self.snapshot_file.is_none() above unreachable!() } } SnapshotUpdateBehavior::NewFile => { if let Some(ref snapshot_file) = self.snapshot_file { // File snapshot - use pending directory if set let target_path = pending_snapshot_path(self.workspace, snapshot_file); let new_path = new_snapshot.save_new(&target_path)?; if should_print { elog!( "{} {}", style("stored new snapshot").green(), style(new_path.display()).cyan().underlined(), ); } } else if self.is_doctest { if should_print { elog!( "{}", style("warning: cannot update inline snapshots in doctests") .red() .bold(), ); } } else { // Inline snapshot - use pending directory if set let pending_path = self.pending_snapshots_path.as_ref().unwrap(); let target_path = pending_snapshot_path(self.workspace, pending_path); PendingInlineSnapshot::new( Some(new_snapshot), self.old_snapshot.clone(), self.assertion_line, ) .save(&target_path)?; } } SnapshotUpdateBehavior::NoUpdate => {} } Ok(snapshot_update) } /// This prints the information about the snapshot fn print_snapshot_info(&self, new_snapshot: &Snapshot) { let mut printer = SnapshotPrinter::new(self.workspace, self.old_snapshot.as_ref(), new_snapshot); printer.set_line(Some(self.assertion_line)); printer.set_snapshot_file(self.snapshot_file.as_deref()); printer.set_title(Some("Snapshot Summary")); printer.set_show_info(true); match self.tool_config.output_behavior() { OutputBehavior::Summary => { printer.print(); } 
OutputBehavior::Diff => { printer.set_show_diff(true); printer.print(); } _ => {} } } /// Finalizes the assertion when the snapshot comparison fails, potentially /// panicking to fail the test fn finalize(&self, update_result: SnapshotUpdateBehavior) { // if we are in glob mode, we want to adjust the finalization // so that we do not show the hints immediately. let fail_fast = { #[cfg(feature = "glob")] { if let Some(top) = crate::glob::GLOB_STACK.lock().unwrap().last() { top.fail_fast } else { true } } #[cfg(not(feature = "glob"))] { true } }; if fail_fast && update_result == SnapshotUpdateBehavior::NewFile && self.tool_config.output_behavior() != OutputBehavior::Nothing && !self.is_doctest { println!( "{hint}", hint = style("To update snapshots run `cargo insta review`").dim(), ); } if update_result != SnapshotUpdateBehavior::InPlace && !self.tool_config.force_pass() { if fail_fast && self.tool_config.output_behavior() != OutputBehavior::Nothing { let msg = if env::var("INSTA_CARGO_INSTA") == Ok("1".to_string()) { "Stopped on the first failure." } else { "Stopped on the first failure. Run `cargo insta test` to run all snapshots." }; println!("{hint}", hint = style(msg).dim(),); } // if we are in glob mode, count the failures and print the // errors instead of panicking. The glob will then panic at // the end. 
#[cfg(feature = "glob")] { let mut stack = crate::glob::GLOB_STACK.lock().unwrap(); if let Some(glob_collector) = stack.last_mut() { glob_collector.failed += 1; if update_result == SnapshotUpdateBehavior::NewFile && self.tool_config.output_behavior() != OutputBehavior::Nothing { glob_collector.show_insta_hint = true; } print_or_panic!( fail_fast, "snapshot assertion from glob for '{}' failed in line {}", self.snapshot_name.as_deref().unwrap_or("unnamed snapshot"), self.assertion_line ); return; } } panic!( "snapshot assertion for '{}' failed in line {}", self.snapshot_name.as_deref().unwrap_or("unnamed snapshot"), self.assertion_line ); } } } /// Computes a relative path from `base` to `path`, returning a path with `../` components /// if necessary. /// /// This function is vendored from the old Rust standard library implementation /// (pre-1.0, removed in RFC 474) and is distributed under the same terms as the /// Rust project (MIT/Apache-2.0 dual license). /// /// Unlike `Path::strip_prefix`, this function can handle cases where `path` is not /// a descendant of `base`, making it suitable for finding relative paths between /// arbitrary directories (e.g., between sibling directories in a workspace). 
fn path_relative_from(path: &Path, base: &Path) -> Option { use std::path::Component; if path.is_absolute() != base.is_absolute() { if path.is_absolute() { Some(PathBuf::from(path)) } else { None } } else { let mut ita = path.components(); let mut itb = base.components(); let mut comps: Vec = vec![]; loop { match (ita.next(), itb.next()) { (None, None) => break, (Some(a), None) => { comps.push(a); comps.extend(ita.by_ref()); break; } (None, _) => comps.push(Component::ParentDir), (Some(a), Some(b)) if comps.is_empty() && a == b => {} (Some(a), Some(_b)) => { comps.push(Component::ParentDir); for _ in itb { comps.push(Component::ParentDir); } comps.push(a); comps.extend(ita.by_ref()); break; } } } Some(comps.iter().map(|c| c.as_os_str()).collect()) } } fn prevent_inline_duplicate(function_name: &str, assertion_file: &str, assertion_line: u32) { let key = format!("{function_name}|{assertion_file}|{assertion_line}"); let mut set = INLINE_DUPLICATES.lock().unwrap(); if set.contains(&key) { // drop the lock so we don't poison it drop(set); panic!( "Insta does not allow inline snapshot assertions in loops. \ Wrap your assertions in allow_duplicates! to change this." 
); } set.insert(key); } fn record_snapshot_duplicate( results: &mut BTreeMap, snapshot: &Snapshot, ctx: &SnapshotAssertionContext, ) { let key = ctx.duplication_key.as_deref().unwrap(); if let Some(prev_snapshot) = results.get(key) { if prev_snapshot.contents() != snapshot.contents() { println!("Snapshots in allow-duplicates block do not match."); let mut printer = SnapshotPrinter::new(ctx.workspace, Some(prev_snapshot), snapshot); printer.set_line(Some(ctx.assertion_line)); printer.set_snapshot_file(ctx.snapshot_file.as_deref()); printer.set_title(Some("Differences in Block")); printer.set_snapshot_hints("previous assertion", "current assertion"); if ctx.tool_config.output_behavior() == OutputBehavior::Diff { printer.set_show_diff(true); } printer.print(); panic!( "snapshot assertion for '{}' failed in line {}. Result \ does not match previous snapshot in allow-duplicates block.", ctx.snapshot_name.as_deref().unwrap_or("unnamed snapshot"), ctx.assertion_line ); } } else { results.insert(key.to_string(), snapshot.clone()); } } /// Do we allow recording of duplicates? fn allow_duplicates() -> bool { RECORDED_DUPLICATES.with(|x| !x.borrow().is_empty()) } /// Helper function to support perfect duplicate detection. pub fn with_allow_duplicates(f: F) -> R where F: FnOnce() -> R, { RECORDED_DUPLICATES.with(|x| x.borrow_mut().push(BTreeMap::new())); let rv = std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)); RECORDED_DUPLICATES.with(|x| x.borrow_mut().pop().unwrap()); match rv { Ok(rv) => rv, Err(payload) => std::panic::resume_unwind(payload), } } /// This function is invoked from the macros to run the main assertion logic. /// /// This will create the assertion context, run the main logic to assert /// on snapshots and write changes to the pending snapshot files. It will /// also print the necessary bits of information to the output and fail the /// assertion with a panic if needed. 
#[allow(clippy::too_many_arguments)] pub fn assert_snapshot( snapshot_value: SnapshotValue<'_>, workspace: &Path, function_name: &str, module_path: &str, assertion_file: &str, assertion_line: u32, expr: &str, ) -> Result<(), Box> { let ctx = SnapshotAssertionContext::prepare( &snapshot_value, workspace, function_name, module_path, assertion_file, assertion_line, )?; ctx.cleanup_previous_pending_binary_snapshots()?; let content = match snapshot_value { SnapshotValue::FileText { content, .. } | SnapshotValue::InlineText { content, .. } => { // apply filters if they are available #[cfg(feature = "filters")] let content = Settings::with(|settings| settings.filters().apply_to(content)); let kind = match ctx.snapshot_file { Some(_) => TextSnapshotKind::File, None => TextSnapshotKind::Inline, }; TextSnapshotContents::new(content.into(), kind).into() } SnapshotValue::Binary { content, extension, .. } => { assert!( extension != "new", "'.new' is not allowed as a file extension" ); assert!( !extension.starts_with("new."), "file extensions starting with 'new.' are not allowed", ); SnapshotContents::Binary(Rc::new(content)) } }; let new_snapshot = ctx.new_snapshot(content, expr); // memoize the snapshot file if requested, as part of potentially removing unreferenced snapshots if let Some(ref snapshot_file) = ctx.snapshot_file { memoize_snapshot_file(snapshot_file); } // If we allow assertion with duplicates, we record the duplicate now. This will // in itself fail the assertion if the previous visit of the same assertion macro // did not yield the same result. 
RECORDED_DUPLICATES.with(|x| { if let Some(results) = x.borrow_mut().last_mut() { record_snapshot_duplicate(results, &new_snapshot, &ctx); } }); let pass = ctx .old_snapshot .as_ref() .map(|x| { if ctx.tool_config.require_full_match() { x.matches_fully(&new_snapshot) } else { x.matches(&new_snapshot) } }) .unwrap_or(false); if pass { ctx.cleanup_passing()?; if matches!( ctx.tool_config.snapshot_update(), crate::env::SnapshotUpdate::Force ) { ctx.update_snapshot(new_snapshot)?; } // otherwise print information and update snapshots. } else { ctx.print_snapshot_info(&new_snapshot); let update_result = ctx.update_snapshot(new_snapshot)?; ctx.finalize(update_result); } Ok(()) } #[allow(rustdoc::private_doc_tests)] /// Test snapshots in doctests. /// /// ``` /// // this is only working on newer rust versions /// extern crate rustc_version; /// use rustc_version::{Version, version}; /// if version().unwrap() > Version::parse("1.72.0").unwrap() { /// insta::assert_debug_snapshot!("named", vec![1, 2, 3, 4, 5]); /// } /// ``` /// /// ```should_panic /// insta::assert_debug_snapshot!(vec![1, 2, 3, 4, 5]); /// ``` /// /// ``` /// let some_string = "Coucou je suis un joli bug"; /// insta::assert_snapshot!(some_string, @"Coucou je suis un joli bug"); /// ``` /// /// ``` /// let some_string = "Coucou je suis un joli bug"; /// insta::assert_snapshot!(some_string, @"Coucou je suis un joli bug"); /// ``` const _DOCTEST1: bool = false; insta-1.46.1/src/select_grammar.pest000064400000000000000000000012031046102023000155000ustar 00000000000000WHITESPACE = _{ WHITE_SPACE } ident = @{ ( "_" | "$" | XID_START ) ~ XID_CONTINUE* } deep_wildcard = { "." ~ "**" } wildcard = { "." ~ "*" } key = @{ "." ~ ident } int = { "-"? 
~ NUMBER+ } string = @{ "\"" ~ (!("\"") ~ ANY)* ~ "\""} subscript = { "[" ~ ( string | int ) ~ "]" } full_range = { "[" ~ "]" } range = { "[" ~ int ~ ":" ~ int ~ "]" } range_to = { "[" ~ ":" ~ int ~ "]" } range_from = { "[" ~ int ~ ":]" } segment = _{ deep_wildcard | wildcard | key | subscript | full_range | range | range_to | range_from } identity = { "." } selector = { (segment+ | identity) } selectors = { SOI ~ selector ~ ("," ~ selector)* ~ ","? ~ EOI } insta-1.46.1/src/serialization.rs000064400000000000000000000203241046102023000150460ustar 00000000000000use serde::{de::value::Error as ValueError, Serialize}; #[cfg(feature = "ron")] use std::borrow::Cow; #[cfg(feature = "toml")] use { core::str::FromStr, toml_edit::{visit_mut::*, Array, Item, Table, Value}, toml_writer::ToTomlValue, }; use crate::{ content::{json, yaml, Content, ContentSerializer}, settings::Settings, }; pub enum SerializationFormat { #[cfg(feature = "csv")] Csv, #[cfg(feature = "ron")] Ron, #[cfg(feature = "toml")] Toml, Yaml, Json, JsonCompact, } #[derive(Debug)] pub enum SnapshotLocation { Inline, File, } pub fn serialize_content(mut content: Content, format: SerializationFormat) -> String { content = Settings::with(|settings| { if settings.sort_maps() { content.sort_maps(); } #[cfg(feature = "redactions")] { content = settings.apply_redactions(content); } content }); match format { SerializationFormat::Yaml => yaml::to_string(&content)[4..].to_string(), SerializationFormat::Json => json::to_string_pretty(&content), SerializationFormat::JsonCompact => json::to_string_compact(&content), #[cfg(feature = "csv")] SerializationFormat::Csv => { let mut buf = Vec::with_capacity(128); { let mut writer = csv::Writer::from_writer(&mut buf); // if the top-level content we're serializing is a vector we // want to serialize it multiple times once for each item. 
if let Some(content_slice) = content.as_slice() { for content in content_slice { writer.serialize(content).unwrap(); } } else { writer.serialize(&content).unwrap(); } writer.flush().unwrap(); } if buf.ends_with(b"\n") { buf.truncate(buf.len() - 1); } String::from_utf8(buf).unwrap() } #[cfg(feature = "ron")] SerializationFormat::Ron => { let mut buf = String::new(); let mut config = ron::ser::PrettyConfig::new(); config.new_line = Cow::Borrowed("\n"); config.indentor = Cow::Borrowed(" "); config.struct_names = true; let mut serializer = ron::ser::Serializer::with_options( &mut buf, Some(config), &ron::options::Options::default(), ) .unwrap(); content.serialize(&mut serializer).unwrap(); buf } #[cfg(feature = "toml")] SerializationFormat::Toml => { struct Pretty { in_value: bool, } impl VisitMut for Pretty { fn visit_item_mut(&mut self, node: &mut Item) { let decor = if let Item::Value(Value::InlineTable(t)) = node { Some(t.decor().clone()) } else { None }; if !self.in_value { let other = std::mem::take(node); let other = match other.into_table().map(Item::Table) { Ok(i) => i, Err(i) => i, }; let other = match other.into_array_of_tables().map(Item::ArrayOfTables) { Ok(i) => i, Err(i) => i, }; *node = other; } if let Item::Table(table) = node { if let Some(decor) = decor { *table.decor_mut() = decor; } for (_key, value) in table.iter_mut() { if let Item::Value(Value::InlineTable(inner)) = value { inner.decor_mut().set_prefix(""); } } } visit_item_mut(self, node); } fn visit_table_mut(&mut self, node: &mut Table) { if !node.is_empty() { node.set_implicit(true); } visit_table_mut(self, node); } fn visit_value_mut(&mut self, node: &mut Value) { if let Value::String(f) = node { let builder = toml_writer::TomlStringBuilder::new(f.value().as_str()); let formatted = builder .as_literal() .or_else(|| builder.as_ml_literal()) .unwrap_or_else(|| builder.as_default()) .to_toml_value(); if let Ok(value) = Value::from_str(&formatted) { *node = value; } } node.decor_mut().clear(); 
let old_in_value = self.in_value; self.in_value = true; visit_value_mut(self, node); self.in_value = old_in_value; } fn visit_array_mut(&mut self, node: &mut Array) { visit_array_mut(self, node); if (0..=1).contains(&node.len()) { node.set_trailing(""); node.set_trailing_comma(false); } else { for item in node.iter_mut() { item.decor_mut().set_prefix("\n "); } node.set_trailing("\n"); node.set_trailing_comma(true); } } } let mut dm = toml_edit::ser::to_document(&content).unwrap(); let mut visitor = Pretty { in_value: false }; visitor.visit_document_mut(&mut dm); let mut rv = dm.to_string(); if rv.ends_with('\n') { rv.truncate(rv.len() - 1); } rv } } } pub fn serialize_value(s: &S, format: SerializationFormat) -> String { let serializer = ContentSerializer::::new(); let content = Serialize::serialize(s, serializer).unwrap(); serialize_content(content, format) } #[cfg(feature = "redactions")] pub fn serialize_value_redacted( s: &S, redactions: &[(crate::redaction::Selector, crate::redaction::Redaction)], format: SerializationFormat, ) -> String { let serializer = ContentSerializer::::new(); let mut content = Serialize::serialize(s, serializer).unwrap(); for (selector, redaction) in redactions { content = selector.redact(content, redaction); } serialize_content(content, format) } #[test] fn test_yaml_serialization() { let yaml = serialize_content( Content::Map(vec![ ( Content::from("env"), Content::Seq(vec![ Content::from("ENVIRONMENT"), Content::from("production"), ]), ), ( Content::from("cmdline"), Content::Seq(vec![Content::from("my-tool"), Content::from("run")]), ), ]), SerializationFormat::Yaml, ); crate::assert_snapshot!(&yaml, @" env: - ENVIRONMENT - production cmdline: - my-tool - run "); let inline_yaml = serialize_content( Content::Map(vec![ ( Content::from("env"), Content::Seq(vec![ Content::from("ENVIRONMENT"), Content::from("production"), ]), ), ( Content::from("cmdline"), Content::Seq(vec![Content::from("my-tool"), Content::from("run")]), ), ]), 
SerializationFormat::Yaml, ); crate::assert_snapshot!(&inline_yaml, @" env: - ENVIRONMENT - production cmdline: - my-tool - run "); } insta-1.46.1/src/settings.rs000064400000000000000000000532651046102023000140430ustar 00000000000000use once_cell::sync::Lazy; #[cfg(feature = "serde")] use serde::{de::value::Error as ValueError, Serialize}; use std::cell::RefCell; use std::future::Future; use std::mem; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use crate::content::Content; #[cfg(feature = "serde")] use crate::content::ContentSerializer; #[cfg(feature = "filters")] use crate::filters::Filters; #[cfg(feature = "redactions")] use crate::redaction::{dynamic_redaction, sorted_redaction, ContentPath, Redaction, Selector}; static DEFAULT_SETTINGS: Lazy> = Lazy::new(|| { Arc::new(ActualSettings { sort_maps: false, snapshot_path: "snapshots".into(), snapshot_suffix: "".into(), input_file: None, description: None, info: None, omit_expression: false, prepend_module_to_snapshot: true, #[cfg(feature = "redactions")] redactions: Redactions::default(), #[cfg(feature = "filters")] filters: Filters::default(), #[cfg(feature = "glob")] allow_empty_glob: false, }) }); thread_local!(static CURRENT_SETTINGS: RefCell = RefCell::new(Settings::new())); /// Represents stored redactions. #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] #[derive(Clone, Default)] pub struct Redactions(Vec<(Selector<'static>, Arc)>); #[cfg(feature = "redactions")] impl<'a> From> for Redactions { fn from(value: Vec<(&'a str, Redaction)>) -> Redactions { Redactions( value .into_iter() .map(|x| (Selector::parse(x.0).unwrap().make_static(), Arc::new(x.1))) .collect(), ) } } #[cfg(feature = "redactions")] impl Redactions { /// Applies all redactions to the given content. 
pub(crate) fn apply_to_content(&self, mut content: Content) -> Content { for (selector, redaction) in self.0.iter() { content = selector.redact(content, redaction); } content } } #[derive(Clone)] #[doc(hidden)] pub struct ActualSettings { pub sort_maps: bool, pub snapshot_path: PathBuf, pub snapshot_suffix: String, pub input_file: Option, pub description: Option, pub info: Option, pub omit_expression: bool, pub prepend_module_to_snapshot: bool, #[cfg(feature = "redactions")] pub redactions: Redactions, #[cfg(feature = "filters")] pub filters: Filters, #[cfg(feature = "glob")] pub allow_empty_glob: bool, } impl ActualSettings { pub fn sort_maps(&mut self, value: bool) { self.sort_maps = value; } pub fn snapshot_path>(&mut self, path: P) { self.snapshot_path = path.as_ref().to_path_buf(); } pub fn snapshot_suffix>(&mut self, suffix: I) { self.snapshot_suffix = suffix.into(); } pub fn input_file>(&mut self, p: P) { self.input_file = Some(p.as_ref().to_path_buf()); } pub fn description>(&mut self, value: S) { self.description = Some(value.into()); } #[cfg(feature = "serde")] pub fn info(&mut self, s: &S) { let serializer = ContentSerializer::::new(); let content = Serialize::serialize(s, serializer).unwrap(); // Apply redactions to metadata immediately when set. Unlike snapshot // content (which is redacted lazily during serialization), metadata is // redacted eagerly to ensure sensitive data never reaches the stored // settings. The redacted content is then written to the snapshot file // as-is without further redaction. 
#[cfg(feature = "redactions")] let content = self.redactions.apply_to_content(content); self.info = Some(content); } pub fn raw_info(&mut self, content: &Content) { self.info = Some(content.to_owned()); } pub fn omit_expression(&mut self, value: bool) { self.omit_expression = value; } pub fn prepend_module_to_snapshot(&mut self, value: bool) { self.prepend_module_to_snapshot = value; } #[cfg(feature = "redactions")] pub fn redactions>(&mut self, r: R) { self.redactions = r.into(); } #[cfg(feature = "filters")] pub fn filters>(&mut self, f: F) { self.filters = f.into(); } #[cfg(feature = "glob")] pub fn allow_empty_glob(&mut self, value: bool) { self.allow_empty_glob = value; } } /// Configures how insta operates at test time. /// /// Settings are always bound to a thread, and some default settings are always /// available. These settings can be changed and influence how insta behaves on /// that thread. They can be either temporarily or permanently changed. /// /// This can be used to influence how the snapshot macros operate. /// For instance, it can be useful to force ordering of maps when /// unordered structures are used through settings. /// /// Some of the settings can be changed but shouldn't as it will make it harder /// for tools like cargo-insta or an editor integration to locate the snapshot /// files. /// /// Settings can also be configured with the [`with_settings!`] macro. /// /// Example: /// /// ```ignore /// use insta; /// /// let mut settings = insta::Settings::clone_current(); /// settings.set_sort_maps(true); /// settings.bind(|| { /// // runs the assertion with the changed settings enabled /// insta::assert_snapshot!(...); /// }); /// ``` #[derive(Clone)] pub struct Settings { inner: Arc, } impl Default for Settings { fn default() -> Settings { Settings { inner: DEFAULT_SETTINGS.clone(), } } } impl Settings { /// Returns the default settings. 
/// /// It's recommended to use [`Self::clone_current`] instead so that /// already applied modifications are not discarded. pub fn new() -> Settings { Settings::default() } /// Returns a copy of the current settings. pub fn clone_current() -> Settings { Settings::with(|x| x.clone()) } /// Internal helper for macros #[doc(hidden)] pub fn _private_inner_mut(&mut self) -> &mut ActualSettings { Arc::make_mut(&mut self.inner) } /// Enables forceful sorting of maps before serialization. /// /// Note that this only applies to snapshots that undergo serialization /// (eg: does not work for [`assert_debug_snapshot!`](crate::assert_debug_snapshot!)). /// /// The default value is `false`. pub fn set_sort_maps(&mut self, value: bool) { self._private_inner_mut().sort_maps = value; } /// Returns the current value for map sorting. pub fn sort_maps(&self) -> bool { self.inner.sort_maps } /// Disables prepending of modules to the snapshot filename. /// /// By default, the filename of a snapshot is `__.snap`. /// Setting this flag to `false` changes the snapshot filename to just /// `.snap`. /// /// The default value is `true`. pub fn set_prepend_module_to_snapshot(&mut self, value: bool) { self._private_inner_mut().prepend_module_to_snapshot(value); } /// Returns the current value for module name prepending. pub fn prepend_module_to_snapshot(&self) -> bool { self.inner.prepend_module_to_snapshot } /// Allows the [`glob!`] macro to succeed if it matches no files. /// /// By default, the glob macro will fail the test if it does not find /// any files to prevent accidental typos. This can be disabled when /// fixtures should be conditional. /// /// The default value is `false`. #[cfg(feature = "glob")] pub fn set_allow_empty_glob(&mut self, value: bool) { self._private_inner_mut().allow_empty_glob(value); } /// Returns the current value for the empty glob setting. 
#[cfg(feature = "glob")] pub fn allow_empty_glob(&self) -> bool { self.inner.allow_empty_glob } /// Sets the snapshot suffix. /// /// The snapshot suffix is added to all snapshot names with an `@` sign /// between. For instance, if the snapshot suffix is set to `"foo"`, and /// the snapshot would be named `"snapshot"`, it turns into `"snapshot@foo"`. /// This is useful to separate snapshots if you want to use test /// parameterization. pub fn set_snapshot_suffix>(&mut self, suffix: I) { self._private_inner_mut().snapshot_suffix(suffix); } /// Removes the snapshot suffix. pub fn remove_snapshot_suffix(&mut self) { self.set_snapshot_suffix(""); } /// Returns the current snapshot suffix. pub fn snapshot_suffix(&self) -> Option<&str> { if self.inner.snapshot_suffix.is_empty() { None } else { Some(&self.inner.snapshot_suffix) } } /// Sets the input file reference. /// /// This value is completely unused by the snapshot testing system, but it /// allows storing some metadata with a snapshot that refers back to the /// input file. The path stored here is made relative to the workspace root /// before storing with the snapshot. pub fn set_input_file>(&mut self, p: P) { self._private_inner_mut().input_file(p); } /// Removes the input file reference. pub fn remove_input_file(&mut self) { self._private_inner_mut().input_file = None; } /// Returns the current input file reference. pub fn input_file(&self) -> Option<&Path> { self.inner.input_file.as_deref() } /// Sets the description. /// /// The description is stored alongside the snapshot and will be displayed /// in the diff UI. When a snapshot is captured, the Rust expression for that /// snapshot is always retained. However, sometimes that information is not /// super useful by itself, particularly when working with loops and generated /// tests. In that case the `description` can be set as extra information. /// /// See also [`Self::set_info`]. 
pub fn set_description>(&mut self, value: S) { self._private_inner_mut().description(value); } /// Removes the description. pub fn remove_description(&mut self) { self._private_inner_mut().description = None; } /// Returns the current description pub fn description(&self) -> Option<&str> { self.inner.description.as_deref() } /// Sets the info. /// /// The `info` is similar to `description` but for structured data. This is /// stored with the snapshot and shown in the review UI. This for instance /// can be used to show extended information that can make a reviewer better /// understand what the snapshot is supposed to be testing. /// /// As an example the input parameters to the function that creates the snapshot /// can be persisted here. /// /// **Note:** Redactions configured via [`Self::add_redaction`] are automatically /// applied to the info metadata when it is set. /// /// Alternatively you can use [`Self::set_raw_info`] instead. #[cfg(feature = "serde")] #[cfg_attr(docsrs, doc(cfg(feature = "serde")))] pub fn set_info(&mut self, s: &S) { self._private_inner_mut().info(s); } /// Sets the info from a content object. /// /// This works like [`Self::set_info`] but does not require [`serde`]. /// /// **Note:** Unlike [`Self::set_info`], this method does NOT automatically apply /// redactions. If you need redactions applied to metadata, use [`Self::set_info`] /// instead (which requires the `serde` feature). pub fn set_raw_info(&mut self, content: &Content) { self._private_inner_mut().raw_info(content); } /// Removes the info. pub fn remove_info(&mut self) { self._private_inner_mut().info = None; } /// Returns the current info pub(crate) fn info(&self) -> Option<&Content> { self.inner.info.as_ref() } /// Returns the current info pub fn has_info(&self) -> bool { self.inner.info.is_some() } /// If set to true, does not retain the expression in the snapshot. 
pub fn set_omit_expression(&mut self, value: bool) { self._private_inner_mut().omit_expression(value); } /// Returns true if expressions are omitted from snapshots. pub fn omit_expression(&self) -> bool { self.inner.omit_expression } /// Registers redactions that should be applied. /// /// This can be useful if redactions must be shared across multiple /// snapshots. /// /// Note that this only applies to snapshots that undergo serialization /// (eg: does not work for [`assert_debug_snapshot!`](crate::assert_debug_snapshot!).) #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn add_redaction>(&mut self, selector: &str, replacement: R) { self.add_redaction_impl(selector, replacement.into()) } #[cfg(feature = "redactions")] fn add_redaction_impl(&mut self, selector: &str, replacement: Redaction) { self._private_inner_mut().redactions.0.push(( Selector::parse(selector).unwrap().make_static(), Arc::new(replacement), )); } /// Registers a replacement callback. /// /// This works similar to a redaction but instead of changing the value it /// asserts the value at a certain place. This function is internally /// supposed to call things like [`assert_eq!`]. /// /// This is a shortcut to `add_redaction(selector, dynamic_redaction(...))`; #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn add_dynamic_redaction(&mut self, selector: &str, func: F) where I: Into, F: Fn(Content, ContentPath<'_>) -> I + Send + Sync + 'static, { self.add_redaction(selector, dynamic_redaction(func)); } /// A special redaction that sorts a sequence or map. /// /// This is a shortcut to `add_redaction(selector, sorted_redaction())`. #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn sort_selector(&mut self, selector: &str) { self.add_redaction(selector, sorted_redaction()); } /// Replaces the currently set redactions. /// /// The default set is empty. 
#[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn set_redactions>(&mut self, redactions: R) { self._private_inner_mut().redactions(redactions); } /// Removes all redactions. #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub fn clear_redactions(&mut self) { self._private_inner_mut().redactions.0.clear(); } /// Apply redactions to content. #[cfg(feature = "redactions")] #[cfg_attr(docsrs, doc(cfg(feature = "redactions")))] pub(crate) fn apply_redactions(&self, content: Content) -> Content { self.inner.redactions.apply_to_content(content) } /// Adds a new filter. /// /// Filters are similar to redactions but are applied as regex onto the final snapshot /// value. This can be used to perform modifications to the snapshot string that would /// be impossible to do with redactions because for instance the value is just a string. /// /// The first argument is the [`regex`] pattern to apply, the second is a replacement /// string. The replacement string has the same functionality as the second argument /// to [`regex::Regex::replace`]. /// /// This is useful to perform some cleanup procedures on the snapshot for unstable values. /// /// ```rust /// # use insta::Settings; /// # async fn foo() { /// # let mut settings = Settings::new(); /// settings.add_filter(r"\b[[:xdigit:]]{32}\b", "[UID]"); /// # } /// ``` #[cfg(feature = "filters")] #[cfg_attr(docsrs, doc(cfg(feature = "filters")))] pub fn add_filter>(&mut self, regex: &str, replacement: S) { self._private_inner_mut().filters.add(regex, replacement); } /// Replaces the currently set filters. /// /// The default set is empty. #[cfg(feature = "filters")] #[cfg_attr(docsrs, doc(cfg(feature = "filters")))] pub fn set_filters>(&mut self, filters: F) { self._private_inner_mut().filters(filters); } /// Removes all filters. 
#[cfg(feature = "filters")] #[cfg_attr(docsrs, doc(cfg(feature = "filters")))] pub fn clear_filters(&mut self) { self._private_inner_mut().filters.clear(); } /// Returns the current filters #[cfg(feature = "filters")] #[cfg_attr(docsrs, doc(cfg(feature = "filters")))] pub(crate) fn filters(&self) -> &Filters { &self.inner.filters } /// Sets the snapshot path. /// /// If not absolute, it's relative to where the test is in. /// /// Defaults to `snapshots`. pub fn set_snapshot_path>(&mut self, path: P) { self._private_inner_mut().snapshot_path(path); } /// Returns the snapshot path. pub fn snapshot_path(&self) -> &Path { &self.inner.snapshot_path } /// Runs a function with the current settings bound to the thread. /// /// This is an alternative to [`Self::bind_to_scope`]() /// which does not require holding on to a drop guard. The return value /// of the closure is passed through. /// /// ``` /// # use insta::Settings; /// let mut settings = Settings::clone_current(); /// settings.set_sort_maps(true); /// settings.bind(|| { /// // do stuff here /// }); /// ``` pub fn bind R, R>(&self, f: F) -> R { let _guard = self.bind_to_scope(); f() } /// Like [`Self::bind`] but for futures. /// /// This lets you bind settings for the duration of a future like this: /// /// ```rust /// # use insta::Settings; /// # async fn foo() { /// let settings = Settings::new(); /// settings.bind_async(async { /// // do assertions here /// }).await; /// # } /// ``` pub fn bind_async, T>(&self, future: F) -> impl Future { struct BindingFuture { settings: Arc, future: F, } impl Future for BindingFuture { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let inner = self.settings.clone(); // SAFETY: This is okay because `future` is pinned when `self` is. 
let future = unsafe { self.map_unchecked_mut(|s| &mut s.future) }; CURRENT_SETTINGS.with(|x| { let old = { let mut current = x.borrow_mut(); let old = current.inner.clone(); current.inner = inner; old }; let rv = future.poll(cx); let mut current = x.borrow_mut(); current.inner = old; rv }) } } BindingFuture { settings: self.inner.clone(), future, } } /// Binds the settings to the current thread and resets when the drop /// guard is released. /// /// This is the recommended way to temporarily bind settings. It replaces /// the earlier [`bind_to_scope`](Settings::bind_to_scope), and relies on /// drop guards. An alternative is [`bind`](Settings::bind), which binds /// for the duration of the block it wraps. /// /// ``` /// # use insta::Settings; /// let mut settings = Settings::clone_current(); /// settings.set_sort_maps(true); /// let _guard = settings.bind_to_scope(); /// // do stuff here /// ``` pub fn bind_to_scope(&self) -> SettingsBindDropGuard { CURRENT_SETTINGS.with(|x| { let mut x = x.borrow_mut(); let old = mem::replace(&mut x.inner, self.inner.clone()); SettingsBindDropGuard(Some(old), std::marker::PhantomData) }) } /// Runs a function with the current settings. pub(crate) fn with R>(f: F) -> R { CURRENT_SETTINGS.with(|x| f(&x.borrow())) } } /// Returned from [`Settings::bind_to_scope`] /// /// This type is not shareable between threads: /// /// ```compile_fail E0277 /// let mut settings = insta::Settings::clone_current(); /// settings.set_snapshot_suffix("test drop guard"); /// let guard = settings.bind_to_scope(); /// /// std::thread::spawn(move || { let guard = guard; }); // doesn't compile /// ``` /// /// This is to ensure tests under async runtimes like `tokio` don't show unexpected results #[must_use = "The guard is immediately dropped so binding has no effect. 
Use `let _guard = ...` to bind it."] pub struct SettingsBindDropGuard( Option>, /// A ZST that is not [`Send`] but is [`Sync`] /// /// This is necessary due to the lack of stable [negative impls](https://github.com/rust-lang/rust/issues/68318). /// /// Required as [`SettingsBindDropGuard`] modifies a thread local variable which would end up /// with unexpected results if sent to a different thread. std::marker::PhantomData>, ); impl Drop for SettingsBindDropGuard { fn drop(&mut self) { CURRENT_SETTINGS.with(|x| { x.borrow_mut().inner = self.0.take().unwrap(); }) } } insta-1.46.1/src/snapshot.rs000064400000000000000000001303601046102023000140320ustar 00000000000000use crate::{ content::{self, json, yaml, Content}, elog, utils::style, }; use once_cell::sync::Lazy; use std::env; use std::error::Error; use std::fmt; use std::fs; use std::io::{BufRead, BufReader, Write}; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::time::{SystemTime, UNIX_EPOCH}; use std::{borrow::Cow, iter::once}; static RUN_ID: Lazy = Lazy::new(|| { if let Ok(run_id) = env::var("NEXTEST_RUN_ID") { run_id } else { let d = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); format!("{}-{}", d.as_secs(), d.subsec_nanos()) } }); /// Holds a pending inline snapshot loaded from a json file or read from an assert /// macro (doesn't write to the rust file, which is done by `cargo-insta`) #[derive(Debug)] pub struct PendingInlineSnapshot { pub run_id: String, pub line: u32, pub new: Option, pub old: Option, } impl PendingInlineSnapshot { pub fn new(new: Option, old: Option, line: u32) -> PendingInlineSnapshot { PendingInlineSnapshot { new, old, line, run_id: RUN_ID.clone(), } } #[cfg(feature = "_cargo_insta_internal")] pub fn load_batch(p: &Path) -> Result, Box> { let contents = fs::read_to_string(p).map_err(|e| content::Error::FileIo(e, p.to_path_buf()))?; let mut rv: Vec = contents .lines() .map(|line| { let value = yaml::parse_str(line, p)?; Self::from_content(value) }) .collect::>>()?; // 
remove all but the last run if let Some(last_run_id) = rv.last().map(|x| x.run_id.clone()) { rv.retain(|x| x.run_id == last_run_id); } Ok(rv) } #[cfg(feature = "_cargo_insta_internal")] pub fn save_batch(p: &Path, batch: &[PendingInlineSnapshot]) -> Result<(), Box> { fs::remove_file(p).ok(); for snap in batch { snap.save(p)?; } Ok(()) } pub fn save(&self, p: &Path) -> Result<(), Box> { // Create parent directories if they don't exist (needed for INSTA_PENDING_DIR) if let Some(parent) = p.parent() { fs::create_dir_all(parent)?; } let mut f = fs::OpenOptions::new().create(true).append(true).open(p)?; let mut s = json::to_string(&self.as_content()); s.push('\n'); f.write_all(s.as_bytes())?; Ok(()) } #[cfg(feature = "_cargo_insta_internal")] fn from_content(content: Content) -> Result> { if let Content::Map(map) = content { let mut run_id = None; let mut line = None; let mut old = None; let mut new = None; for (key, value) in map.into_iter() { match key.as_str() { Some("run_id") => run_id = value.as_str().map(|x| x.to_string()), Some("line") => line = value.as_u64().map(|x| x as u32), Some("old") if !value.is_nil() => { old = Some(Snapshot::from_content(value, TextSnapshotKind::Inline)?) } Some("new") if !value.is_nil() => { new = Some(Snapshot::from_content(value, TextSnapshotKind::Inline)?) 
} _ => {} } } Ok(PendingInlineSnapshot { run_id: run_id.ok_or(content::Error::MissingField)?, line: line.ok_or(content::Error::MissingField)?, new, old, }) } else { Err(content::Error::UnexpectedDataType.into()) } } fn as_content(&self) -> Content { let fields = vec![ ("run_id", Content::from(self.run_id.as_str())), ("line", Content::from(self.line)), ( "new", match &self.new { Some(snap) => snap.as_content(), None => Content::None, }, ), ( "old", match &self.old { Some(snap) => snap.as_content(), None => Content::None, }, ), ]; Content::Struct("PendingInlineSnapshot", fields) } } #[derive(Debug, Clone, PartialEq, Default)] pub enum SnapshotKind { #[default] Text, Binary { extension: String, }, } /// Snapshot metadata information. #[derive(Debug, Default, Clone, PartialEq)] pub struct MetaData { /// The source file (relative to workspace root). pub(crate) source: Option, /// The source line, if available. This is used by pending snapshots, but trimmed /// before writing to the final `.snap` files in [`MetaData::trim_for_persistence`]. pub(crate) assertion_line: Option, /// Optional human readable (non formatted) snapshot description. pub(crate) description: Option, /// Optionally the expression that created the snapshot. pub(crate) expression: Option, /// An optional arbitrary structured info object. pub(crate) info: Option, /// Reference to the input file. pub(crate) input_file: Option, /// The type of the snapshot (string or binary). pub(crate) snapshot_kind: SnapshotKind, } impl MetaData { /// Returns the absolute source path. pub fn source(&self) -> Option<&str> { self.source.as_deref() } /// Returns the assertion line. pub fn assertion_line(&self) -> Option { self.assertion_line } /// Returns the expression that created the snapshot. pub fn expression(&self) -> Option<&str> { self.expression.as_deref() } /// Returns the description that created the snapshot. 
pub fn description(&self) -> Option<&str> { self.description.as_deref().filter(|x| !x.is_empty()) } /// Returns the embedded info. #[doc(hidden)] pub fn private_info(&self) -> Option<&Content> { self.info.as_ref() } /// Returns the relative source path. pub fn get_relative_source(&self, base: &Path) -> Option { self.source.as_ref().map(|source| { base.join(source) .canonicalize() .ok() .and_then(|s| s.strip_prefix(base).ok().map(|x| x.to_path_buf())) .unwrap_or_else(|| base.to_path_buf()) }) } /// Returns the input file reference. pub fn input_file(&self) -> Option<&str> { self.input_file.as_deref() } fn from_content(content: Content) -> Result> { if let Content::Map(map) = content { let mut source = None; let mut assertion_line = None; let mut description = None; let mut expression = None; let mut info = None; let mut input_file = None; let mut snapshot_type = TmpSnapshotKind::Text; let mut extension = None; enum TmpSnapshotKind { Text, Binary, } for (key, value) in map.into_iter() { match key.as_str() { Some("source") => source = value.as_str().map(|x| x.to_string()), Some("assertion_line") => assertion_line = value.as_u64().map(|x| x as u32), Some("description") => description = value.as_str().map(Into::into), Some("expression") => expression = value.as_str().map(Into::into), Some("info") if !value.is_nil() => info = Some(value), Some("input_file") => input_file = value.as_str().map(Into::into), Some("snapshot_kind") => { snapshot_type = match value.as_str() { Some("binary") => TmpSnapshotKind::Binary, _ => TmpSnapshotKind::Text, } } Some("extension") => { extension = value.as_str().map(Into::into); } _ => {} } } Ok(MetaData { source, assertion_line, description, expression, info, input_file, snapshot_kind: match snapshot_type { TmpSnapshotKind::Text => SnapshotKind::Text, TmpSnapshotKind::Binary => SnapshotKind::Binary { extension: extension.ok_or(content::Error::MissingField)?, }, }, }) } else { Err(content::Error::UnexpectedDataType.into()) } } fn 
as_content(&self) -> Content { let mut fields = Vec::new(); if let Some(source) = self.source.as_deref() { fields.push(("source", Content::from(source))); } if let Some(line) = self.assertion_line { fields.push(("assertion_line", Content::from(line))); } if let Some(description) = self.description.as_deref() { fields.push(("description", Content::from(description))); } if let Some(expression) = self.expression.as_deref() { fields.push(("expression", Content::from(expression))); } if let Some(info) = &self.info { fields.push(("info", info.to_owned())); } if let Some(input_file) = self.input_file.as_deref() { fields.push(("input_file", Content::from(input_file))); } match self.snapshot_kind { SnapshotKind::Text => {} SnapshotKind::Binary { ref extension } => { fields.push(("extension", Content::from(extension.clone()))); fields.push(("snapshot_kind", Content::from("binary"))); } } Content::Struct("MetaData", fields) } /// Trims the metadata of fields that we don't save to `.snap` files (those /// we only use for display while reviewing) fn trim_for_persistence(&self) -> Cow<'_, MetaData> { // TODO: in order for `--require-full-match` to work on inline snapshots // without cargo-insta, we need to trim all fields if there's an inline // snapshot. But we don't know that from here (notably // `self.input_file.is_none()` is not a correct approach). Given that // `--require-full-match` is experimental and we're working on making // inline & file snapshots more coherent, I'm leaving this as is for // now. if self.assertion_line.is_some() { let mut rv = self.clone(); rv.assertion_line = None; Cow::Owned(rv) } else { Cow::Borrowed(self) } } } #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum TextSnapshotKind { Inline, File, } /// A helper to work with file snapshots. #[derive(Debug, Clone)] pub struct Snapshot { module_name: String, snapshot_name: Option, metadata: MetaData, snapshot: SnapshotContents, } impl Snapshot { /// Loads a snapshot from a file. 
pub fn from_file(p: &Path) -> Result> { let mut f = BufReader::new(fs::File::open(p)?); let mut buf = String::new(); f.read_line(&mut buf)?; // yaml format let metadata = if buf.trim_end() == "---" { loop { let read = f.read_line(&mut buf)?; if read == 0 { break; } if buf[buf.len() - read..].trim_end() == "---" { buf.truncate(buf.len() - read); break; } } let content = yaml::parse_str(&buf, p)?; MetaData::from_content(content)? // legacy format // (but not viable to move into `match_legacy` given it's more than // just the snapshot value itself...) } else { let mut rv = MetaData::default(); loop { buf.clear(); let read = f.read_line(&mut buf)?; if read == 0 || buf.trim_end().is_empty() { buf.truncate(buf.len() - read); break; } let mut iter = buf.splitn(2, ':'); if let Some(key) = iter.next() { if let Some(value) = iter.next() { let value = value.trim(); match key.to_lowercase().as_str() { "expression" => rv.expression = Some(value.to_string()), "source" => rv.source = Some(value.into()), _ => {} } } } } elog!("A snapshot uses a legacy snapshot format; please update it to the new format with `cargo insta test --force-update-snapshots --accept`.\nSnapshot is at: {}", p.to_string_lossy()); rv }; let contents = match metadata.snapshot_kind { SnapshotKind::Text => { buf.clear(); for (idx, line) in f.lines().enumerate() { let line = line?; if idx > 0 { buf.push('\n'); } buf.push_str(&line); } TextSnapshotContents { contents: buf, kind: TextSnapshotKind::File, } .into() } SnapshotKind::Binary { ref extension } => { let path = build_binary_path(extension, p); let contents = fs::read(path)?; SnapshotContents::Binary(Rc::new(contents)) } }; let (snapshot_name, module_name) = names_of_path(p); Ok(Snapshot::from_components( module_name, Some(snapshot_name), metadata, contents, )) } pub(crate) fn from_components( module_name: String, snapshot_name: Option, metadata: MetaData, snapshot: SnapshotContents, ) -> Snapshot { Snapshot { module_name, snapshot_name, metadata, snapshot, 
} } #[cfg(feature = "_cargo_insta_internal")] fn from_content(content: Content, kind: TextSnapshotKind) -> Result> { if let Content::Map(map) = content { let mut module_name = None; let mut snapshot_name = None; let mut metadata = None; let mut snapshot = None; for (key, value) in map.into_iter() { match key.as_str() { Some("module_name") => module_name = value.as_str().map(|x| x.to_string()), Some("snapshot_name") => snapshot_name = value.as_str().map(|x| x.to_string()), Some("metadata") => metadata = Some(MetaData::from_content(value)?), Some("snapshot") => { snapshot = Some( TextSnapshotContents { contents: value .as_str() .ok_or(content::Error::UnexpectedDataType)? .to_string(), kind, } .into(), ); } _ => {} } } Ok(Snapshot { module_name: module_name.ok_or(content::Error::MissingField)?, snapshot_name, metadata: metadata.ok_or(content::Error::MissingField)?, snapshot: snapshot.ok_or(content::Error::MissingField)?, }) } else { Err(content::Error::UnexpectedDataType.into()) } } fn as_content(&self) -> Content { let mut fields = vec![("module_name", Content::from(self.module_name.as_str()))]; // Note this is currently never used, since this method is only used for // inline snapshots if let Some(name) = self.snapshot_name.as_deref() { fields.push(("snapshot_name", Content::from(name))); } fields.push(("metadata", self.metadata.as_content())); if let SnapshotContents::Text(ref content) = self.snapshot { fields.push(("snapshot", Content::from(content.to_string()))); } Content::Struct("Content", fields) } /// Returns the module name. pub fn module_name(&self) -> &str { &self.module_name } /// Returns the snapshot name. pub fn snapshot_name(&self) -> Option<&str> { self.snapshot_name.as_deref() } /// The metadata in the snapshot. pub fn metadata(&self) -> &MetaData { &self.metadata } /// The snapshot contents pub fn contents(&self) -> &SnapshotContents { &self.snapshot } /// Snapshot contents match another snapshot's. 
pub fn matches(&self, other: &Self) -> bool { self.contents() == other.contents() // For binary snapshots the extension also need to be the same: && self.metadata.snapshot_kind == other.metadata.snapshot_kind } /// Both the exact snapshot contents and the persisted metadata match another snapshot's. // (could rename to `matches_exact` for consistency, after some current // pending merge requests are merged) pub fn matches_fully(&self, other: &Self) -> bool { match (self.contents(), other.contents()) { (SnapshotContents::Text(self_contents), SnapshotContents::Text(other_contents)) => { // Note that we previously would match the exact values of the // unnormalized text. But that's too strict — it means we can // never match a snapshot that has leading/trailing whitespace. // So instead we check it matches on the latest format. // Generally those should be the same — latest should be doing // the minimum normalization; if they diverge we could update // this to be stricter. // // (I think to do this perfectly, we'd want to match the // _reference_ value unnormalized, but the _generated_ value // normalized. That way, we can get the But at the moment we // don't distinguish between which is which in our data // structures.) let contents_match_exact = self_contents.matches_latest(other_contents); match self_contents.kind { TextSnapshotKind::File => { self.metadata.trim_for_persistence() == other.metadata.trim_for_persistence() && contents_match_exact } TextSnapshotKind::Inline => contents_match_exact, } } _ => self.matches(other), } } fn serialize_snapshot(&self, md: &MetaData) -> String { let mut buf = yaml::to_string(&md.as_content()); buf.push_str("---\n"); if let SnapshotContents::Text(ref contents) = self.snapshot { buf.push_str(&contents.to_string()); buf.push('\n'); } buf } // We take `md` as an argument here because the calling methods want to // adjust it; e.g. removing volatile fields when writing to the final // `.snap` file. 
fn save_with_metadata(&self, path: &Path, md: &MetaData) -> Result<(), Box> { if let Some(folder) = path.parent() { fs::create_dir_all(folder)?; } let serialized_snapshot = self.serialize_snapshot(md); fs::write(path, serialized_snapshot) .map_err(|e| content::Error::FileIo(e, path.to_path_buf()))?; if let SnapshotContents::Binary(ref contents) = self.snapshot { fs::write(self.build_binary_path(path).unwrap(), &**contents) .map_err(|e| content::Error::FileIo(e, path.to_path_buf()))?; } Ok(()) } pub fn build_binary_path(&self, path: impl Into) -> Option { if let SnapshotKind::Binary { ref extension } = self.metadata.snapshot_kind { Some(build_binary_path(extension, path)) } else { None } } /// Saves the snapshot. #[doc(hidden)] pub fn save(&self, path: &Path) -> Result<(), Box> { self.save_with_metadata(path, &self.metadata.trim_for_persistence()) } /// Same as [`Self::save`] but instead of writing a normal snapshot file this will write /// a `.snap.new` file with additional information. /// /// The path of the new snapshot file is returned. pub(crate) fn save_new(&self, path: &Path) -> Result> { // TODO: should we be the actual extension here rather than defaulting // to the standard `.snap`? let new_path = path.to_path_buf().with_extension("snap.new"); self.save_with_metadata(&new_path, &self.metadata)?; Ok(new_path) } } /// The contents of a Snapshot #[derive(Debug, Clone)] pub enum SnapshotContents { Text(TextSnapshotContents), // This is in an `Rc` because we need to be able to clone this struct cheaply and the contents // of the `Vec` could be rather large. The reason it's not an `Rc<[u8]>` is because creating one // of those would require re-allocating because of the additional size needed for the reference // count. 
Binary(Rc>), } // Could be Cow, but I think limited savings #[derive(Debug, PartialEq, Eq, Clone)] pub struct TextSnapshotContents { contents: String, pub kind: TextSnapshotKind, } impl From for SnapshotContents { fn from(value: TextSnapshotContents) -> Self { SnapshotContents::Text(value) } } impl SnapshotContents { pub fn is_binary(&self) -> bool { matches!(self, SnapshotContents::Binary(_)) } } impl TextSnapshotContents { pub fn new(contents: String, kind: TextSnapshotKind) -> TextSnapshotContents { // We could store a normalized version of the string as part of `new`; // it would avoid allocating a new `String` when we get the normalized // versions, which we may do a few times. (We want to store the // unnormalized version because it allows us to use `matches_fully`.) TextSnapshotContents { contents, kind } } /// Matches another snapshot without any normalization pub fn matches_fully(&self, other: &TextSnapshotContents) -> bool { self.contents == other.contents } /// Snapshot matches based on the latest format. pub fn matches_latest(&self, other: &Self) -> bool { self.to_string() == other.to_string() } pub fn matches_legacy(&self, other: &Self) -> bool { fn as_str_legacy(sc: &TextSnapshotContents) -> String { // First do the standard normalization let out = sc.to_string(); // Legacy snapshots trim newlines at the start. let out = out.trim_start_matches(['\r', '\n']); // Legacy inline snapshots have `---` at the start, so this strips that if // it exists. let out = match out.strip_prefix("---\n") { Some(old_snapshot) => old_snapshot, None => out, }; match sc.kind { TextSnapshotKind::Inline => { let out = legacy_inline_normalize(out); // Handle old multiline format where single-line content was stored // with code indentation (e.g., @r"\n content\n "). After // from_inline_literal processing, this becomes " content\n " // with leading spaces from source code indentation. // // We detect this by checking: // 1. 
Raw contents contain a newline (came from multiline literal) // 2. After trimming, it's effectively single-line (the legacy pattern) // // This distinguishes: // - Legacy single-line in multiline: @r"\n X\n " → trim // - Modern single-line: @" X" → don't trim (intentional spaces) // - True multiline: @r"\n A\n B\n" → don't trim (>1 line) // // See: https://github.com/mitsuhiko/insta/pull/819#issuecomment-3583709431 let is_legacy_single_line_in_multiline = sc.contents.contains('\n') && sc.contents.trim_end().lines().count() <= 1; if is_legacy_single_line_in_multiline { out.trim_start().to_string() } else { out } } TextSnapshotKind::File => out.to_string(), } } as_str_legacy(self) == as_str_legacy(other) } /// Convert a literal snapshot value (i.e. the string inside the quotes, /// from a rust file) to the value we retain in the struct. This is a small /// change to the value: we remove the leading newline and coerce newlines /// to `\n`. Otherwise, the value is retained unnormalized (generally we /// want to retain unnormalized values so we can run `matches_fully` on /// them) pub(crate) fn from_inline_literal(contents: &str) -> Self { // If it's a single line string, then we don't do anything. if contents.trim_end().lines().count() <= 1 { return Self::new(contents.trim_end().to_string(), TextSnapshotKind::Inline); } // If it's multiline, we trim the first line, which should be empty. // (Possibly in the future we'll do the same for the final line too) let lines = contents.lines().collect::>(); let (first, remainder) = lines.split_first().unwrap(); let snapshot = { // If the first isn't empty, something is up — include the first line // and print a warning. if first != &"" { elog!("{} {}{}{}\n{}",style("Multiline inline snapshot values should start and end with a newline.").yellow().bold()," The current value will fail to match in the future. Run `cargo insta test --force-update-snapshots` to rewrite snapshots. The existing value's first line is `", first, "`. 
Full value:", contents); once(first) .chain(remainder.iter()) .cloned() .collect::>() .join("\n") } else { remainder.join("\n") } }; Self::new(snapshot, TextSnapshotKind::Inline) } fn normalize(&self) -> String { let kind_specific_normalization = match self.kind { TextSnapshotKind::Inline => normalize_inline(&self.contents), TextSnapshotKind::File => self.contents.clone(), }; // Then this we do for both kinds let out = kind_specific_normalization.trim_end(); out.replace("\r\n", "\n") } /// Returns the string literal, including `#` delimiters, to insert into a /// Rust source file. pub fn to_inline(&self, indentation: &str) -> String { let contents = self.normalize(); let mut out = String::new(); // Some characters can't be escaped in a raw string literal, so we need // to escape the string if it contains them. We prefer escaping control // characters except for newlines, tabs, and ESC. let has_control_chars = contents .chars() .any(|c| c.is_control() && !['\n', '\t', '\x1b'].contains(&c)); // We prefer raw strings for strings containing a quote or an escape // character, as these would require escaping in regular strings. // We can't use raw strings for some control characters. // We don't use raw strings just for newlines, as they can appear // literally in regular strings (avoids clippy::needless_raw_strings). if !has_control_chars && contents.contains(['\\', '"']) { out.push('r'); } let delimiter = "#".repeat(required_hashes(&contents)); out.push_str(&delimiter); // If there are control characters, then we have to just use a simple // string with unicode escapes from the debug output. We don't attempt // block mode (though not impossible to do so). 
if has_control_chars { out.push_str(format!("{contents:?}").as_str()); } else { out.push('"'); // if we have more than one line we want to change into the block // representation mode if contents.contains('\n') { out.extend( contents .lines() // Adds an additional newline at the start of multiline // string (not sure this is the clearest way of representing // it, but it works...) .map(|l| { format!( "\n{i}{l}", i = if l.is_empty() { "" } else { indentation }, l = l ) }) // `lines` removes the final line ending — add back. Include // indentation so the closing delimited aligns with the full string. .chain(Some(format!("\n{indentation}"))), ); } else { out.push_str(contents.as_str()); } out.push('"'); } out.push_str(&delimiter); out } } impl fmt::Display for TextSnapshotContents { /// Returns the snapshot contents as a normalized string (for example, /// removing surrounding whitespace) fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.normalize()) } } impl PartialEq for SnapshotContents { fn eq(&self, other: &Self) -> bool { match (self, other) { (SnapshotContents::Text(this), SnapshotContents::Text(other)) => { // Ideally match on current rules, but otherwise fall back to legacy rules if this.matches_latest(other) { true } else if this.matches_legacy(other) { elog!("{} {}\n{}",style("Snapshot test passes but the existing value is in a legacy format. Please run `cargo insta test --force-update-snapshots` to update to a newer format.").yellow().bold(),"Snapshot contents:", this.to_string()); true } else { false } } (SnapshotContents::Binary(this), SnapshotContents::Binary(other)) => this == other, _ => false, } } } fn build_binary_path(extension: &str, path: impl Into) -> PathBuf { let path = path.into(); let mut new_extension = path.extension().unwrap().to_os_string(); new_extension.push("."); new_extension.push(extension); path.with_extension(new_extension) } /// The number of `#` we need to surround a raw string literal with. 
fn required_hashes(text: &str) -> usize { text.split('"') .skip(1) // Skip the first part which is before the first quote .map(|s| s.chars().take_while(|&c| c == '#').count() + 1) .max() .unwrap_or_default() } #[test] fn test_required_hashes() { assert_snapshot!(required_hashes(""), @"0"); assert_snapshot!(required_hashes("Hello, world!"), @"0"); assert_snapshot!(required_hashes("\"\""), @"1"); assert_snapshot!(required_hashes("##"), @"0"); assert_snapshot!(required_hashes("\"#\"#"), @"2"); assert_snapshot!(required_hashes(r##""#"##), @"2"); assert_snapshot!(required_hashes(r######"foo ""##### bar "###" baz"######), @"6"); assert_snapshot!(required_hashes("\"\"\""), @"1"); assert_snapshot!(required_hashes("####"), @"0"); assert_snapshot!(required_hashes(r###"\"\"##\"\""###), @"3"); assert_snapshot!(required_hashes(r###"r"#"Raw string"#""###), @"2"); } fn leading_space(value: &str) -> String { value .chars() .take_while(|x| x.is_whitespace()) .collect::() } fn min_indentation(snapshot: &str) -> String { let lines = snapshot.trim_end().lines(); lines .filter(|l| !l.is_empty()) .map(leading_space) .min_by(|a, b| a.len().cmp(&b.len())) .unwrap_or("".into()) } /// Normalize snapshot value, which we apply to both generated and literal /// snapshots. Remove excess indentation, excess ending whitespace and coerce /// newlines to `\n`. fn normalize_inline(snapshot: &str) -> String { // If it's a single line string, then we don't do anything. 
if snapshot.trim_end().lines().count() <= 1 { return snapshot.trim_end().to_string(); } let indentation = min_indentation(snapshot); snapshot .lines() .map(|l| l.get(indentation.len()..).unwrap_or("")) .collect::>() .join("\n") } #[test] fn test_normalize_inline_snapshot() { fn normalized_of_literal(snapshot: &str) -> String { normalize_inline(&TextSnapshotContents::from_inline_literal(snapshot).contents) } use similar_asserts::assert_eq; // here we do exact matching (rather than `assert_snapshot`) to ensure we're // not incorporating the modifications that insta itself makes assert_eq!( normalized_of_literal( " 1 2 " ), "1 2" ); assert_eq!( normalized_of_literal( " 1 2 " ), " 1 2 " ); assert_eq!( normalized_of_literal( " 1 2 " ), "1 2 " ); assert_eq!( normalized_of_literal( " 1 2 " ), "1 2" ); assert_eq!( normalized_of_literal( " a " ), " a" ); assert_eq!(normalized_of_literal(""), ""); assert_eq!( normalized_of_literal( " a b c " ), " a b c " ); assert_eq!( normalized_of_literal( " a " ), "a" ); // This is a bit of a weird case, but because it's not a true multiline // (which requires an opening and closing newline), we don't trim the // indentation. Not terrible if this needs to change. The next test shows // how a real multiline string is handled. assert_eq!( normalized_of_literal( " a" ), " a" ); // This test will pass but raise a warning, so we comment it out for the moment. 
// assert_eq!( // normalized_of_literal( // "a // a" // ), // "a // a" // ); } /// Extracts the module and snapshot name from a snapshot path fn names_of_path(path: &Path) -> (String, String) { // The final part of the snapshot file name is the test name; the // initial parts are the module name let parts: Vec<&str> = path .file_stem() .unwrap() .to_str() .unwrap_or("") .rsplitn(2, "__") .collect(); match parts.as_slice() { [snapshot_name, module_name] => (snapshot_name.to_string(), module_name.to_string()), [snapshot_name] => (snapshot_name.to_string(), String::new()), _ => (String::new(), "".to_string()), } } #[test] fn test_names_of_path() { assert_debug_snapshot!( names_of_path(Path::new("/src/snapshots/insta_tests__tests__name_foo.snap")), @r#" ( "name_foo", "insta_tests__tests", ) "# ); assert_debug_snapshot!( names_of_path(Path::new("/src/snapshots/name_foo.snap")), @r#" ( "name_foo", "", ) "# ); assert_debug_snapshot!( names_of_path(Path::new("foo/src/snapshots/go1.20.5.snap")), @r#" ( "go1.20.5", "", ) "# ); } /// legacy format - retain so old snapshots still work fn legacy_inline_normalize(frozen_value: &str) -> String { if !frozen_value.trim_start().starts_with('⋮') { return frozen_value.to_string(); } let mut buf = String::new(); let mut line_iter = frozen_value.lines(); let mut indentation = 0; for line in &mut line_iter { let line_trimmed = line.trim_start(); if line_trimmed.is_empty() { continue; } indentation = line.len() - line_trimmed.len(); // 3 because '⋮' is three utf-8 bytes long buf.push_str(&line_trimmed[3..]); buf.push('\n'); break; } for line in &mut line_iter { if let Some(prefix) = line.get(..indentation) { if !prefix.trim().is_empty() { return "".to_string(); } } if let Some(remainder) = line.get(indentation..) 
{ if let Some(rest) = remainder.strip_prefix('⋮') { buf.push_str(rest); buf.push('\n'); } else if remainder.trim().is_empty() { continue; } else { return "".to_string(); } } } buf.trim_end().to_string() } #[test] fn test_snapshot_contents_to_inline() { use similar_asserts::assert_eq; let snapshot_contents = TextSnapshotContents::new("testing".to_string(), TextSnapshotKind::Inline); assert_eq!(snapshot_contents.to_inline(""), r#""testing""#); assert_eq!( TextSnapshotContents::new("\na\nb".to_string(), TextSnapshotKind::Inline).to_inline(""), r##"" a b ""## ); assert_eq!( TextSnapshotContents::new("a\nb".to_string(), TextSnapshotKind::Inline).to_inline(" "), r##"" a b ""## ); assert_eq!( TextSnapshotContents::new("\n a\n b".to_string(), TextSnapshotKind::Inline) .to_inline(""), r##"" a b ""## ); assert_eq!( TextSnapshotContents::new("\na\n\nb".to_string(), TextSnapshotKind::Inline) .to_inline(" "), r##"" a b ""## ); assert_eq!( TextSnapshotContents::new( "ab " .to_string(), TextSnapshotKind::Inline ) .to_inline(""), r#""ab""# ); assert_eq!( TextSnapshotContents::new( " ab " .to_string(), TextSnapshotKind::Inline ) .to_inline(""), r##"" ab""## ); assert_eq!( TextSnapshotContents::new("\n ab\n".to_string(), TextSnapshotKind::Inline).to_inline(""), r##"" ab ""## ); assert_eq!( TextSnapshotContents::new("ab".to_string(), TextSnapshotKind::Inline).to_inline(""), r#""ab""# ); // Test control and special characters assert_eq!( TextSnapshotContents::new("a\tb".to_string(), TextSnapshotKind::Inline).to_inline(""), r##""a b""## ); assert_eq!( TextSnapshotContents::new("a\t\nb".to_string(), TextSnapshotKind::Inline).to_inline(""), "\" a\t b \"" ); assert_eq!( TextSnapshotContents::new("a\rb".to_string(), TextSnapshotKind::Inline).to_inline(""), r##""a\rb""## ); assert_eq!( TextSnapshotContents::new("a\0b".to_string(), TextSnapshotKind::Inline).to_inline(""), // Nul byte is printed as `\0` in Rust string literals r##""a\0b""## ); assert_eq!( 
TextSnapshotContents::new("a\u{FFFD}b".to_string(), TextSnapshotKind::Inline).to_inline(""), // Replacement character is returned as the character in literals r##""a�b""## ); } #[test] fn test_snapshot_contents_hashes() { assert_eq!( TextSnapshotContents::new("a###b".to_string(), TextSnapshotKind::Inline).to_inline(""), r#""a###b""# ); assert_eq!( TextSnapshotContents::new("a\n\\###b".to_string(), TextSnapshotKind::Inline).to_inline(""), r#####"r" a \###b ""##### ); } #[test] fn test_min_indentation() { use similar_asserts::assert_eq; assert_eq!( min_indentation( " 1 2 ", ), " ".to_string() ); assert_eq!( min_indentation( " 1 2" ), " ".to_string() ); assert_eq!( min_indentation( " 1 2 " ), " ".to_string() ); assert_eq!( min_indentation( " 1 2 " ), " ".to_string() ); assert_eq!( min_indentation( " a " ), " ".to_string() ); assert_eq!(min_indentation(""), "".to_string()); assert_eq!( min_indentation( " a b c " ), "".to_string() ); assert_eq!( min_indentation( " a " ), "".to_string() ); assert_eq!( min_indentation( " a" ), " ".to_string() ); assert_eq!( min_indentation( "a a" ), "".to_string() ); assert_eq!( normalize_inline( " 1 2" ), " 1 2" ); assert_eq!( normalize_inline( " 1 2 " ), " 1 2 " ); } #[test] fn test_min_indentation_additional() { use similar_asserts::assert_eq; let t = " 1 2 "; assert_eq!(min_indentation(t), " ".to_string()); let t = " a "; assert_eq!(min_indentation(t), " ".to_string()); let t = ""; assert_eq!(min_indentation(t), "".to_string()); let t = " a b c "; assert_eq!(min_indentation(t), "".to_string()); let t = " a"; assert_eq!(min_indentation(t), "".to_string()); let t = " a"; assert_eq!(min_indentation(t), " ".to_string()); let t = "a a"; assert_eq!(min_indentation(t), "".to_string()); let t = " 1 2 "; assert_eq!(min_indentation(t), " ".to_string()); let t = " 1 2"; assert_eq!(min_indentation(t), " ".to_string()); let t = " 1 2"; assert_eq!(min_indentation(t), " ".to_string()); } #[test] fn test_inline_snapshot_value_newline() { // 
https://github.com/mitsuhiko/insta/issues/39 assert_eq!(normalize_inline("\n"), ""); } #[test] fn test_parse_yaml_error() { use std::env::temp_dir; let mut temp = temp_dir(); temp.push("bad.yaml"); let mut f = fs::File::create(temp.clone()).unwrap(); let invalid = "--- This is invalid yaml: { { --- "; f.write_all(invalid.as_bytes()).unwrap(); let error = format!("{}", Snapshot::from_file(temp.as_path()).unwrap_err()); assert!(error.contains("Failed parsing the YAML from")); assert!(error.contains("bad.yaml")); } /// Check that snapshots don't take ownership of the value #[test] fn test_ownership() { // Range is non-copy use std::ops::Range; let r = Range { start: 0, end: 10 }; assert_debug_snapshot!(r, @"0..10"); assert_debug_snapshot!(r, @"0..10"); } #[test] fn test_empty_lines() { assert_snapshot!("single line should fit on a single line", @"single line should fit on a single line"); assert_snapshot!("single line should fit on a single line, even if it's really really really really really really really really really long", @"single line should fit on a single line, even if it's really really really really really really really really really long"); assert_snapshot!("multiline content starting on first line final line ", @" multiline content starting on first line final line "); assert_snapshot!(" multiline content starting on second line final line ", @" multiline content starting on second line final line "); } insta-1.46.1/src/snapshots/doctest_runtime_rs__named.snap000064400000000000000000000001561046102023000217500ustar 00000000000000--- source: insta/src/runtime.rs expression: "vec![1, 2, 3, 4, 5]" --- [ 1, 2, 3, 4, 5, ] insta-1.46.1/src/snapshots/insta__test__embedded.snap000064400000000000000000000001201046102023000210040ustar 00000000000000--- source: insta/src/test.rs expression: "\"Just a string\"" --- Just a string insta-1.46.1/src/test.rs000064400000000000000000000001271046102023000131470ustar 00000000000000#[test] fn test_embedded_test() { 
assert_snapshot!("embedded", "Just a string"); } insta-1.46.1/src/utils.rs000064400000000000000000000070711046102023000133350ustar 00000000000000use std::{ borrow::Cow, env, io::Write, path::Path, process::{Command, Stdio}, }; /// Are we running in in a CI environment? pub fn is_ci() -> bool { match env::var("CI").ok().as_deref() { Some("false") | Some("0") | Some("") => false, None => env::var("TF_BUILD").is_ok(), Some(_) => true, } } #[cfg(feature = "colors")] pub use console::style; #[cfg(not(feature = "colors"))] mod fake_colors { pub struct FakeStyledObject(D); macro_rules! style_attr { ($($name:ident)*) => { $( #[inline] pub fn $name(self) -> FakeStyledObject { self } )* } } impl FakeStyledObject { style_attr!(red green yellow cyan bold dim underlined); } impl std::fmt::Display for FakeStyledObject { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } pub fn style(val: D) -> FakeStyledObject { FakeStyledObject(val) } } #[cfg(not(feature = "colors"))] pub use self::fake_colors::*; /// Returns the term width that insta should use. pub fn term_width() -> usize { #[cfg(feature = "colors")] { console::Term::stdout().size().1 as usize } #[cfg(not(feature = "colors"))] { 74 } } /// Converts a path into a string that can be persisted. 
pub fn path_to_storage(path: &Path) -> String { #[cfg(windows)] { path.to_str().unwrap().replace('\\', "/") } #[cfg(not(windows))] { path.to_string_lossy().into() } } /// Tries to format a given rust expression with rustfmt pub fn format_rust_expression(value: &str) -> Cow<'_, str> { const PREFIX: &str = "const x:() = "; const SUFFIX: &str = ";\n"; if let Ok(mut proc) = Command::new("rustfmt") .arg("--emit=stdout") .arg("--edition=2018") .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() { { let stdin = proc.stdin.as_mut().unwrap(); stdin.write_all(PREFIX.as_bytes()).unwrap(); stdin.write_all(value.as_bytes()).unwrap(); stdin.write_all(SUFFIX.as_bytes()).unwrap(); } if let Ok(output) = proc.wait_with_output() { if output.status.success() { // slice between after the prefix and before the suffix // (currently 14 from the start and 2 before the end, respectively) let start = PREFIX.len() + 1; let end = output.stdout.len() - SUFFIX.len(); return std::str::from_utf8(&output.stdout[start..end]) .unwrap() .replace("\r\n", "\n") .into(); } } } Cow::Borrowed(value) } #[cfg(feature = "_cargo_insta_internal")] pub fn get_cargo() -> std::ffi::OsString { let cargo = env::var_os("CARGO"); let cargo = cargo .as_deref() .unwrap_or_else(|| std::ffi::OsStr::new("cargo")); cargo.to_os_string() } #[test] fn test_format_rust_expression() { assert_snapshot!(format_rust_expression("vec![1,2,3]"), @"vec![1, 2, 3]"); assert_snapshot!(format_rust_expression("vec![1,2,3].iter()"), @"vec![1, 2, 3].iter()"); assert_snapshot!(format_rust_expression(r#" "aoeu""#), @r#""aoeu""#); assert_snapshot!(format_rust_expression(r#" "aoe😄""#), @r#""aoe😄""#); assert_snapshot!(format_rust_expression("😄😄😄😄😄"), @"😄😄😄😄😄") } insta-1.46.1/tests/glob_submodule/mod.rs000064400000000000000000000027151046102023000163310ustar 00000000000000#![cfg(feature = "glob")] #[test] fn test_basic_globbing_parent_dir() { insta::glob!("../inputs", "*.txt", |path| { let contents = 
std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); }); } #[test] fn test_basic_globbing_nested_parent_dir_base_path() { insta::glob!("../inputs-nested", "*/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_snapshot!(&contents); }); } #[test] fn test_basic_globbing_nested_parent_glob() { insta::glob!("..", "inputs-nested/*/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_snapshot!(&contents); }); } #[test] fn test_globs_follow_links_parent_dir_base_path() { insta::glob!("../link-to-inputs", "*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); }); } #[test] fn test_globs_follow_links_parent_dir_glob() { insta::glob!("..", "link-to-inputs/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); }); } #[test] fn test_basic_globbing_absolute_dir() { insta::glob!( concat!(env!("CARGO_MANIFEST_DIR"), "/tests/inputs"), "*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); } ); } ././@LongLink00006440000000000000000000000164000000000000007774Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_absolute_dir@goodbye.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_absolute_dir@g000064400000000000000000000001771046102023000325220ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" ././@LongLink00006440000000000000000000000162000000000000007772Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_absolute_dir@hello.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_absolute_dir@h000064400000000000000000000001731046102023000325170ustar 
00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/hello.txt --- "Contents of hello" ././@LongLink00006440000000000000000000000203000000000000007766Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_dir_base_path@a__file.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_000064400000000000000000000001701046102023000325620ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs-nested/a/file.txt --- Hello A ././@LongLink00006440000000000000000000000203000000000000007766Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_dir_base_path@b__file.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_000064400000000000000000000001701046102023000325620ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs-nested/b/file.txt --- Hello B ././@LongLink00006440000000000000000000000172000000000000007773Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_glob@a__file.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_000064400000000000000000000001701046102023000325620ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs-nested/a/file.txt --- Hello A ././@LongLink00006440000000000000000000000172000000000000007773Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_glob@b__file.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_nested_parent_000064400000000000000000000001701046102023000325620ustar 00000000000000--- source: tests/glob_submodule/mod.rs 
expression: "&contents" input_file: tests/inputs-nested/b/file.txt --- Hello B ././@LongLink00006440000000000000000000000162000000000000007772Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_parent_dir@goodbye.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_parent_dir@goo000064400000000000000000000001771046102023000325330ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" ././@LongLink00006440000000000000000000000160000000000000007770Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_parent_dir@hello.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__basic_globbing_parent_dir@hel000064400000000000000000000001731046102023000325130ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/hello.txt --- "Contents of hello" ././@LongLink00006440000000000000000000000200000000000000007763Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir_base_path@goodbye.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir000064400000000000000000000001771046102023000326720ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" ././@LongLink00006440000000000000000000000176000000000000007777Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir_base_path@hello.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir000064400000000000000000000001731046102023000326660ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/hello.txt --- 
"Contents of hello" ././@LongLink00006440000000000000000000000173000000000000007774Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir_glob@goodbye.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir000064400000000000000000000001771046102023000326720ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" ././@LongLink00006440000000000000000000000171000000000000007772Lustar insta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir_glob@hello.txt.snapinsta-1.46.1/tests/glob_submodule/snapshots/test_glob__glob_submodule__globs_follow_links_parent_dir000064400000000000000000000001731046102023000326660ustar 00000000000000--- source: tests/glob_submodule/mod.rs expression: "&contents" input_file: tests/inputs/hello.txt --- "Contents of hello" insta-1.46.1/tests/inputs/goodbye.txt000064400000000000000000000000231046102023000157030ustar 00000000000000Contents of goodbyeinsta-1.46.1/tests/inputs/hello.txt000064400000000000000000000000211046102023000153540ustar 00000000000000Contents of helloinsta-1.46.1/tests/inputs-nested/a/file.txt000064400000000000000000000000101046102023000166660ustar 00000000000000Hello A insta-1.46.1/tests/inputs-nested/b/file.txt000064400000000000000000000000101046102023000166670ustar 00000000000000Hello B insta-1.46.1/tests/link-to-inputs/goodbye.txt000064400000000000000000000000231046102023000172560ustar 00000000000000Contents of goodbyeinsta-1.46.1/tests/link-to-inputs/hello.txt000064400000000000000000000000211046102023000167270ustar 00000000000000Contents of helloinsta-1.46.1/tests/snapshots/snapshot_no_module_prepending.snap000064400000000000000000000001421046102023000232120ustar 00000000000000--- source: insta/tests/test_settings.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] 
insta-1.46.1/tests/snapshots/test_advanced__basic_suffixes@1.snap000064400000000000000000000001041046102023000232760ustar 00000000000000--- source: insta/tests/test_advanced.rs expression: "&value" --- 1 insta-1.46.1/tests/snapshots/test_advanced__basic_suffixes@2.snap000064400000000000000000000001041046102023000232770ustar 00000000000000--- source: insta/tests/test_advanced.rs expression: "&value" --- 2 insta-1.46.1/tests/snapshots/test_advanced__basic_suffixes@3.snap000064400000000000000000000001041046102023000233000ustar 00000000000000--- source: insta/tests/test_advanced.rs expression: "&value" --- 3 insta-1.46.1/tests/snapshots/test_basic__Testing.snap000064400000000000000000000001001046102023000210450ustar 00000000000000--- source: insta/tests/test_basic.rs expression: expr --- name insta-1.46.1/tests/snapshots/test_basic__crlf.snap000064400000000000000000000001361046102023000203670ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "\"foo\\r\\nbar\\r\\nbaz\"" --- foo bar baz insta-1.46.1/tests/snapshots/test_basic__debug_vector.snap000064400000000000000000000001371046102023000221120ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] insta-1.46.1/tests/snapshots/test_basic__display.snap000064400000000000000000000001141046102023000211020ustar 00000000000000--- source: insta/tests/test_basic.rs expression: td --- TestDisplay struct insta-1.46.1/tests/snapshots/test_basic__insta_sort_order.snap000064400000000000000000000002101046102023000230120ustar 00000000000000--- source: insta/tests/test_basic.rs expression: m --- ? - 1 - 3 : 4 ? - 1 - 4 : 4 ? - 2 - 3 : 4 ? - 3 - 3 : 4 ? 
- 9 - 3 : 4 insta-1.46.1/tests/snapshots/test_basic__json_vector.snap000064400000000000000000000001301046102023000217660ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3 ] insta-1.46.1/tests/snapshots/test_basic__nested__nested_module.snap000064400000000000000000000001061046102023000237660ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "\"aoeu\"" --- aoeu insta-1.46.1/tests/snapshots/test_basic__trailing_commas-2.snap000064400000000000000000000001401046102023000227430ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4, 5]" --- - 1 - 2 - 3 - 4 - 5 insta-1.46.1/tests/snapshots/test_basic__trailing_commas.snap000064400000000000000000000001141046102023000226050ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "\"Testing\"" --- Testing insta-1.46.1/tests/snapshots/test_basic__trailing_crlf.snap000064400000000000000000000001441046102023000222570ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "\"foo\\r\\nbar\\r\\nbaz\\r\\n\"" --- foo bar baz insta-1.46.1/tests/snapshots/test_basic__unnamed_debug_vector-2.snap000064400000000000000000000001511046102023000237540ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4]" --- [ 1, 2, 3, 4, ] insta-1.46.1/tests/snapshots/test_basic__unnamed_debug_vector-3.snap000064400000000000000000000001631046102023000237600ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4, 5]" --- [ 1, 2, 3, 4, 5, ] insta-1.46.1/tests/snapshots/test_basic__unnamed_debug_vector.snap000064400000000000000000000001371046102023000236210ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] insta-1.46.1/tests/snapshots/test_basic__unnamed_display-2.snap000064400000000000000000000001161046102023000227520ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "\"whatever\"" --- whatever 
insta-1.46.1/tests/snapshots/test_basic__unnamed_display.snap000064400000000000000000000001141046102023000226110ustar 00000000000000--- source: insta/tests/test_basic.rs expression: td --- TestDisplay struct insta-1.46.1/tests/snapshots/test_basic__unnamed_json_vector-2.snap000064400000000000000000000001401046102023000236350ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4]" --- [ 1, 2, 3, 4 ] insta-1.46.1/tests/snapshots/test_basic__unnamed_json_vector-3.snap000064400000000000000000000001501046102023000236370ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4, 5]" --- [ 1, 2, 3, 4, 5 ] insta-1.46.1/tests/snapshots/test_basic__unnamed_json_vector.snap000064400000000000000000000001301046102023000234750ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3 ] insta-1.46.1/tests/snapshots/test_basic__unnamed_nested_closure.snap000064400000000000000000000001371046102023000241670ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] insta-1.46.1/tests/snapshots/test_basic__unnamed_yaml_vector-2.snap000064400000000000000000000001311046102023000236260ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4]" --- - 1 - 2 - 3 - 4 insta-1.46.1/tests/snapshots/test_basic__unnamed_yaml_vector-3.snap000064400000000000000000000001401046102023000236270ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3, 4, 5]" --- - 1 - 2 - 3 - 4 - 5 insta-1.46.1/tests/snapshots/test_basic__unnamed_yaml_vector.snap000064400000000000000000000001221046102023000234670ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- - 1 - 2 - 3 insta-1.46.1/tests/snapshots/test_basic__yaml_vector.snap000064400000000000000000000001221046102023000217600ustar 00000000000000--- source: insta/tests/test_basic.rs expression: "vec![1, 2, 3]" --- - 1 - 2 - 3 
insta-1.46.1/tests/snapshots/test_binary__binary_snapshot.snap000064400000000000000000000001611046102023000230450ustar 00000000000000--- source: insta/tests/test_binary.rs expression: "b\"test\".to_vec()" extension: txt snapshot_kind: binary --- insta-1.46.1/tests/snapshots/test_binary__binary_snapshot.snap.txt000064400000000000000000000000041046102023000236570ustar 00000000000000testinsta-1.46.1/tests/snapshots/test_binary__multipart_extension.snap000064400000000000000000000001641046102023000237620ustar 00000000000000--- source: insta/tests/test_binary.rs expression: "b\"test\".to_vec()" extension: tar.gz snapshot_kind: binary --- insta-1.46.1/tests/snapshots/test_binary__multipart_extension.snap.tar.gz000064400000000000000000000000041046102023000251570ustar 00000000000000testinsta-1.46.1/tests/snapshots/test_binary__name.snap000064400000000000000000000001621046102023000205630ustar 00000000000000--- source: insta/tests/test_binary.rs expression: "b\"null\".to_vec()" extension: json snapshot_kind: binary --- insta-1.46.1/tests/snapshots/test_binary__name.snap.json000064400000000000000000000000041046102023000215260ustar 00000000000000nullinsta-1.46.1/tests/snapshots/test_glob__basic_globbing@goodbye.txt.snap000064400000000000000000000001661046102023000245210ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" insta-1.46.1/tests/snapshots/test_glob__basic_globbing@hello.txt.snap000064400000000000000000000001621046102023000241700ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs/hello.txt --- "Contents of hello" insta-1.46.1/tests/snapshots/test_glob__basic_globbing_nested@a__file.txt.snap000064400000000000000000000001571046102023000260110ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs-nested/a/file.txt --- Hello A 
insta-1.46.1/tests/snapshots/test_glob__basic_globbing_nested@b__file.txt.snap000064400000000000000000000001571046102023000260120ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs-nested/b/file.txt --- Hello B insta-1.46.1/tests/snapshots/test_glob__globs_follow_links@goodbye.txt.snap000064400000000000000000000001661046102023000254650ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs/goodbye.txt --- "Contents of goodbye" insta-1.46.1/tests/snapshots/test_glob__globs_follow_links@hello.txt.snap000064400000000000000000000001621046102023000251340ustar 00000000000000--- source: tests/test_glob.rs expression: "&contents" input_file: tests/inputs/hello.txt --- "Contents of hello" insta-1.46.1/tests/snapshots/test_inline__unnamed_thread_single_line-2.snap000064400000000000000000000001371046102023000253240ustar 00000000000000--- source: insta/tests/test_inline.rs expression: "\"Testing-thread-2\"" --- Testing-thread-2 insta-1.46.1/tests/snapshots/test_inline__unnamed_thread_single_line.snap000064400000000000000000000001331046102023000251610ustar 00000000000000--- source: insta/tests/test_inline.rs expression: "\"Testing-thread\"" --- Testing-thread insta-1.46.1/tests/snapshots/test_redaction__foo_bar.snap000064400000000000000000000004151046102023000217370ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_alt.snap000064400000000000000000000004261046102023000226010ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo[\\\"bar\\\"]\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_deep.snap000064400000000000000000000004521046102023000227350ustar 00000000000000--- 
source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar.**\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), DeepWildcard, ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_full_range.snap000064400000000000000000000005351046102023000241400ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar[]\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), Range( None, None, ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_range.snap000064400000000000000000000006701046102023000231160ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar[10:20]\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), Range( Some( 10, ), Some( 20, ), ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_range_from.snap000064400000000000000000000006131046102023000241360ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar[10:]\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), Range( Some( 10, ), None, ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__foo_bar_range_to.snap000064400000000000000000000006131046102023000236150ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "Selector :: parse(\".foo.bar[:10]\").unwrap()" --- Selector { selectors: [ [ Key( "foo", ), Key( "bar", ), Range( None, Some( 10, ), ), ], ], } insta-1.46.1/tests/snapshots/test_redaction__map_key_redaction.snap000064400000000000000000000002061046102023000240030ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: foo_value --- hm: ? 
bucket: "[bucket]" value: 0 : 42 btm: "[key]": 23 insta-1.46.1/tests/snapshots/test_redaction__metadata_raw_info_no_redaction.snap000064400000000000000000000002111046102023000265120ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&vec![1, 2, 3]" info: secret: sensitive_value public: visible --- - 1 - 2 - 3 insta-1.46.1/tests/snapshots/test_redaction__metadata_redaction_test.snap000064400000000000000000000002061046102023000251750ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&vec![1, 2, 3]" info: secret: "[REDACTED]" public: visible --- - 1 - 2 - 3 insta-1.46.1/tests/snapshots/test_redaction__named_redacted_debug_expr.snap000064400000000000000000000002761046102023000254600ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: This is a custom debug expression for the snapshot --- id: "[id]" items: - one - two - three metadata: count: 42 version: 123 insta-1.46.1/tests/snapshots/test_redaction__named_redacted_supported.snap000064400000000000000000000002221046102023000253500ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&obj" --- id: "[id]" items: - one - two - three metadata: count: 42 version: 123 insta-1.46.1/tests/snapshots/test_redaction__rounded_redaction.snap000064400000000000000000000002031046102023000240130ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&MyPoint { x: 1.0 / 3.0, y: 6.0 / 3.0, }" --- { "x": 0.3333, "y": 2.0 } insta-1.46.1/tests/snapshots/test_redaction__struct_array_redaction.snap000064400000000000000000000003541046102023000251040ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "vec![checkout]" --- - _id: "[checkout_id]" products: - _id: "[product_id]" product_name: "[product_name]" - _id: "[product_id]" product_name: "[product_name]" insta-1.46.1/tests/snapshots/test_redaction__user_csv.snap000064400000000000000000000004361046102023000221640ustar 00000000000000--- source: 
tests/test_redaction.rs expression: "&User {\n id: 44,\n username: \"julius_csv\".to_string(),\n email: Email(\"julius@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- id,username,email,extra [id],julius_csv,julius@example.com, insta-1.46.1/tests/snapshots/test_redaction__user_json.snap000064400000000000000000000005011046102023000223330ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 9999, username: \"jason_doe\".to_string(), email:\n Email(\"jason@example.com\".to_string()), extra:\n \"ssn goes here\".to_string(),\n}" --- { "id": "[id]", "username": "jason_doe", "email": "jason@example.com", "extra": "[extra]" } insta-1.46.1/tests/snapshots/test_redaction__user_json_flags.snap000064400000000000000000000005351046102023000235160ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 122, username: \"jason_doe\".to_string(), flags:\n vec![\"zzz\".into(), \"foo\".into(), \"aha\".into(),\n \"is_admin\".into()].into_iter().collect(),\n}" --- { "id": "[id]", "username": "jason_doe", "flags": [ "aha", "foo", "is_admin", "zzz" ] } insta-1.46.1/tests/snapshots/test_redaction__user_json_flags_alt.snap000064400000000000000000000005411046102023000243530ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 122, username: \"jason_doe\".to_string(), flags:\n MySet(vec![\"zzz\".into(), \"foo\".into(), \"aha\".into(),\n \"is_admin\".into()].into_iter().collect()),\n}" --- { "flags": [ "aha", "foo", "is_admin", "zzz" ], "id": 122, "username": "jason_doe" } insta-1.46.1/tests/snapshots/test_redaction__user_json_settings.snap000064400000000000000000000005001046102023000242520ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 122, username: \"jason_doe\".to_string(), email:\n Email(\"jason@example.com\".to_string()), extra:\n \"ssn goes here\".to_string(),\n}" --- { "id": "[id]", "username": "jason_doe", "email": 
"jason@example.com", "extra": "[extra]" } insta-1.46.1/tests/snapshots/test_redaction__user_json_settings_callback.snap000064400000000000000000000004741046102023000261000ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 1234, username: \"jason_doe\".to_string(), email:\n Email(\"jason@example.com\".to_string()), extra: \"extra here\".to_string(),\n}" --- { "id": "[id]", "username": "jason_doe", "email": "jason@example.com", "extra": "extra here" } insta-1.46.1/tests/snapshots/test_redaction__user_ron.snap000064400000000000000000000004751046102023000221720ustar 00000000000000--- source: tests/test_redaction.rs expression: "&User {\n id: 53,\n username: \"john_ron\".to_string(),\n email: Email(\"john@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- User( id: "[id]", username: "john_ron", email: Email("john@example.com"), extra: "", ) insta-1.46.1/tests/snapshots/test_redaction__user_toml.snap000064400000000000000000000004461046102023000223450ustar 00000000000000--- source: tests/test_redaction.rs expression: "&User {\n id: 53,\n username: \"john_ron\".to_string(),\n email: Email(\"john@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- id = '[id]' username = 'john_ron' email = 'john@example.com' extra = '' insta-1.46.1/tests/snapshots/test_redaction__with_random_value.snap000064400000000000000000000004071046102023000240400ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 42, username: \"john_doe\".to_string(), email:\n Email(\"john@example.com\".to_string()), extra: \"\".to_string(),\n}" --- id: "[id]" username: john_doe email: john@example.com extra: "" insta-1.46.1/tests/snapshots/test_redaction__with_random_value_and_match_comma-2.snap000064400000000000000000000004071046102023000273510ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 11, username: \"john_doe\".to_string(), email:\n 
Email(\"john@example.com\".to_string()), extra: \"\".to_string(),\n}" --- id: "[id]" username: john_doe email: john@example.com extra: "" insta-1.46.1/tests/snapshots/test_redaction__with_random_value_and_match_comma.snap000064400000000000000000000004071046102023000272120ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 11, username: \"john_doe\".to_string(), email:\n Email(\"john@example.com\".to_string()), extra: \"\".to_string(),\n}" --- id: "[id]" username: john_doe email: john@example.com extra: "" insta-1.46.1/tests/snapshots/test_redaction__with_random_value_and_trailing_comma.snap000064400000000000000000000004071046102023000277270ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 11, username: \"john_doe\".to_string(), email:\n Email(\"john@example.com\".to_string()), extra: \"\".to_string(),\n}" --- id: "[id]" username: john_doe email: john@example.com extra: "" insta-1.46.1/tests/snapshots/test_redaction__with_random_value_csv_match.snap000064400000000000000000000004361046102023000260710ustar 00000000000000--- source: tests/test_redaction.rs expression: "&User {\n id: 44,\n username: \"julius_csv\".to_string(),\n email: Email(\"julius@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- id,username,email,extra [id],julius_csv,julius@example.com, insta-1.46.1/tests/snapshots/test_redaction__with_random_value_inline_callback.snap000064400000000000000000000004071046102023000272120ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 23, username: \"john_doe\".to_string(), email:\n Email(\"john@example.com\".to_string()), extra: \"\".to_string(),\n}" --- id: "[id]" username: john_doe email: john@example.com extra: "" insta-1.46.1/tests/snapshots/test_redaction__with_random_value_json_match.snap000064400000000000000000000005011046102023000262400ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 
9999, username: \"jason_doe\".to_string(), email:\n Email(\"jason@example.com\".to_string()), extra:\n \"ssn goes here\".to_string(),\n}" --- { "id": "[id]", "username": "jason_doe", "email": "jason@example.com", "extra": "[extra]" } insta-1.46.1/tests/snapshots/test_redaction__with_random_value_json_settings2.snap000064400000000000000000000005001046102023000270650ustar 00000000000000--- source: insta/tests/test_redaction.rs expression: "&User\n{\n id: 975, username: \"jason_doe\".to_string(), email:\n Email(\"jason@example.com\".to_string()), extra:\n \"ssn goes here\".to_string(),\n}" --- { "id": "[id]", "username": "jason_doe", "email": "jason@example.com", "extra": "[extra]" } insta-1.46.1/tests/snapshots/test_redaction__with_random_value_ron_match.snap000064400000000000000000000004751046102023000260770ustar 00000000000000--- source: tests/test_redaction.rs expression: "&User {\n id: 53,\n username: \"john_ron\".to_string(),\n email: Email(\"john@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- User( id: "[id]", username: "john_ron", email: Email("john@example.com"), extra: "", ) insta-1.46.1/tests/snapshots/test_redaction__with_random_value_toml_match.snap000064400000000000000000000004461046102023000262520ustar 00000000000000--- source: tests/test_redaction.rs expression: "&User {\n id: 53,\n username: \"john_ron\".to_string(),\n email: Email(\"john@example.com\".to_string()),\n extra: \"\".to_string(),\n }" --- id = '[id]' username = 'john_ron' email = 'john@example.com' extra = '' insta-1.46.1/tests/snapshots/test_settings__snapshot_with_description.snap000064400000000000000000000002161046102023000255140ustar 00000000000000--- source: insta/tests/test_settings.rs description: The snapshot is three integers expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] insta-1.46.1/tests/snapshots/test_settings__snapshot_with_description_and_info.snap000064400000000000000000000003431046102023000273520ustar 00000000000000--- source: insta/tests/test_settings.rs 
description: The snapshot is four integers expression: "vec![1, 2, 3, 4]" info: env: ENVIRONMENT: production cmdline: - my-tool - run --- [ 1, 2, 3, 4, ] insta-1.46.1/tests/snapshots/test_settings__snapshot_with_description_and_raw_info.snap000064400000000000000000000003521046102023000302230ustar 00000000000000--- source: insta/tests/test_settings.rs description: The snapshot is four integers expression: "vec![1, 2, 3, 4]" info: env: - ENVIRONMENT - production cmdline: - my-tool - run --- [ 1, 2, 3, 4, ] insta-1.46.1/tests/snapshots2/test_settings__snapshot_path.snap000064400000000000000000000001421046102023000231520ustar 00000000000000--- source: insta/tests/test_settings.rs expression: "vec![1, 2, 3]" --- [ 1, 2, 3, ] insta-1.46.1/tests/test_advanced.rs000064400000000000000000000021231046102023000153450ustar 00000000000000use insta::{allow_duplicates, assert_debug_snapshot}; #[cfg(feature = "filters")] #[test] fn test_basic_filter() { use insta::{assert_snapshot, with_settings}; with_settings!({filters => vec![ (r"\b[[:xdigit:]]{8}\b", "[SHORT_HEX]") ]}, { assert_snapshot!("Hello DEADBEEF!", @"Hello [SHORT_HEX]!"); }) } #[cfg(feature = "json")] #[test] fn test_basic_suffixes() { for value in [1, 2, 3] { insta::with_settings!({snapshot_suffix => value.to_string()}, { insta::assert_json_snapshot!(&value); }); } } #[test] fn test_basic_duplicates_passes() { allow_duplicates! { for x in (0..10).step_by(2) { let is_even = x % 2 == 0; assert_debug_snapshot!(is_even, @"true"); } } } #[test] #[should_panic = "snapshot assertion for 'basic_duplicates_assertion_failed' failed in line"] fn test_basic_duplicates_assertion_failed() { allow_duplicates! 
{ for x in (0..10).step_by(3) { let is_even = x % 2 == 0; assert_debug_snapshot!(is_even, @"true"); } } } insta-1.46.1/tests/test_basic.rs000064400000000000000000000056121046102023000146670ustar 00000000000000#[cfg(feature = "json")] use insta::assert_json_snapshot; #[cfg(feature = "yaml")] use insta::assert_yaml_snapshot; #[allow(deprecated)] use insta::{assert_debug_snapshot, assert_display_snapshot, assert_snapshot}; use std::fmt; #[test] fn test_debug_vector() { assert_debug_snapshot!("debug_vector", vec![1, 2, 3]); } #[test] fn test_unnamed_debug_vector() { assert_debug_snapshot!(vec![1, 2, 3]); assert_debug_snapshot!(vec![1, 2, 3, 4]); assert_debug_snapshot!(vec![1, 2, 3, 4, 5]); } #[test] fn test_unnamed_nested_closure() { #![allow(clippy::redundant_closure_call)] (|| { (|| { assert_debug_snapshot!(vec![1, 2, 3]); })(); })(); } #[cfg(feature = "yaml")] #[test] fn test_yaml_vector() { assert_yaml_snapshot!("yaml_vector", vec![1, 2, 3]); } #[cfg(feature = "yaml")] #[test] fn test_unnamed_yaml_vector() { assert_yaml_snapshot!(vec![1, 2, 3]); assert_yaml_snapshot!(vec![1, 2, 3, 4]); assert_yaml_snapshot!(vec![1, 2, 3, 4, 5]); } #[cfg(feature = "json")] #[test] fn test_json_vector() { assert_json_snapshot!("json_vector", vec![1, 2, 3]); } #[cfg(feature = "json")] #[test] fn test_unnamed_json_vector() { assert_json_snapshot!(vec![1, 2, 3]); assert_json_snapshot!(vec![1, 2, 3, 4]); assert_json_snapshot!(vec![1, 2, 3, 4, 5]); } mod nested { #[test] fn test_nested_module() { insta::assert_snapshot!("aoeu"); } } #[test] fn test_trailing_commas() { assert_snapshot!("Testing",); assert_snapshot!("Testing", "name",); assert_snapshot!("Testing", "name", "expr",); #[cfg(feature = "yaml")] assert_yaml_snapshot!(vec![1, 2, 3, 4, 5],); } struct TestDisplay; impl fmt::Display for TestDisplay { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "TestDisplay struct") } } #[test] #[allow(deprecated)] fn test_display() { let td = TestDisplay; 
assert_display_snapshot!("display", td); } #[test] #[allow(deprecated)] fn test_unnamed_display() { let td = TestDisplay; assert_display_snapshot!(td); assert_display_snapshot!("whatever"); } #[cfg(feature = "json")] #[test] fn test_u128_json() { let x: u128 = u128::from(u64::MAX) * 2; assert_json_snapshot!(&x, @"36893488147419103230"); } #[cfg(feature = "yaml")] #[test] fn insta_sort_order() { use std::collections::HashMap; let mut m = HashMap::new(); m.insert((1, 3), 4); m.insert((2, 3), 4); m.insert((1, 4), 4); m.insert((3, 3), 4); m.insert((9, 3), 4); insta::with_settings!({sort_maps =>true}, { insta::assert_yaml_snapshot!(m); }); } #[test] fn test_crlf() { insta::assert_snapshot!("foo\r\nbar\r\nbaz"); } #[test] fn test_trailing_crlf() { insta::assert_snapshot!("foo\r\nbar\r\nbaz\r\n"); } #[test] fn test_trailing_crlf_inline() { insta::assert_snapshot!("foo\r\nbar\r\nbaz\r\n", @" foo bar baz "); } insta-1.46.1/tests/test_binary.rs000064400000000000000000000015501046102023000150670ustar 00000000000000#[test] fn test_binary_snapshot() { insta::assert_binary_snapshot!(".txt", b"test".to_vec()); } #[test] #[should_panic(expected = "'.new' is not allowed as a file extension")] fn test_new_extension() { insta::assert_binary_snapshot!(".new", b"test".to_vec()); } #[test] #[should_panic(expected = "\"test\" does not match the format \"name.extension\"")] fn test_malformed_name_and_extension() { insta::assert_binary_snapshot!("test", b"test".to_vec()); } #[test] #[should_panic(expected = "file extensions starting with 'new.' 
are not allowed")] fn test_extension_starting_with_new() { insta::assert_binary_snapshot!(".new.gz", b"test".to_vec()); } #[test] fn test_multipart_extension() { insta::assert_binary_snapshot!(".tar.gz", b"test".to_vec()); } #[test] fn test_named() { insta::assert_binary_snapshot!("name.json", b"null".to_vec()); } insta-1.46.1/tests/test_glob.rs000064400000000000000000000021621046102023000145260ustar 00000000000000#![cfg(feature = "glob")] mod glob_submodule; #[test] fn test_basic_globbing() { insta::glob!("inputs/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); }); } #[test] fn test_basic_globbing_nested() { insta::glob!("inputs-nested/*/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_snapshot!(&contents); }); } #[test] fn test_globs_follow_links() { insta::glob!("link-to-inputs/*.txt", |path| { let contents = std::fs::read_to_string(path).unwrap(); insta::assert_json_snapshot!(&contents); }); } #[test] #[should_panic(expected = "the glob! 
macro did not match any files.")] fn test_empty_glob_fails() { insta::glob!("nonexistent", |_| { // nothing }); } #[test] #[should_panic(expected = "Parent directory traversal is not supported in glob patterns")] fn test_parent_dir_glob_fails_with_helpful_message() { insta::glob!("../**/*.rs", |_| { // This should fail with a helpful error message about parent directory traversal }); } insta-1.46.1/tests/test_inline.rs000064400000000000000000000145241046102023000150660ustar 00000000000000#[cfg(feature = "csv")] use insta::assert_csv_snapshot; #[cfg(feature = "ron")] use insta::assert_ron_snapshot; #[cfg(feature = "toml")] use insta::assert_toml_snapshot; #[cfg(feature = "yaml")] use insta::assert_yaml_snapshot; #[cfg(feature = "json")] use insta::{assert_compact_json_snapshot, assert_json_snapshot}; use insta::{assert_compact_debug_snapshot, assert_debug_snapshot, assert_snapshot}; use std::thread; #[test] fn test_simple() { assert_debug_snapshot!(vec![1, 2, 3, 4], @" [ 1, 2, 3, 4, ] "); } #[test] fn test_trailing_commas() { assert_snapshot!( "Testing", @"Testing", ); } #[test] fn test_single_line() { assert_snapshot!("Testing", @"Testing"); } // We used to use the thread name for snapshot name detection. This is unreliable // so this test now basically does exactly the same as `test_unnamed_single_line`. 
#[test] fn test_unnamed_thread_single_line() { let builder = thread::Builder::new().name("foo::lol::something".into()); let handler = builder .spawn(|| { assert_snapshot!("Testing-thread"); assert_snapshot!("Testing-thread-2"); }) .unwrap(); handler.join().unwrap(); } #[test] fn test_newline() { // https://github.com/mitsuhiko/insta/issues/39 assert_snapshot!("\n", @""); } #[test] fn test_inline_debug_expr() { assert_snapshot!("hello", "a debug expr", @"hello"); } #[cfg(feature = "csv")] #[test] fn test_csv_inline() { #[derive(serde::Serialize)] pub struct Email(String); #[derive(serde::Serialize)] pub struct User { id: u32, username: String, email: Email, } assert_csv_snapshot!(User { id: 1453, username: "mehmed-doe".into(), email: Email("mehmed@doe.invalid".into()), }, @r###" id,username,email 1453,mehmed-doe,mehmed@doe.invalid "###); } #[cfg(feature = "csv")] #[test] fn test_csv_inline_multiple_values() { #[derive(serde::Serialize)] pub struct Email(String); #[derive(serde::Serialize)] pub struct User { id: u32, username: String, email: Email, } let user1 = User { id: 1453, username: "mehmed-doe".into(), email: Email("mehmed@doe.invalid".into()), }; let user2 = User { id: 1455, username: "mehmed-doe-di".into(), email: Email("mehmed@doe-di.invalid".into()), }; assert_csv_snapshot!(vec![user1, user2], @r###" id,username,email 1453,mehmed-doe,mehmed@doe.invalid 1455,mehmed-doe-di,mehmed@doe-di.invalid "###); } #[cfg(feature = "ron")] #[test] fn test_ron_inline() { #[derive(serde::Serialize)] pub struct Email(String); #[derive(serde::Serialize)] pub struct User { id: u32, username: String, email: Email, } assert_ron_snapshot!(User { id: 42, username: "peter-doe".into(), email: Email("peter@doe.invalid".into()), }, @r###" User( id: 42, username: "peter-doe", email: Email("peter@doe.invalid"), ) "###); } #[cfg(feature = "toml")] #[test] fn test_toml_inline() { #[derive(serde::Serialize)] pub struct Email(String); #[derive(serde::Serialize)] pub struct User { id: u32, 
username: String, email: Email, } assert_toml_snapshot!(User { id: 42, username: "peter-doe".into(), email: Email("peter@doe.invalid".into()), }, @r" id = 42 username = 'peter-doe' email = 'peter@doe.invalid' "); } #[cfg(feature = "json")] #[test] fn test_json_inline() { assert_json_snapshot!(vec!["foo", "bar"], @r#" [ "foo", "bar" ] "#); } #[cfg(feature = "yaml")] #[test] fn test_yaml_inline() { #[derive(serde::Serialize)] pub struct User { id: u32, username: String, email: String, } assert_yaml_snapshot!(User { id: 42, username: "peter-pan".into(), email: "peterpan@wonderland.invalid".into() }, @" id: 42 username: peter-pan email: peterpan@wonderland.invalid "); } #[cfg(all(feature = "redactions", feature = "yaml"))] #[test] fn test_yaml_inline_redacted() { #[derive(serde::Serialize)] pub struct User { id: u32, username: String, email: String, } assert_yaml_snapshot!(User { id: 42, username: "peter-pan".into(), email: "peterpan@wonderland.invalid".into() }, { ".id" => "[user-id]" }, @r#" id: "[user-id]" username: peter-pan email: peterpan@wonderland.invalid "#); } #[test] fn test_non_basic_plane() { assert_snapshot!("a 😀oeu", @"a 😀oeu"); } #[test] fn test_multiline_with_empty_lines() { assert_snapshot!("# first\nsecond\n third\n\n# alternative", @" # first second third # alternative "); } #[cfg(feature = "json")] #[test] fn test_compact_json() { assert_compact_json_snapshot!((1..30).collect::>(), @"[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]"); assert_compact_json_snapshot!((1..34).collect::>(), @" [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33 ] "); } #[test] fn test_compact_debug() { assert_compact_debug_snapshot!((1..30).collect::>(), @"[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]"); assert_compact_debug_snapshot!((1..34).collect::>(), @"[1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]"); } #[test] #[should_panic = "Insta does not allow inline snapshot assertions in loops"] fn test_inline_test_in_loop() { for i in 0..10 { assert_snapshot!(i.to_string(), @"0"); } } #[test] fn test_inline_snapshot_whitespace() { assert_snapshot!("\n\nfoo\n\n bar\n\n", @" foo bar "); } #[test] fn test_indentation() { assert_snapshot!("aaa\nbbb\nccc\nddd", @" aaa bbb ccc ddd " ); } insta-1.46.1/tests/test_redaction.rs000064400000000000000000000372331046102023000155620ustar 00000000000000#![cfg(feature = "redactions")] use insta::_macro_support::Selector; #[cfg(feature = "csv")] use insta::assert_csv_snapshot; #[cfg(feature = "json")] use insta::assert_json_snapshot; #[cfg(feature = "ron")] use insta::assert_ron_snapshot; #[cfg(feature = "toml")] use insta::assert_toml_snapshot; #[cfg(feature = "yaml")] use insta::assert_yaml_snapshot; use insta::assert_debug_snapshot; use serde::Serialize; #[test] fn test_selector_parser() { macro_rules! 
assert_selector_snapshot { ($short:expr, $sel:expr) => { assert_debug_snapshot!($short, Selector::parse($sel).unwrap()); }; } assert_selector_snapshot!("foo_bar", ".foo.bar"); assert_selector_snapshot!("foo_bar_alt", ".foo[\"bar\"]"); assert_selector_snapshot!("foo_bar_full_range", ".foo.bar[]"); assert_selector_snapshot!("foo_bar_range_to", ".foo.bar[:10]"); assert_selector_snapshot!("foo_bar_range_from", ".foo.bar[10:]"); assert_selector_snapshot!("foo_bar_range", ".foo.bar[10:20]"); assert_selector_snapshot!("foo_bar_deep", ".foo.bar.**"); } #[derive(Serialize)] pub struct Email(String); #[derive(Serialize)] pub struct User { id: u32, username: String, email: Email, extra: String, } #[cfg(feature = "yaml")] #[test] fn test_with_random_value() { assert_yaml_snapshot!(&User { id: 42, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, { ".id" => "[id]" }); } #[cfg(feature = "yaml")] #[test] fn test_with_random_value_inline_callback() { assert_yaml_snapshot!(&User { id: 23, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, { ".id" => insta::dynamic_redaction(|value, path| { similar_asserts::assert_eq!(path.to_string(), ".id"); similar_asserts::assert_eq!(value.as_u64().unwrap(), 23); "[id]" }), }); } #[cfg(feature = "yaml")] #[test] fn test_with_random_value_and_trailing_comma() { assert_yaml_snapshot!(&User { id: 11, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, { ".id" => "[id]", }); } #[cfg(feature = "yaml")] #[test] fn test_with_random_value_and_match_comma() { assert_yaml_snapshot!( &User { id: 11, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, match .. { ".id" => "[id]", } ); assert_yaml_snapshot!( &User { id: 11, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, match .. 
{ ".id" => "[id]", }, // comma here ); assert_yaml_snapshot!( &User { id: 11, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, match .. { ".id" => "[id]", }, @r#" id: "[id]" username: john_doe email: john@example.com extra: "" "#, // comma here ); } #[cfg(feature = "csv")] #[test] fn test_with_random_value_csv() { assert_csv_snapshot!("user_csv", &User { id: 44, username: "julius_csv".to_string(), email: Email("julius@example.com".to_string()), extra: "".to_string(), }, { ".id" => "[id]" }); } #[cfg(feature = "csv")] #[test] fn test_with_random_value_csv_match() { assert_csv_snapshot!( &User { id: 44, username: "julius_csv".to_string(), email: Email("julius@example.com".to_string()), extra: "".to_string(), }, match .. { ".id" => "[id]", } ); } #[cfg(feature = "ron")] #[test] fn test_with_random_value_ron() { assert_ron_snapshot!("user_ron", &User { id: 53, username: "john_ron".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, { ".id" => "[id]" }); } #[cfg(feature = "ron")] #[test] fn test_with_random_value_ron_match() { assert_ron_snapshot!( &User { id: 53, username: "john_ron".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, match .. { ".id" => "[id]", } ); } #[cfg(feature = "toml")] #[test] fn test_with_random_value_toml() { assert_toml_snapshot!("user_toml", &User { id: 53, username: "john_ron".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, { ".id" => "[id]" }); } #[cfg(feature = "toml")] #[test] fn test_with_random_value_toml_match() { assert_toml_snapshot!( &User { id: 53, username: "john_ron".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }, match .. 
{ ".id" => "[id]", } ); } #[cfg(feature = "json")] #[test] fn test_with_random_value_json() { assert_json_snapshot!("user_json", &User { id: 9999, username: "jason_doe".to_string(), email: Email("jason@example.com".to_string()), extra: "ssn goes here".to_string(), }, { ".id" => "[id]", ".extra" => "[extra]" }); } #[cfg(feature = "json")] #[test] fn test_with_random_value_json_match() { assert_json_snapshot!( &User { id: 9999, username: "jason_doe".to_string(), email: Email("jason@example.com".to_string()), extra: "ssn goes here".to_string(), }, match .. { ".id" => "[id]", ".extra" => "[extra]", } ); } #[cfg(feature = "json")] #[test] fn test_with_random_value_json_settings() { let mut settings = insta::Settings::new(); settings.add_redaction(".id", "[id]"); settings.add_redaction(".extra", "[extra]"); settings.bind(|| { assert_json_snapshot!( "user_json_settings", &User { id: 122, username: "jason_doe".to_string(), email: Email("jason@example.com".to_string()), extra: "ssn goes here".to_string(), } ); }); } #[cfg(feature = "json")] #[test] fn test_with_callbacks() { let mut settings = insta::Settings::new(); settings.add_dynamic_redaction(".id", |value, path| { similar_asserts::assert_eq!(path.to_string(), ".id"); similar_asserts::assert_eq!(value.as_u64().unwrap(), 1234); "[id]" }); settings.bind(|| { assert_json_snapshot!( "user_json_settings_callback", &User { id: 1234, username: "jason_doe".to_string(), email: Email("jason@example.com".to_string()), extra: "extra here".to_string(), } ); }); } #[cfg(feature = "json")] #[test] fn test_with_random_value_json_settings2() { insta::with_settings!({redactions => vec![ (".id", "[id]".into()), (".extra", "[extra]".into()), ]}, { assert_json_snapshot!( &User { id: 975, username: "jason_doe".to_string(), email: Email("jason@example.com".to_string()), extra: "ssn goes here".to_string(), } ); }); } #[cfg(feature = "json")] #[test] fn test_redact_newtype_struct() { #[derive(Serialize)] pub struct UserWrapper(User); let 
wrapper = UserWrapper(User { id: 42, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }); assert_json_snapshot!(wrapper, { r#".id"# => "[id]" }, @r#" { "id": "[id]", "username": "john_doe", "email": "john@example.com", "extra": "" } "#); } #[cfg(feature = "yaml")] #[test] fn test_redact_newtype_enum() { #[derive(Serialize)] pub enum Role { Admin(User), Visitor { id: String, name: String }, } let visitor = Role::Visitor { id: "my-id".into(), name: "my-name".into(), }; assert_yaml_snapshot!(visitor, { r#".id"# => "[id]", }, @r#" Visitor: id: "[id]" name: my-name "#); let admin = Role::Admin(User { id: 42, username: "john_doe".to_string(), email: Email("john@example.com".to_string()), extra: "".to_string(), }); assert_yaml_snapshot!(admin, { r#".id"# => "[id]", }, @r#" Admin: id: "[id]" username: john_doe email: john@example.com extra: "" "#); } #[cfg(feature = "json")] #[test] fn test_redact_recursive() { #[derive(Serialize)] pub struct Node { id: u64, next: Option>, } let root = Node { id: 0, next: Some(Box::new(Node { id: 1, next: None })), }; assert_json_snapshot!(root, { ".**.id" => "[id]", }, @r#" { "id": "[id]", "next": { "id": "[id]", "next": null } } "#); } #[cfg(feature = "yaml")] #[test] fn test_struct_array_redaction() { #[derive(Serialize)] pub struct Product { _id: String, product_name: String, } #[derive(Serialize)] pub struct Checkout { _id: String, products: Vec, } let checkout = Checkout { _id: "checkout/1".to_string(), products: vec![ Product { _id: "product/1".to_string(), product_name: "a car".to_string(), }, Product { _id: "product/2".to_string(), product_name: "a boat".to_string(), }, ], }; assert_yaml_snapshot!(vec![checkout], { "[]._id" => "[checkout_id]", "[].products[]._id" => "[product_id]", "[].products[].product_name" => "[product_name]", }); } #[cfg(feature = "yaml")] #[test] fn test_map_key_redaction() { #[derive(Serialize, Hash, PartialEq, PartialOrd, Eq, Ord)] struct Key { bucket: 
u32, value: u32, } #[derive(Serialize)] struct Foo { hm: std::collections::HashMap, btm: std::collections::BTreeMap<(u32, u32), u32>, } let mut hm = std::collections::HashMap::new(); hm.insert( Key { bucket: 1, value: 0, }, 42, ); let mut btm = std::collections::BTreeMap::new(); btm.insert((0, 0), 23); let foo_value = Foo { hm, btm }; assert_yaml_snapshot!(foo_value, { ".hm.$key.bucket" => "[bucket]", ".btm.$key" => "[key]", }); } #[cfg(feature = "json")] #[test] fn test_ordering() { #[derive(Debug, Serialize)] pub struct User { id: u64, username: String, flags: std::collections::HashSet, } let mut settings = insta::Settings::new(); settings.add_redaction(".id", "[id]"); settings.sort_selector(".flags"); settings.bind(|| { assert_json_snapshot!( "user_json_flags", &User { id: 122, username: "jason_doe".to_string(), flags: vec!["zzz".into(), "foo".into(), "aha".into(), "is_admin".into()] .into_iter() .collect(), } ); }); } #[cfg(feature = "json")] #[test] fn test_ordering_newtype_set() { #[derive(Debug, Serialize)] pub struct MySet(std::collections::HashSet); #[derive(Debug, Serialize)] pub struct User { id: u64, username: String, flags: MySet, } assert_json_snapshot!( "user_json_flags_alt", &User { id: 122, username: "jason_doe".to_string(), flags: MySet(vec!["zzz".into(), "foo".into(), "aha".into(), "is_admin".into()] .into_iter() .collect()), }, { "." => insta::sorted_redaction(), ".flags" => insta::sorted_redaction() } ); } #[cfg(feature = "json")] #[test] fn test_rounded_redaction() { #[derive(Debug, Serialize)] pub struct MyPoint { x: f64, y: f64, } assert_json_snapshot!( "rounded_redaction", &MyPoint { x: 1.0 / 3.0, y: 6.0 / 3.0, }, { ".x" => insta::rounded_redaction(4), ".y" => insta::rounded_redaction(4), } ); } #[cfg(feature = "yaml")] #[test] fn test_named_redacted_with_debug_expr() { // This test demonstrates the form with a name, redactions, and debug expression // | File, redacted, named, debug expr | `assert_yaml_snapshot!("name", expr, {"." 
=> sorted_redaction()}, "debug_expr")` | #[derive(Serialize, Debug)] pub struct ComplexObject { id: u32, items: Vec, metadata: std::collections::HashMap, } let mut metadata = std::collections::HashMap::new(); metadata.insert("count".to_string(), 42); metadata.insert("version".to_string(), 123); let complex_obj = ComplexObject { id: 12345, items: vec!["one".to_string(), "two".to_string(), "three".to_string()], metadata, }; // Now that we've added support for this form, we can test it directly assert_yaml_snapshot!( "named_redacted_debug_expr", &complex_obj, { ".id" => "[id]", ".metadata" => insta::sorted_redaction() }, "This is a custom debug expression for the snapshot" ); } #[cfg(feature = "yaml")] #[test] fn test_named_redacted_supported_form() { #[derive(Serialize, Debug)] pub struct ComplexObject { id: u32, items: Vec, metadata: std::collections::HashMap, } let mut metadata = std::collections::HashMap::new(); metadata.insert("count".to_string(), 42); metadata.insert("version".to_string(), 123); let obj = ComplexObject { id: 12345, items: vec!["one".to_string(), "two".to_string(), "three".to_string()], metadata, }; assert_yaml_snapshot!( "named_redacted_supported", &obj, { ".id" => "[id]", ".metadata" => insta::sorted_redaction() } ); } #[cfg(all(feature = "yaml", feature = "redactions"))] #[test] fn test_metadata_redaction() { #[derive(Serialize)] struct Info { secret: String, public: String, } let mut settings = insta::Settings::new(); settings.add_redaction(".secret", "[REDACTED]"); settings.set_info(&Info { secret: "sensitive_value".into(), public: "visible".into(), }); settings.bind(|| { assert_yaml_snapshot!("metadata_redaction_test", &vec![1, 2, 3]); }); } #[cfg(all(feature = "yaml", feature = "redactions"))] #[test] fn test_metadata_raw_info_no_redaction() { use insta::internals::Content; let mut settings = insta::Settings::new(); settings.add_redaction(".secret", "[REDACTED]"); // Create content that would be redacted if redactions were applied let 
content = Content::Map(vec![ (Content::from("secret"), Content::from("sensitive_value")), (Content::from("public"), Content::from("visible")), ]); settings.set_raw_info(&content); settings.bind(|| { assert_yaml_snapshot!("metadata_raw_info_no_redaction", &vec![1, 2, 3]); }); } insta-1.46.1/tests/test_settings.rs000064400000000000000000000071361046102023000154510ustar 00000000000000#[cfg(feature = "yaml")] use insta::assert_yaml_snapshot; use similar_asserts::assert_eq; use insta::{assert_debug_snapshot, with_settings, Settings}; #[cfg(feature = "yaml")] #[test] fn test_simple() { let mut map = std::collections::HashMap::new(); map.insert("a", "first value"); map.insert("b", "second value"); map.insert("c", "third value"); map.insert("d", "fourth value"); let mut settings = insta::Settings::new(); settings.set_sort_maps(true); settings.bind(|| { assert_yaml_snapshot!(&map, @" a: first value b: second value c: third value d: fourth value "); }); } #[cfg(feature = "yaml")] #[test] fn test_bound_to_scope() { let mut map = std::collections::HashMap::new(); map.insert("a", "first value"); map.insert("b", "second value"); map.insert("c", "third value"); map.insert("d", "fourth value"); { let mut settings = Settings::new(); settings.set_sort_maps(true); let _guard = settings.bind_to_scope(); assert_yaml_snapshot!(&map, @" a: first value b: second value c: third value d: fourth value "); } assert!(!Settings::clone_current().sort_maps()); } #[cfg(feature = "yaml")] #[test] fn test_settings_macro() { let mut map = std::collections::HashMap::new(); map.insert("a", "first value"); map.insert("b", "second value"); map.insert("c", "third value"); map.insert("d", "fourth value"); with_settings!({sort_maps => true}, { insta::assert_yaml_snapshot!(&map, @" a: first value b: second value c: third value d: fourth value "); }); } #[test] fn test_snapshot_path() { with_settings!({snapshot_path => "snapshots2"}, { assert_debug_snapshot!(vec![1, 2, 3]); }); } #[test] fn 
test_snapshot_no_module_prepending() { with_settings!({prepend_module_to_snapshot => false}, { assert_debug_snapshot!(vec![1, 2, 3]); }); } #[test] fn test_snapshot_with_description() { with_settings!({description => "The snapshot is three integers"}, { assert_debug_snapshot!(vec![1, 2, 3]) }); } #[test] fn test_snapshot_with_description_and_raw_info() { use insta::internals::Content; let raw_info = Content::Map(vec![ ( Content::from("env"), Content::Seq(vec![ Content::from("ENVIRONMENT"), Content::from("production"), ]), ), ( Content::from("cmdline"), Content::Seq(vec![Content::from("my-tool"), Content::from("run")]), ), ]); with_settings!({description => "The snapshot is four integers", raw_info => &raw_info}, { assert_debug_snapshot!(vec![1, 2, 3, 4]) }); } #[cfg(feature = "serde")] #[test] fn test_snapshot_with_description_and_info() { #[derive(serde::Serialize)] pub struct Info { env: std::collections::HashMap<&'static str, &'static str>, cmdline: Vec<&'static str>, } let info = Info { env: From::from([("ENVIRONMENT", "production")]), cmdline: vec!["my-tool", "run"], }; with_settings!({description => "The snapshot is four integers", info => &info}, { assert_debug_snapshot!(vec![1, 2, 3, 4]) }); } #[test] fn test_with_settings_inherit() { with_settings!({sort_maps => true}, { with_settings!({description => "aha"}, { let settings = Settings::clone_current(); assert!(settings.sort_maps()); assert_eq!(settings.description(), Some("aha")); }); }); } insta-1.46.1/tests/test_toml.rs000064400000000000000000000433011046102023000145560ustar 00000000000000//! Tests for TOML serialization in insta. //! //! These tests verify: //! - Backward compatibility (single-quoted strings via Pretty) //! - Support for types that toml 0.5.x couldn't serialize (issue #439) //! 
- Proper handling of special characters, escapes, and edge cases #![cfg(feature = "toml")] use insta::assert_toml_snapshot; use serde::Serialize; use std::collections::BTreeMap; // ============================================================================= // BACKWARD COMPATIBILITY - Critical for existing snapshots // ============================================================================= // // The old `toml` 0.5.x crate used single-quoted (literal) strings by default. // The new `toml_edit` crate uses double-quoted (basic) strings by default. // // To maintain backward compatibility with existing snapshots, the Pretty // visitor converts strings back to single-quoted format where possible. // // This is CRITICAL because changing quote style would break every existing // TOML snapshot in downstream projects. /// Verifies that simple strings use single quotes (backward compat with toml 0.5.x) #[test] fn test_toml_backward_compat_single_quotes() { #[derive(Serialize)] struct Config { name: String, version: String, path: String, } // CRITICAL: These MUST be single-quoted to match toml 0.5.x output assert_toml_snapshot!(Config { name: "my-package".into(), version: "1.0.0".into(), path: "/usr/local/bin".into(), }, @r" name = 'my-package' version = '1.0.0' path = '/usr/local/bin' "); } /// Verifies fallback to double quotes only when single quotes are impossible #[test] fn test_toml_backward_compat_quote_fallback() { #[derive(Serialize)] struct Data { // Can use single quotes - no special chars simple: String, // Must use double quotes - contains single quote with_apostrophe: String, // Must use multi-line - contains newline with_newline: String, } assert_toml_snapshot!(Data { simple: "hello world".into(), with_apostrophe: "it's here".into(), with_newline: "line1\nline2".into(), }, @r#" simple = 'hello world' with_apostrophe = '''it's here''' with_newline = ''' line1 line2''' "#); } /// Regression test for issue #439 - types that toml 0.5.x couldn't serialize /// 
The old toml crate would panic with "UnsupportedType" for unit struct variants #[test] fn test_toml_issue_439_unit_struct_variant() { #[derive(Serialize)] #[allow(dead_code)] enum MyEnum { Variant1 {}, Variant2 {}, } #[derive(Serialize)] struct Config { value: MyEnum, } // This would PANIC with toml 0.5.x: "UnsupportedType" // Now it works with toml_edit assert_toml_snapshot!(Config { value: MyEnum::Variant1 {} }, @"[value.Variant1]"); } // ============================================================================= // Core Types // ============================================================================= #[test] fn test_toml_basic_types() { #[derive(Serialize)] struct Data { string: String, integer: i64, unsigned: u64, float: f64, boolean: bool, } assert_toml_snapshot!(Data { string: "hello".into(), integer: -42, unsigned: 9007199254740991, float: 1.5, boolean: true, }, @r" string = 'hello' integer = -42 unsigned = 9007199254740991 float = 1.5 boolean = true "); } #[test] fn test_toml_special_floats() { #[derive(Serialize)] struct Floats { pos_inf: f64, neg_inf: f64, nan_value: f64, } assert_toml_snapshot!(Floats { pos_inf: f64::INFINITY, neg_inf: f64::NEG_INFINITY, nan_value: f64::NAN, }, @r" pos_inf = inf neg_inf = -inf nan_value = nan "); } #[test] fn test_toml_integer_boundaries() { #[derive(Serialize)] struct Boundaries { min_i64: i64, max_i64: i64, } assert_toml_snapshot!(Boundaries { min_i64: i64::MIN, max_i64: i64::MAX, }, @r" min_i64 = -9223372036854775808 max_i64 = 9223372036854775807 "); } // ============================================================================= // String Handling - Pretty Backward Compatibility // ============================================================================= #[test] fn test_toml_string_quoting() { #[derive(Serialize)] struct Strings { simple: String, with_double_quotes: String, with_single_quotes: String, with_both_quotes: String, empty: String, } assert_toml_snapshot!(Strings { simple: "hello".into(), 
with_double_quotes: r#"He said "Hello""#.into(), with_single_quotes: "It's working".into(), with_both_quotes: r#"He said "It's done""#.into(), empty: "".into(), }, @r#" simple = 'hello' with_double_quotes = 'He said "Hello"' with_single_quotes = '''It's working''' with_both_quotes = '''He said "It's done"''' empty = '' "#); } #[test] fn test_toml_string_escapes() { #[derive(Serialize)] struct Data { with_newline: String, with_tab: String, with_backslash: String, with_null: String, } assert_toml_snapshot!(Data { with_newline: "line1\nline2".into(), with_tab: "col1\tcol2".into(), with_backslash: "path\\to\\file".into(), with_null: "hello\0world".into(), }, @r#" with_newline = ''' line1 line2''' with_tab = 'col1 col2' with_backslash = 'path\to\file' with_null = "hello\u0000world" "#); } #[test] fn test_toml_control_characters() { #[derive(Serialize)] struct Data { carriage_return: String, form_feed: String, bell: String, } assert_toml_snapshot!(Data { carriage_return: "line1\rline2".into(), form_feed: "page1\x0cpage2".into(), bell: "alert\x07here".into(), }, @r#" carriage_return = "line1\rline2" form_feed = "page1\fpage2" bell = "alert\u0007here" "#); } // ============================================================================= // Structures and Nesting // ============================================================================= #[test] fn test_toml_nested_struct() { #[derive(Serialize)] struct Inner { value: i32, } #[derive(Serialize)] struct Outer { name: String, inner: Inner, } assert_toml_snapshot!(Outer { name: "test".into(), inner: Inner { value: 42 }, }, @r" name = 'test' [inner] value = 42 "); } #[test] fn test_toml_empty_struct() { #[derive(Serialize)] struct Empty {} #[derive(Serialize)] struct Container { empty: Empty, } assert_toml_snapshot!(Container { empty: Empty {} }, @"[empty]"); } /// Unit structs are NOT supported by TOML - this documents the limitation #[test] #[should_panic(expected = "UnsupportedType")] fn 
test_toml_unit_struct_unsupported() { #[derive(Serialize)] struct Marker; #[derive(Serialize)] struct Data { marker: Marker, } insta::_macro_support::serialize_value( &Data { marker: Marker }, insta::_macro_support::SerializationFormat::Toml, ); } // ============================================================================= // Arrays // ============================================================================= #[test] fn test_toml_arrays() { #[derive(Serialize)] struct Item { id: u32, name: String, } #[derive(Serialize)] struct Data { empty: Vec, numbers: Vec, strings: Vec, structs: Vec, nested: Vec>, } assert_toml_snapshot!(Data { empty: vec![], numbers: vec![1, 2, 3], strings: vec!["a".into(), "b".into()], structs: vec![ Item { id: 1, name: "first".into() }, Item { id: 2, name: "second".into() }, ], nested: vec![vec![1, 2], vec![3, 4]], }, @r" empty = [] numbers = [ 1, 2, 3, ] strings = [ 'a', 'b', ] nested = [ [ 1, 2, ], [ 3, 4, ], ] [[structs]] id = 1 name = 'first' [[structs]] id = 2 name = 'second' "); } #[test] fn test_toml_special_floats_in_array() { #[derive(Serialize)] struct Data { floats: Vec, } assert_toml_snapshot!(Data { floats: vec![1.5, f64::NAN, f64::INFINITY, f64::NEG_INFINITY], }, @r" floats = [ 1.5, nan, inf, -inf, ] "); } // ============================================================================= // Maps // ============================================================================= #[test] fn test_toml_maps() { let mut simple = BTreeMap::new(); simple.insert("alpha", 1); simple.insert("beta", 2); let mut nested_inner = BTreeMap::new(); nested_inner.insert("x".to_string(), 10); let mut nested = BTreeMap::new(); nested.insert("coords".to_string(), nested_inner); #[derive(Serialize)] struct Data { simple: BTreeMap<&'static str, i32>, nested: BTreeMap>, } assert_toml_snapshot!(Data { simple, nested }, @r" [simple] alpha = 1 beta = 2 [nested.coords] x = 10 "); } #[test] fn test_toml_integer_keys() { let mut map = BTreeMap::new(); 
map.insert(1, "first"); map.insert(10, "tenth"); #[derive(Serialize)] struct Data { items: BTreeMap, } assert_toml_snapshot!(Data { items: map }, @r" [items] 1 = 'first' 10 = 'tenth' "); } // ============================================================================= // Key Edge Cases // ============================================================================= #[test] fn test_toml_special_keys() { let mut map = BTreeMap::new(); map.insert("", "empty key"); map.insert("some.dotted.key", "dotted"); map.insert("it's", "single quote"); map.insert("key with spaces", "spaces"); map.insert("key=value", "equals"); map.insert("[section]", "brackets"); #[derive(Serialize)] struct Data { items: BTreeMap<&'static str, &'static str>, } assert_toml_snapshot!(Data { items: map }, @r#" [items] "" = 'empty key' "[section]" = 'brackets' "it's" = 'single quote' "key with spaces" = 'spaces' "key=value" = 'equals' "some.dotted.key" = 'dotted' "#); } #[test] fn test_toml_unicode_keys() { let mut map = BTreeMap::new(); map.insert("键", "Chinese"); map.insert("キー", "Japanese"); map.insert("ключ", "Russian"); #[derive(Serialize)] struct Data { items: BTreeMap<&'static str, &'static str>, } assert_toml_snapshot!(Data { items: map }, @r#" [items] "ключ" = 'Russian' "キー" = 'Japanese' "键" = 'Chinese' "#); } #[test] fn test_toml_keyword_keys() { let mut map = BTreeMap::new(); map.insert("true", "bool keyword"); map.insert("false", "bool keyword"); map.insert("inf", "float keyword"); map.insert("nan", "float keyword"); #[derive(Serialize)] struct Data { items: BTreeMap<&'static str, &'static str>, } let result = insta::_macro_support::serialize_value( &Data { items: map }, insta::_macro_support::SerializationFormat::Toml, ); assert!(result.contains("bool keyword")); } // ============================================================================= // Serde Attributes // ============================================================================= #[test] fn test_toml_serde_skip() { 
#[derive(Serialize)] #[allow(dead_code)] struct Data { included: String, #[serde(skip)] excluded: String, } assert_toml_snapshot!(Data { included: "visible".into(), excluded: "hidden".into(), }, @"included = 'visible'"); } #[test] fn test_toml_serde_rename() { #[derive(Serialize)] struct Data { #[serde(rename = "newName")] old_name: String, } assert_toml_snapshot!(Data { old_name: "value".into(), }, @"newName = 'value'"); } #[test] fn test_toml_serde_flatten() { #[derive(Serialize)] struct Base { name: String, age: u32, } #[derive(Serialize)] struct Extended { id: i32, #[serde(flatten)] base: Base, } assert_toml_snapshot!(Extended { id: 1, base: Base { name: "Alice".into(), age: 30, }, }, @r" id = 1 name = 'Alice' age = 30 "); } #[test] fn test_toml_option_skip_serializing_if() { #[derive(Serialize)] struct Data { present: Option, #[serde(skip_serializing_if = "Option::is_none")] missing: Option, } assert_toml_snapshot!(Data { present: Some("value".into()), missing: None, }, @"present = 'value'"); } // ============================================================================= // Enums // ============================================================================= #[test] fn test_toml_enum_externally_tagged() { #[derive(Serialize)] enum Value { Text(String), Number(i32), } #[derive(Serialize)] struct Data { values: Vec, } assert_toml_snapshot!(Data { values: vec![Value::Text("hello".into()), Value::Number(42)], }, @r" [[values]] Text = 'hello' [[values]] Number = 42 "); } #[test] fn test_toml_enum_internally_tagged() { #[derive(Serialize)] #[serde(tag = "type")] #[allow(dead_code)] enum Event { Login { user: String }, Logout { user: String }, } #[derive(Serialize)] struct Data { event: Event, } assert_toml_snapshot!(Data { event: Event::Login { user: "alice".into() }, }, @r" [event] type = 'Login' user = 'alice' "); } #[test] fn test_toml_enum_adjacently_tagged() { #[derive(Serialize)] #[serde(tag = "t", content = "c")] #[allow(dead_code)] enum Message { 
Text(String), Number(i32), } #[derive(Serialize)] struct Data { msg: Message, } assert_toml_snapshot!(Data { msg: Message::Text("hello".into()), }, @r" [msg] t = 'Text' c = 'hello' "); } #[test] fn test_toml_enum_untagged() { #[derive(Serialize)] #[serde(untagged)] #[allow(dead_code)] enum Mixed { Int(i32), Str(String), } #[derive(Serialize)] struct Data { value: Mixed, } assert_toml_snapshot!(Data { value: Mixed::Str("hello".into()), }, @"value = 'hello'"); } // ============================================================================= // Special Types // ============================================================================= #[test] fn test_toml_char() { #[derive(Serialize)] struct Data { letter: char, emoji: char, } assert_toml_snapshot!(Data { letter: 'A', emoji: '🎉', }, @r" letter = 'A' emoji = '🎉' "); } #[test] fn test_toml_newtype_wrapper() { #[derive(Serialize)] struct UserId(u64); #[derive(Serialize)] struct Username(String); #[derive(Serialize)] struct User { id: UserId, name: Username, } assert_toml_snapshot!(User { id: UserId(12345), name: Username("alice".into()), }, @r" id = 12345 name = 'alice' "); } #[test] fn test_toml_type_distinction() { #[derive(Serialize)] struct Data { actual_bool: bool, bool_string: String, actual_number: i32, number_string: String, } assert_toml_snapshot!(Data { actual_bool: true, bool_string: "true".into(), actual_number: 123, number_string: "123".into(), }, @r" actual_bool = true bool_string = 'true' actual_number = 123 number_string = '123' "); } // ============================================================================= // Stress Tests // ============================================================================= #[test] fn test_toml_deep_nesting() { #[derive(Serialize)] struct L5 { v: i32, } #[derive(Serialize)] struct L4 { x: L5, } #[derive(Serialize)] struct L3 { x: L4, } #[derive(Serialize)] struct L2 { x: L3, } #[derive(Serialize)] struct L1 { x: L2, } let data = L1 { x: L2 { x: L3 { x: L4 { x: L5 { 
v: 42 } }, }, }, }; let result = insta::_macro_support::serialize_value( &data, insta::_macro_support::SerializationFormat::Toml, ); assert!(result.contains("v = 42")); } #[test] fn test_toml_large_array() { #[derive(Serialize)] struct Data { numbers: Vec, } let result = insta::_macro_support::serialize_value( &Data { numbers: (0..1000).collect(), }, insta::_macro_support::SerializationFormat::Toml, ); assert!(result.contains("999")); } #[test] fn test_toml_long_string() { #[derive(Serialize)] struct Data { content: String, } let long = "x".repeat(10_000); let result = insta::_macro_support::serialize_value( &Data { content: long.clone(), }, insta::_macro_support::SerializationFormat::Toml, ); assert!(result.len() > 10_000); }