lsp-textdocument-0.4.2/.cargo_vcs_info.json

{
  "git": {
    "sha1": "4df4aa125ae2423e72a162999494054e9f42dc7a"
  },
  "path_in_vcs": ""
}

lsp-textdocument-0.4.2/.github/workflows/ci.yml

name: Cargo Build & Test

on:
  pull_request:
    paths-ignore:
      - ".gitignore"
      - "README.md"
      - "LICENSE"
      - ".github/workflows/*"

jobs:
  test:
    name: Test on ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        toolchain:
          - nightly-2024-10-17
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }}
      - run: cargo test --verbose

lsp-textdocument-0.4.2/.github/workflows/publish.yml

name: Publish

on:
  push:
    branches: ["main"]

env:
  CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}

jobs:
  publish:
    if: "startsWith(github.event.head_commit.message, 'chore(release): publish')"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - run: rustup update nightly-2024-10-17 && rustup default nightly-2024-10-17
      - name: publish
        run: cargo publish

lsp-textdocument-0.4.2/.gitignore

/target
.DS_Store

lsp-textdocument-0.4.2/Cargo.lock

# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "anyhow"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "crossbeam-channel"
version = "0.5.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"

[[package]]
name = "fluent-uri"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d"
dependencies = [
 "bitflags",
]

[[package]]
name = "itoa"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"

[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

[[package]]
name = "lsp-server"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248f65b78f6db5d8e1b1604b4098a28b43d21a8eb1deeca22b1c421b276c7095"
dependencies = [
 "crossbeam-channel",
 "log",
 "serde",
 "serde_json",
]

[[package]]
name = "lsp-textdocument"
version = "0.4.2"
dependencies = [
 "anyhow",
 "lsp-server",
 "lsp-types",
 "serde",
 "serde_json",
]

[[package]]
name = "lsp-types"
version = "0.97.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53353550a17c04ac46c585feb189c2db82154fc84b79c7a66c96c2c644f66071"
dependencies = [
"bitflags", "fluent-uri", "serde", "serde_json", "serde_repr", ] [[package]] name = "proc-macro2" version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] [[package]] name = "ryu" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "serde" version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "serde_repr" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "syn" version = "2.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" lsp-textdocument-0.4.2/Cargo.toml0000644000000023660000000000100123630ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "lsp-textdocument" version = "0.4.2" authors = ["qixuan"] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A LSP text documents manager that map of text document." 
homepage = "https://github.com/GiveMe-A-Name/lsp-textdocument" readme = "README.md" license = "MIT" repository = "https://github.com/GiveMe-A-Name/lsp-textdocument" [lib] name = "lsp_textdocument" path = "src/lib.rs" [[example]] name = "with_lsp_server" path = "examples/with_lsp_server.rs" [dependencies.lsp-types] version = "0.97.0" [dependencies.serde_json] version = "1.0" [dev-dependencies.anyhow] version = "1" [dev-dependencies.lsp-server] version = "0.7.6" [dev-dependencies.serde] version = "1" features = ["derive"] lsp-textdocument-0.4.2/Cargo.toml.orig000064400000000000000000000007501046102023000160370ustar 00000000000000[package] name = "lsp-textdocument" version = "0.4.2" edition = "2021" authors = ["qixuan"] description = "A LSP text documents manager that map of text document." readme = "README.md" license = "MIT" homepage = "https://github.com/GiveMe-A-Name/lsp-textdocument" repository = "https://github.com/GiveMe-A-Name/lsp-textdocument" [dependencies] lsp-types = "0.97.0" serde_json = "1.0" [dev-dependencies] anyhow = "1" lsp-server = "0.7.6" serde = { version = "1", features = ["derive"] } lsp-textdocument-0.4.2/LICENSE000064400000000000000000000020411046102023000141500ustar 00000000000000Copyright (c) 2022 GiveMe-A-Name Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. lsp-textdocument-0.4.2/README.md000064400000000000000000000026001046102023000144230ustar 00000000000000# `lsp-textdocument` A LSP text documents manager that helps mapping of textual content. ## Introduction You may not be able to manage your text documents comfortably when developing an LSP service. There are two reasons why we develop hard. - Always given a URL variable only, so we need to read the contents of the file ourselves. - Need map offsets from string index to text dimensional coordinates. By listening to the notification from the LSP client, `lsp-textdocument` can help you automatically manage text documents. This crate is base on [vscode-languageserver-textdocument](https://github.com/microsoft/vscode-languageserver-node/tree/main/textDocument). ## Example usage ### Basic usage ```rust use lsp_textdocument::TextDocuments; fn main() { let text_documents = TextDocument::new(); ... 

### with [`lsp-server`](https://github.com/rust-analyzer/lsp-server)

See [`examples/with_lsp_server.rs`](/examples/with_lsp_server.rs).

### with [`tower-lsp`](https://github.com/ebkalderon/tower-lsp)

**Contact us via [issues](https://github.com/GiveMe-A-Name/lsp-textdocument/issues) if you require this with `tower-lsp`**

## Attention

- The text document [position-encoding](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#positionEncodingKind) only supports `UTF-16`; see the sketch below.
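
For example (an illustrative sketch, mirroring the crate's own unit tests): a `Position`'s `character` counts UTF-16 code units, while the offsets returned by `offset_at` count UTF-8 bytes. `€` is one UTF-16 code unit but three UTF-8 bytes, and `𐐷` (a surrogate pair) is two code units but four bytes:

```rust
use lsp_textdocument::FullTextDocument;
use lsp_types::Position;

fn main() {
    let doc = FullTextDocument::new("txt".to_string(), 0, "€𐐷x".to_string());
    // "€" is 1 UTF-16 code unit and "𐐷" is 2, so "x" sits at character 3 —
    // but at byte offset 3 + 4 = 7.
    assert_eq!(doc.offset_at(Position::new(0, 3)), 7);
}
```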

lsp-textdocument-0.4.2/examples/with_lsp_server.rs

use anyhow::Result;
use lsp_server::{Connection, ExtractError, Message, Request, RequestId};
use lsp_textdocument::TextDocuments;
use lsp_types::request::Formatting;
use lsp_types::{HoverProviderCapability, OneOf, TextDocumentSyncCapability, TextDocumentSyncKind};
use lsp_types::{InitializeParams, ServerCapabilities};

fn main() -> Result<()> {
    // Note that we must have our logging only write out to stderr.
    eprintln!("starting generic LSP server");

    // Create the transport. Includes the stdio (stdin and stdout) versions but this could
    // also be implemented to use sockets or HTTP.
    let (connection, io_threads) = Connection::stdio();

    // Run the server and wait for the two threads to end (typically triggered by the LSP Exit event).
    let mut documents = TextDocuments::new();
    let server_capabilities = serde_json::to_value(ServerCapabilities {
        hover_provider: Some(HoverProviderCapability::Simple(true)),
        text_document_sync: Some(TextDocumentSyncCapability::Kind(
            TextDocumentSyncKind::INCREMENTAL,
        )),
        document_formatting_provider: Some(OneOf::Left(true)),
        ..Default::default()
    })?;
    let initialization_params = connection.initialize(server_capabilities)?;
    main_loop(connection, initialization_params, &mut documents)?;
    io_threads.join()?;

    // Shut down gracefully.
    eprintln!("shutting down server");
    Ok(())
}

fn main_loop(
    connection: Connection,
    params: serde_json::Value,
    documents: &mut TextDocuments,
) -> Result<()> {
    let _params: InitializeParams = serde_json::from_value(params).unwrap();
    eprintln!("starting example main loop");
    for msg in connection.receiver.iter() {
        // eprintln!("got msg: {:?}", msg);
        match msg {
            Message::Request(req) => {
                if connection.handle_shutdown(&req)? {
                    return Ok(());
                }
                match cast::<Formatting>(req) {
                    std::result::Result::Ok((_id, params)) => {
                        let uri = params.text_document.uri;
                        let text = documents.get_document_content(&uri, None);
                        // You can read the content of any document managed by `documents`.
                        eprintln!("the document text: {:?}", text);
                    }
                    Err(err) => {
                        eprintln!("{:?}", err);
                    }
                }
                // ...
            }
            Message::Response(resp) => {
                eprintln!("got response: {:?}", resp);
            }
            Message::Notification(not) => {
                if !documents.listen(not.method.as_str(), &not.params) {
                    // Add handlers for other types of notifications here.
                }
            }
        }
    }
    Ok(())
}

fn cast<R>(req: Request) -> Result<(RequestId, R::Params), ExtractError<Request>>
where
    R: lsp_types::request::Request,
    R::Params: serde::de::DeserializeOwned,
{
    req.extract(R::METHOD)
}

lsp-textdocument-0.4.2/rust-toolchain.toml

[toolchain]
profile = "default"
channel = "nightly-2024-10-17"

lsp-textdocument-0.4.2/src/lib.rs

//!
//! An LSP text documents manager that maps text documents.
//!
//! The text documents' [position-encoding](https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#positionEncodingKind) only supports `UTF-16`.

mod text_document;
mod text_documents;

pub use text_document::FullTextDocument;
pub use text_documents::TextDocuments;

lsp-textdocument-0.4.2/src/text_document.rs

use lsp_types::{Position, Range, TextDocumentContentChangeEvent};

#[derive(Debug)]
pub struct FullTextDocument {
    language_id: String,
    version: i32,
    content: String,
    /// The value at index `i` in `line_offsets` is the index into `content`
    /// that is the start of line `i`. As such, the first element of
    /// `line_offsets` is always 0.
    line_offsets: Vec<u32>,
}

fn computed_line_offsets(text: &str, is_at_line_start: bool, text_offset: Option<u32>) -> Vec<u32> {
    let text_offset = text_offset.unwrap_or(0);
    let mut line_offsets = if is_at_line_start {
        vec![text_offset]
    } else {
        vec![]
    };

    let mut chars = text.char_indices().peekable();
    while let Some((idx, char)) = chars.next() {
        let idx: u32 = idx
            .try_into()
            .expect("The length of the text involved in the calculation is too long");
        if char == '\r' && chars.peek() == Some(&(idx as usize + 1, '\n')) {
            chars.next();
            line_offsets.push(text_offset + idx + 2);
        } else if char == '\n' || char == '\r' {
            line_offsets.push(text_offset + idx + 1);
        }
    }
    line_offsets
}

/// Given a string (in UTF-8) and a byte offset, returns the offset in UTF-16 code units.
///
/// For example, consider a string containing a single 4-byte emoji. 4-byte characters
/// in UTF-8 are supplementary-plane characters that require two UTF-16 code units
/// (surrogate pairs).
///
/// In this example:
/// - offset 4 returns 2;
/// - offsets 1, 2 or 3 return 0, because they are not on a character boundary and round down;
/// - offset 5+ will return 2, the length of the string in UTF-16.
fn line_offset_utf16(line: &str, offset: u32) -> u32 {
    let mut c = 0;
    for (idx, char) in line.char_indices() {
        if idx + char.len_utf8() > offset as usize || idx == offset as usize {
            break;
        }
        c += char.len_utf16() as u32;
    }
    c
}
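
// Illustrative sketch (added for exposition, not part of the original crate):
// exercises the rounding behavior described in the doc comment above, using
// the Euro sign (3 bytes in UTF-8, 1 UTF-16 code unit).
#[cfg(test)]
mod line_offset_utf16_examples {
    use super::*;

    #[test]
    fn euro_sign_offsets() {
        let line = "\u{20AC} euro";
        assert_eq!(line_offset_utf16(line, 0), 0);
        // mid-character byte offset: rounds down to the previous boundary
        assert_eq!(line_offset_utf16(line, 1), 0);
        // just past the euro sign: 1 UTF-16 code unit
        assert_eq!(line_offset_utf16(line, 3), 1);
        // just past the space: 2 UTF-16 code units
        assert_eq!(line_offset_utf16(line, 4), 2);
    }
}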

impl FullTextDocument {
    pub fn new(language_id: String, version: i32, content: String) -> Self {
        let line_offsets = computed_line_offsets(&content, true, None);
        Self {
            language_id,
            version,
            content,
            line_offsets,
        }
    }

    pub fn update(&mut self, changes: &[TextDocumentContentChangeEvent], version: i32) {
        for change in changes {
            let TextDocumentContentChangeEvent { range, text, .. } = change;
            match range {
                Some(range) => {
                    // update content
                    let Range { start, end } = range;
                    let (start, start_offset) = self.find_canonical_position(start);
                    let (end, end_offset) = self.find_canonical_position(end);
                    assert!(
                        start_offset <= end_offset,
                        "Start offset must be less than end offset. {}:{} (offset {}) is not <= {}:{} (offset {})",
                        start.line,
                        start.character,
                        start_offset,
                        end.line,
                        end.character,
                        end_offset
                    );
                    self.content
                        .replace_range((start_offset as usize)..(end_offset as usize), text);

                    let (start_line, end_line) = (start.line, end.line);
                    assert!(start_line <= end_line);
                    let added_line_offsets = computed_line_offsets(text, false, Some(start_offset));
                    let num_added_line_offsets = added_line_offsets.len();

                    let splice_start = start_line as usize + 1;
                    let splice_end = std::cmp::min(end_line, self.line_count() - 1) as usize;
                    self.line_offsets
                        .splice(splice_start..=splice_end, added_line_offsets);

                    let diff =
                        (text.len() as i32).saturating_sub_unsigned(end_offset - start_offset);
                    if diff != 0 {
                        for i in
                            (splice_start + num_added_line_offsets)..(self.line_count() as usize)
                        {
                            self.line_offsets[i] = self.line_offsets[i].saturating_add_signed(diff);
                        }
                    }
                }
                None => {
                    // Full text

                    // update line_offsets
                    self.line_offsets = computed_line_offsets(text, true, None);

                    // update content
                    self.content = text.to_owned();
                }
            }
        }
        self.version = version;
    }

    /// As demonstrated by test_multiple_position_same_offset(), in some cases
    /// there are multiple ways to reference the same Position. We map to a
    /// "canonical Position" so we can avoid worrying about edge cases all over
    /// the place.
    fn find_canonical_position(&self, position: &Position) -> (Position, u32) {
        let offset = self.offset_at(*position);
        if offset == 0 {
            (
                Position {
                    line: 0,
                    character: 0,
                },
                0,
            )
        } else if self.content.as_bytes().get(offset as usize - 1) == Some(&b'\n') {
            if self.line_offsets[position.line as usize] == offset {
                (*position, offset)
            } else if self.line_offsets[position.line as usize + 1] == offset {
                (
                    Position {
                        line: position.line + 1,
                        character: 0,
                    },
                    offset,
                )
            } else {
                panic!(
                    "Could not determine canonical value for {position:?} in {:?}",
                    self.content
                )
            }
        } else {
            (*position, offset)
        }
    }

    /// Document's language id
    pub fn language_id(&self) -> &str {
        &self.language_id
    }

    /// Document's version
    pub fn version(&self) -> i32 {
        self.version
    }

    /// Get document content
    ///
    /// # Examples
    ///
    /// Basic usage:
    /// ```
    /// use lsp_textdocument::FullTextDocument;
    /// use lsp_types::{Range, Position};
    ///
    /// let text_document = FullTextDocument::new("plain_text".to_string(), 1, "hello rust!".to_string());
    ///
    /// // get the document's entire content
    /// let content = text_document.get_content(None);
    /// assert_eq!(content, "hello rust!");
    ///
    /// // get part of the document's content by range
    /// let (start, end) = (Position::new(0, 1), Position::new(0, 9));
    /// let range = Range::new(start, end);
    /// let sub_content = text_document.get_content(Some(range));
    /// assert_eq!(sub_content, "ello rus");
    /// ```
    pub fn get_content(&self, range: Option<Range>) -> &str {
        match range {
            Some(Range { start, end }) => {
                let start = self.offset_at(start);
                let end = self.offset_at(end).min(self.content_len());
                self.content.get(start as usize..end as usize).unwrap()
            }
            None => &self.content,
        }
    }

    fn get_line_and_offset(&self, line: u32) -> Option<(&str, u32)> {
        self.line_offsets.get(line as usize).map(|&line_offset| {
            let len: u32 = self.content_len();
            let eol_offset = self.line_offsets.get((line + 1) as usize).unwrap_or(&len);
            let line = &self.content[line_offset as usize..*eol_offset as usize];
            (line, line_offset)
        })
    }

    fn get_line(&self, line: u32) -> Option<&str> {
        self.get_line_and_offset(line).map(|(line, _)| line)
    }

    /// The number of lines in the document content
    pub fn line_count(&self) -> u32 {
        self.line_offsets
            .len()
            .try_into()
            .expect("The number of lines of text passed in is too large")
    }

    /// The length of the document content in UTF-8 bytes
    pub fn content_len(&self) -> u32 {
        self.content
            .len()
            .try_into()
            .expect("The length of the text passed in is too long")
    }

    /// Converts a zero-based byte offset in the UTF-8-encoded content to a position.
    ///
    /// The offset is in bytes; the position is in UTF-16 code units. Rounds down if
    /// the offset is not on a code-unit boundary, or is beyond the end of the
    /// content.
    pub fn position_at(&self, offset: u32) -> Position {
        let offset = offset.min(self.content_len());

        let line_count = self.line_count();
        if line_count == 1 {
            // only one line
            return Position {
                line: 0,
                character: line_offset_utf16(self.get_line(0).unwrap(), offset),
            };
        }
        let (mut low, mut high) = (0, line_count);
        while low < high {
            let mid = (low + high) / 2;
            if offset
                >= *self
                    .line_offsets
                    .get(mid as usize)
                    .expect("Unknown mid value")
            {
                low = mid + 1;
            } else {
                high = mid;
            }
        }
        if low == 0 {
            // offset is on the first line
            return Position {
                line: 0,
                character: line_offset_utf16(self.get_line(0).unwrap(), offset),
            };
        }
        let line = low - 1;
        Position {
            line,
            character: line_offset_utf16(
                self.get_line(line).unwrap(),
                offset - self.line_offsets[line as usize],
            ),
        }
    }

    /// Converts a position to a zero-based byte offset, suitable for slicing the
    /// UTF-8 encoded content.
    pub fn offset_at(&self, position: Position) -> u32 {
        let Position { line, character } = position;
        match self.get_line_and_offset(line) {
            Some((line, offset)) => {
                let mut c = 0;
                let iter = line.char_indices();
                for (idx, char) in iter {
                    if c == character {
                        return offset + idx as u32;
                    }
                    c += char.len_utf16() as u32;
                }
                offset + line.len() as u32
            }
            None => {
                if line >= self.line_count() {
                    self.content_len()
                } else {
                    0
                }
            }
        }
    }
}
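
// Illustrative sketch (added for exposition, not part of the original crate):
// a round trip through the public API — apply an incremental change via
// `update`, then read the result back with `get_content`, `offset_at`, and
// `position_at`.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn incremental_edit_round_trip() {
        let mut doc = FullTextDocument::new("rust".to_string(), 0, "let x = 1;\n".to_string());
        // Replace `1` (characters 8..9 on line 0) with `42`.
        doc.update(
            &[TextDocumentContentChangeEvent {
                range: Some(Range::new(Position::new(0, 8), Position::new(0, 9))),
                range_length: None,
                text: "42".to_string(),
            }],
            1,
        );
        assert_eq!(doc.get_content(None), "let x = 42;\n");
        // For ASCII content, byte offsets and UTF-16 positions agree.
        assert_eq!(doc.offset_at(Position::new(0, 10)), 10);
        assert_eq!(doc.position_at(10), Position::new(0, 10));
    }
}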

#[cfg(test)]
mod tests {
    use super::*;

    fn full_text_document() -> FullTextDocument {
        FullTextDocument::new(
            "js".to_string(),
            2,
            "he\nllo\nworld\r\nfoo\rbar".to_string(),
        )
    }

    #[test]
    fn test_offset_at() {
        let text_document = full_text_document();
        let offset = text_document.offset_at(Position {
            line: 1,
            character: 1,
        });
        assert_eq!(offset, 4);

        let offset = text_document.offset_at(Position {
            line: 2,
            character: 3,
        });
        assert_eq!(offset, 10);

        // the `f` in `foo` (\r\n is a single line terminator)
        let offset = text_document.offset_at(Position {
            line: 3,
            character: 1,
        });
        assert_eq!(offset, 15);
    }

    /// basic multilingual plane
    #[test]
    fn test_offset_at_bmp() {
        // Euro symbol
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{20AC} euro".to_string());
        let offset = text_document.offset_at(Position {
            line: 0,
            // E euro
            //   ^
            character: 2,
        });
        assert_eq!(offset, 4);
    }

    /// supplementary multilingual plane, aka surrogate pair
    #[test]
    fn test_offset_at_smp() {
        // Deseret Small Letter Yee
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{10437} yee".to_string());
        let offset = text_document.offset_at(Position {
            line: 0,
            // HL yee
            //    ^
            character: 3,
        });
        assert_eq!(offset, 5);
    }

    /// a character beyond the end of the line should clamp to the end of the line
    #[test]
    fn test_offset_at_beyond_end_of_line() {
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{20AC} abc\nline 2".to_string());
        // "\u{20AC} abc\nline 2" in UTF-8:
        // \xE2 \x82 \xAC \x20 \x61 \x62 \x63 \x0A \x6C \x69 \x6E \x65 \x20 \x32
        // ^ line 1 == 0                           ^ line 2 == 8
        assert_eq!(text_document.line_offsets, vec![0, 8]);

        let offset = text_document.offset_at(Position {
            line: 0,
            character: 100,
        });
        assert_eq!(offset, 8);
    }
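
    /// Illustrative sketch (added for exposition, not part of the original
    /// crate): what `computed_line_offsets` records for each kind of line
    /// terminator (`\r\n` counts as a single terminator).
    #[test]
    fn test_computed_line_offsets_terminators() {
        // "a\r\n" ends at offset 3, "b\n" ends at offset 5.
        assert_eq!(computed_line_offsets("a\r\nb\nc", true, None), vec![0, 3, 5]);
        // Without `is_at_line_start`, the leading 0 is omitted,
        // and `text_offset` shifts every recorded offset.
        assert_eq!(computed_line_offsets("x\ny", false, Some(10)), vec![12]);
    }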

    #[test]
    fn test_position_at() {
        let text_document = full_text_document();
        let position = text_document.position_at(5);
        assert_eq!(
            position,
            Position {
                line: 1,
                character: 2
            }
        );

        let position = text_document.position_at(11);
        assert_eq!(
            position,
            Position {
                line: 2,
                character: 4,
            }
        );

        let position = text_document.position_at(15);
        assert_eq!(
            position,
            Position {
                line: 3,
                character: 1,
            }
        );

        let position = text_document.position_at(0);
        assert_eq!(
            position,
            Position {
                line: 0,
                character: 0,
            }
        );
    }

    /// basic multilingual plane
    #[test]
    fn test_position_at_bmp() {
        // Euro symbol
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{20AC} euro".to_string());
        let position = text_document.position_at(4);
        assert_eq!(
            position,
            Position {
                line: 0,
                // E euro
                //   ^
                character: 2,
            }
        );

        // multi-line content
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\n\n\u{20AC} euro\n\n".to_string());
        let position = text_document.position_at(6);
        assert_eq!(
            position,
            Position {
                line: 2,
                // E euro
                //   ^
                character: 2,
            }
        );
    }

    /// supplementary multilingual plane, aka surrogate pair
    #[test]
    fn test_position_at_smp() {
        // Deseret Small Letter Yee
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{10437} yee".to_string());
        assert_eq!(
            text_document.position_at(5),
            Position {
                line: 0,
                // HL yee
                //    ^
                character: 3,
            }
        );

        // \u{10437} is 4 bytes wide. if not on a char boundary, round down
        assert_eq!(
            text_document.position_at(2),
            Position {
                line: 0,
                character: 0,
            }
        );

        // multi-line content
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\n\n\u{10437} yee\n\n".to_string());
        let position = text_document.position_at(7);
        assert_eq!(
            position,
            Position {
                line: 2,
                // HL yee
                //    ^
                character: 3,
            }
        );
    }

    /// https://github.com/GiveMe-A-Name/lsp-textdocument/issues/53
    #[test]
    fn test_position_at_line_head() {
        let text_document = FullTextDocument::new("js".to_string(), 2, "\nyee\n\n".to_string());
        let position = text_document.position_at(1);
        assert_eq!(
            position,
            Position {
                line: 1,
                character: 0,
            }
        );
    }

    #[test]
    fn test_get_content() {
        let text_document = full_text_document();
        let start = Position {
            line: 0,
            character: 0,
        };
        let end = Position {
            line: 1,
            character: 2,
        };
        let range = Range { start, end };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, "he\nll");

        let end = Position {
            line: 100,
            character: 100,
        };
        let range = Range { start, end };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, text_document.content);

        let range = Range {
            start: Position {
                line: 1,
                character: 0,
            },
            end: Position {
                line: 2,
                character: 3,
            },
        };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, "llo\nwor");
    }

    /// basic multilingual plane
    #[test]
    fn test_get_content_bmp() {
        // Euro symbol
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{20AC} euro".to_string());

        // Euro symbol is 1 UTF-16 code unit wide
        let range = Range {
            start: Position {
                line: 0,
                character: 0,
            },
            end: Position {
                line: 0,
                character: 1,
            },
        };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, "\u{20AC}");

        // E euro
        //   ^
        let range = Range {
            start: Position {
                line: 0,
                character: 2,
            },
            end: Position {
                line: 0,
                character: 3,
            },
        };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, "e");
    }

    /// supplementary multilingual plane, aka surrogate pairs
    #[test]
    fn test_get_content_smp() {
        // Deseret Small Letter Yee
        let text_document =
            FullTextDocument::new("js".to_string(), 2, "\u{10437} yee".to_string());

        // surrogate pairs are 2 UTF-16 code units wide
        let range = Range {
            start: Position {
                line: 0,
                character: 0,
            },
            end: Position {
                line: 0,
                character: 2,
            },
        };
        let content = text_document.get_content(Some(range));
        assert_eq!(content, "\u{10437}");
    }
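
    /// Illustrative sketch (added for exposition, not part of the original
    /// crate): `position_at` clamps offsets past the end of the content to the
    /// final position, per its doc comment.
    #[test]
    fn test_position_at_clamps_past_end() {
        let text_document = full_text_document();
        // content is "he\nllo\nworld\r\nfoo\rbar": the last line is "bar".
        assert_eq!(
            text_document.position_at(10_000),
            Position {
                line: 4,
                character: 3,
            }
        );
    }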

    #[test]
    fn test_update_full_content() {
        let mut text_document = full_text_document();
        let new_text = "hello\n js!";
        text_document.update(
            &[TextDocumentContentChangeEvent {
                text: new_text.to_string(),
                range: None,
                range_length: None,
            }],
            1,
        );
        assert_eq!(&text_document.content, new_text);
        assert_eq!(text_document.line_offsets, vec![0, 6]);
    }

    #[test]
    fn test_update_part_content() {
        let mut text_document = full_text_document();
        assert_eq!(text_document.version(), 2);
        let new_text = String::from("xx\ny");
        let range = Range {
            start: Position {
                line: 1,
                character: 0,
            },
            end: Position {
                line: 1,
                character: 3,
            },
        };
        text_document.update(
            &[TextDocumentContentChangeEvent {
                range: Some(range),
                range_length: None,
                text: new_text,
            }],
            1,
        );
        assert_eq!(&text_document.content, "he\nxx\ny\nworld\r\nfoo\rbar");
        assert_eq!(text_document.line_offsets, vec![0, 3, 6, 8, 15, 19]);
        assert_eq!(text_document.version(), 1)
    }

    #[test]
    fn test_update_new_content_at_end() {
        let mut text_document = full_text_document();
        let new_text = String::from("bar\nbaz");
        let range = Range {
            start: Position {
                line: 4,
                character: 0,
            },
            end: Position {
                line: 5,
                character: 0,
            },
        };
        text_document.update(
            &[TextDocumentContentChangeEvent {
                range: Some(range),
                range_length: None,
                text: new_text,
            }],
            1,
        );
        assert_eq!(&text_document.content, "he\nllo\nworld\r\nfoo\rbar\nbaz");
        assert_eq!(text_document.line_offsets, vec![0, 3, 7, 14, 18, 22]);
    }

    #[test]
    #[should_panic(
        expected = "Start offset must be less than end offset. 2:0 (offset 7) is not <= 1:0 (offset 3)"
    )]
    fn test_update_invalid_range() {
        let mut text_document = full_text_document();
        // start is after end
        let range = Range {
            start: Position {
                line: 2,
                character: 0,
            },
            end: Position {
                line: 1,
                character: 0,
            },
        };
        text_document.update(
            &[TextDocumentContentChangeEvent {
                text: String::from(""),
                range: Some(range),
                range_length: Some(0),
            }],
            1,
        );
    }

    /// It turns out that there are multiple values for Position that can map to
    /// the same offset following a newline.
    #[test]
    fn test_multiple_position_same_offset() {
        let text_document = full_text_document();
        let end_of_first_line = Position {
            line: 0,
            character: 3,
        };
        let start_of_second_line = Position {
            line: 1,
            character: 0,
        };
        assert_eq!(
            text_document.offset_at(end_of_first_line),
            text_document.offset_at(start_of_second_line)
        );

        let beyond_end_of_first_line = Position {
            line: 0,
            character: 10_000,
        };
        assert_eq!(
            text_document.offset_at(beyond_end_of_first_line),
            text_document.offset_at(start_of_second_line)
        );
    }
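
    /// Illustrative sketch (added for exposition, not part of the original
    /// crate): `update` applies a batch of changes sequentially, so — as the
    /// LSP specification requires — each later range refers to the already
    /// edited text, not to the original document.
    #[test]
    fn test_update_sequential_changes() {
        let mut doc = FullTextDocument::new("text".to_string(), 0, "abc".to_string());
        doc.update(
            &[
                // replaces "a" with "X", producing "Xbc"
                TextDocumentContentChangeEvent {
                    range: Some(Range::new(Position::new(0, 0), Position::new(0, 1))),
                    range_length: None,
                    text: "X".to_string(),
                },
                // replaces "b" in "Xbc", producing "XYc"
                TextDocumentContentChangeEvent {
                    range: Some(Range::new(Position::new(0, 1), Position::new(0, 2))),
                    range_length: None,
                    text: "Y".to_string(),
                },
            ],
            1,
        );
        assert_eq!(doc.get_content(None), "XYc");
        assert_eq!(doc.version(), 1);
    }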

    #[test]
    fn test_insert_using_positions_after_newline_at_end_of_line() {
        let mut doc = FullTextDocument::new(
            "text".to_string(),
            0,
            "0:1332533\n0:1332534\n0:1332535\n0:1332536\n".to_string(),
        );
        doc.update(
            &[TextDocumentContentChangeEvent {
                range: Some(Range {
                    // After \n at the end of line 1.
                    start: Position {
                        line: 1,
                        character: 10,
                    },
                    // After \n at the end of line 2.
                    end: Position {
                        line: 2,
                        character: 10,
                    },
                }),
                range_length: None,
                text: "1:6188912\n1:6188913\n1:6188914\n".to_string(),
            }],
            1,
        );
        assert_eq!(
            doc.get_content(None),
            concat!(
                "0:1332533\n0:1332534\n",
                "1:6188912\n1:6188913\n1:6188914\n",
                "0:1332536\n",
            ),
        );
        assert_eq!(doc.line_offsets, vec!(0, 10, 20, 30, 40, 50, 60));
    }

    #[test]
    fn test_line_offsets() {
        let mut doc =
            FullTextDocument::new("text".to_string(), 0, "123456789\n123456789\n".to_string());
        assert_eq!(doc.line_offsets, vec!(0, 10, 20));
        doc.update(
            &[TextDocumentContentChangeEvent {
                range: Some(Range {
                    start: Position {
                        line: 1,
                        character: 5,
                    },
                    end: Position {
                        line: 1,
                        character: 5,
                    },
                }),
                range_length: None,
                text: "\nA\nB\nC\n".to_string(),
            }],
            1,
        );
        assert_eq!(doc.get_content(None), "123456789\n12345\nA\nB\nC\n6789\n",);
        assert_eq!(doc.line_offsets, vec!(0, 10, 16, 18, 20, 22, 27));
    }

    /// This tests a regression caused by confusing byte and character offsets.
    /// When [update] was called on a position whose offset points just after a
    /// non-newline when interpreted as bytes, but pointed just after a
    /// newline when interpreted as chars, it led to a crash.
    #[test]
    fn test_find_canonical_position_regression() {
        // \u{20AC} is a single character in utf-16 but 3 bytes in utf-8,
        // so the offsets in bytes of everything after it are +2 their offsets
        // in characters.
        let str = "\u{20AC}456789\n123456789\n";
        let mut doc = FullTextDocument::new("text".to_string(), 0, str.to_string());
        let pos = Position {
            line: 0,
            character: 6,
        };
        let offset = doc.offset_at(pos) as usize;
        assert_ne!(str.as_bytes().get(offset - 1), Some(&b'\n'));
        assert_eq!(str.chars().nth(offset - 1), Some('\n'));
        doc.update(
            &[TextDocumentContentChangeEvent {
                range: Some(Range {
                    start: pos,
                    end: Position {
                        line: 0,
                        character: 7,
                    },
                }),
                range_length: None,
                text: "X".to_string(),
            }],
            1,
        );
        assert_eq!(doc.get_content(None), "\u{20AC}45678X\n123456789\n",);
        assert_eq!(doc.line_offsets, vec!(0, 10, 20));
    }
}
Some("hello rust!")); /// /// // get document specify content by range /// let (start, end) = (Position::new(0, 1), Position::new(0, 9)); /// let range = Range::new(start, end); /// let sub_content = text_documents.get_document_content(&uri, Some(range)); /// assert_eq!(sub_content, Some("ello rus")); /// ``` pub fn get_document_content(&self, uri: &Uri, range: Option) -> Option<&str> { self.0.get(uri).map(|document| document.get_content(range)) } /// Get specify document's language by giving Uri /// /// # Examples /// /// Basic usage: /// ```no_run /// use lsp_textdocument::TextDocuments; /// use lsp_types::Uri; /// /// let text_documents = TextDocuments::new(); /// let uri:Uri = "file://example.js".parse().unwrap(); /// let language = text_documents.get_document_language(&uri); /// assert_eq!(language, Some("javascript")); /// ``` pub fn get_document_language(&self, uri: &Uri) -> Option<&str> { self.0.get(uri).map(|document| document.language_id()) } /// Listening the notification from client, you just need to pass `method` and `params` /// /// # Examples: /// /// Basic usage: /// ```no_run /// use lsp_textdocument::TextDocuments; /// /// let method = "textDocument/didOpen"; /// let params = serde_json::to_value("message produced by client").unwrap(); /// /// let mut text_documents = TextDocuments::new(); /// let accept: bool = text_documents.listen(method, ¶ms); /// ``` pub fn listen(&mut self, method: &str, params: &Value) -> bool { match method { DidOpenTextDocument::METHOD => { let params: DidOpenTextDocumentParams = serde_json::from_value(params.clone()) .expect("Expect receive DidOpenTextDocumentParams"); let text_document = params.text_document; let document = FullTextDocument::new( text_document.language_id, text_document.version, text_document.text, ); self.0.insert(text_document.uri, document); true } DidChangeTextDocument::METHOD => { let params: DidChangeTextDocumentParams = serde_json::from_value(params.clone()) .expect("Expect receive DidChangeTextDocumentParams"); if let Some(document) = self.0.get_mut(¶ms.text_document.uri) { let changes = ¶ms.content_changes; let version = params.text_document.version; document.update(changes, version); }; true } DidCloseTextDocument::METHOD => { let params: DidCloseTextDocumentParams = serde_json::from_value(params.clone()) .expect("Expect receive DidCloseTextDocumentParams"); self.0.remove(¶ms.text_document.uri); true } _ => { // ignore other request false } } } }