author    Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com>  2024-05-14 18:51:48 -0700
committer GitHub <noreply@github.com>  2024-05-15 01:51:48 +0000
commit    36d877be4a220cb30ddf69d43c386ae8d15f4b32 (patch)
tree      08bdf14935f68928452a5b9665f3c65e6e9670b7 /tests/integration/lsp_tests.rs
parent    1a788b58a0e80c4504a0fdf5d47db41c46dc8d37 (diff)
perf(lsp): Cache semantic tokens for open documents (#23799)
VS Code will typically send a `textDocument/semanticTokens/full` request followed by `textDocument/semanticTokens/range`, and occasionally requests semantic tokens even when we know nothing has changed. Semantic tokens also get refreshed on each change. Computing semantic tokens is relatively heavy in TSC, so we should avoid it as much as possible.

This caches the semantic tokens for open documents, to avoid making TSC do unnecessary work. It results in a noticeable improvement in local benchmarking.

Before:

```
Starting Deno benchmark
-> Start benchmarking lsp
  - Simple Startup/Shutdown (10 runs, mean: 383ms)
  - Big Document/Several Edits (5 runs, mean: 1079ms)
  - Find/Replace (10 runs, mean: 59ms)
  - Code Lens (10 runs, mean: 440ms)
  - deco-cx/apps Multiple Edits + Navigation (5 runs, mean: 9921ms)
<- End benchmarking lsp
```

After:

```
Starting Deno benchmark
-> Start benchmarking lsp
  - Simple Startup/Shutdown (10 runs, mean: 395ms)
  - Big Document/Several Edits (5 runs, mean: 1024ms)
  - Find/Replace (10 runs, mean: 56ms)
  - Code Lens (10 runs, mean: 438ms)
  - deco-cx/apps Multiple Edits + Navigation (5 runs, mean: 8927ms)
<- End benchmarking lsp
```
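The caching approach the message describes can be illustrated with a minimal sketch, assuming a cache keyed by document URI and version (the `SemanticTokensCache` type and its layout here are hypothetical, not Deno's actual implementation):

```rust
use std::collections::HashMap;

/// Encoded semantic tokens in the LSP wire format: a flat `Vec<u32>`.
type EncodedTokens = Vec<u32>;

/// Hypothetical cache of full-document semantic tokens, keyed by
/// (document URI, document version).
#[derive(Default)]
struct SemanticTokensCache {
  entries: HashMap<(String, i32), EncodedTokens>,
}

impl SemanticTokensCache {
  /// Return cached tokens for this exact document version, if any.
  fn get(&self, uri: &str, version: i32) -> Option<&EncodedTokens> {
    self.entries.get(&(uri.to_string(), version))
  }

  /// Store tokens computed for a full-document request; range requests
  /// for the same version can then be served from these tokens instead
  /// of re-running the expensive TSC classification.
  fn insert(&mut self, uri: String, version: i32, tokens: EncodedTokens) {
    // A new version invalidates older entries for the same document,
    // matching "semantic tokens also get refreshed on each change".
    self.entries.retain(|(u, _), _| u != &uri);
    self.entries.insert((uri, version), tokens);
  }
}

fn main() {
  let mut cache = SemanticTokensCache::default();
  // `semanticTokens/full` computes tokens once and caches them.
  cache.insert("file:///a.ts".to_string(), 1, vec![0, 0, 5, 1, 0]);
  // A later `semanticTokens/range` for the same version hits the cache.
  assert!(cache.get("file:///a.ts", 1).is_some());
  // An edit bumps the version, so stale tokens are never returned.
  assert!(cache.get("file:///a.ts", 2).is_none());
}
```

Keying on the document version means any edit naturally invalidates the cached tokens, which is why the test below asserts on the `tsc.request.getEncodedSemanticClassifications` perf counter rather than on cache internals.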
Diffstat (limited to 'tests/integration/lsp_tests.rs')
-rw-r--r--  tests/integration/lsp_tests.rs  84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/tests/integration/lsp_tests.rs b/tests/integration/lsp_tests.rs
index 84f334992..47fefeafe 100644
--- a/tests/integration/lsp_tests.rs
+++ b/tests/integration/lsp_tests.rs
@@ -12698,3 +12698,87 @@ fn lsp_ts_code_fix_any_param() {
panic!("failed to find 'Infer parameter types from usage' fix in fixes: {fixes:#?}");
}
+
+#[test]
+fn lsp_semantic_token_caching() {
+ let context = TestContextBuilder::new().use_temp_cwd().build();
+ let temp_dir = context.temp_dir().path();
+
+ let mut client: LspClient = context
+ .new_lsp_command()
+ .collect_perf()
+ .set_root_dir(temp_dir.clone())
+ .build();
+ client.initialize_default();
+
+ let a = source_file(
+ temp_dir.join("a.ts"),
+ r#"
+ export const a = 1;
+ export const b = 2;
+ export const bar = () => "bar";
+ function foo(fun: (number, number, number) => number, c: number) {
+ const double = (x) => x * 2;
+ return fun(double(a), b, c);
+ }"#,
+ );
+
+ client.did_open_file(&a);
+
+ // requesting a range won't cache the tokens, so this will
+ // be computed
+ let res = client.write_request(
+ "textDocument/semanticTokens/range",
+ json!({
+ "textDocument": a.identifier(),
+ "range": {
+ "start": a.range_of("const bar").start,
+ "end": a.range_of("}").end,
+ }
+ }),
+ );
+
+ assert_eq!(
+ client
+ .perf()
+ .measure_count("tsc.request.getEncodedSemanticClassifications"),
+ 1,
+ );
+
+ // requesting the full doc should compute and cache the tokens
+ let _full = client.write_request(
+ "textDocument/semanticTokens/full",
+ json!({
+ "textDocument": a.identifier(),
+ }),
+ );
+
+ assert_eq!(
+ client
+ .perf()
+ .measure_count("tsc.request.getEncodedSemanticClassifications"),
+ 2,
+ );
+
+ // use the cached tokens
+ let res_cached = client.write_request(
+ "textDocument/semanticTokens/range",
+ json!({
+ "textDocument": a.identifier(),
+ "range": {
+ "start": a.range_of("const bar").start,
+ "end": a.range_of("}").end,
+ }
+ }),
+ );
+
+ // make sure we actually used the cache
+ assert_eq!(
+ client
+ .perf()
+ .measure_count("tsc.request.getEncodedSemanticClassifications"),
+ 2,
+ );
+
+ assert_eq!(res, res_cached);
+}