-
Notifications
You must be signed in to change notification settings - Fork 5.6k
perf(lsp): Cache semantic tokens for open documents #23799
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
4414032
332f910
54d5329
bb7196b
61d0028
320130d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -12591,3 +12591,87 @@ fn lsp_ts_code_fix_any_param() { | |
|
||
panic!("failed to find 'Infer parameter types from usage' fix in fixes: {fixes:#?}"); | ||
} | ||
|
||
#[test]
fn lsp_semantic_token_caching() {
  // The perf measure we count to detect whether the tokens were recomputed
  // by tsc or served from the semantic-token cache.
  const CLASSIFICATIONS_MEASURE: &str =
    "tsc.request.getEncodedSemanticClassifications";

  let context = TestContextBuilder::new().use_temp_cwd().build();
  let temp_dir = context.temp_dir().path();

  // Performance markers must be collected so we can assert on how many
  // times tsc was actually asked for classifications.
  let mut client: LspClient = context
    .new_lsp_command()
    .collect_perf()
    .set_root_dir(temp_dir.clone())
    .build();
  client.initialize_default();

  let file = source_file(
    temp_dir.join("a.ts"),
    r#"
export const a = 1;
export const b = 2;
export const bar = () => "bar";
function foo(fun: (number, number, number) => number, c: number) {
const double = (x) => x * 2;
return fun(double(a), b, c);
}"#,
  );

  client.did_open_file(&file);

  // Build the identical range-request params on demand; the positions are
  // recomputed from the (unchanged) file text each time, exactly as the
  // two inline `json!` literals would.
  let range_params = || {
    json!({
      "textDocument": file.identifier(),
      "range": {
        "start": file.range_of("const bar").start,
        "end": file.range_of("}").end,
      }
    })
  };

  // A range request does not populate the cache, so this first request
  // must hit tsc.
  let first_range_tokens = client
    .write_request("textDocument/semanticTokens/range", range_params());

  assert_eq!(client.perf().measure_count(CLASSIFICATIONS_MEASURE), 1,);

  // A full-document request both computes and caches the tokens — one
  // more tsc round trip.
  let _full_tokens = client.write_request(
    "textDocument/semanticTokens/full",
    json!({
      "textDocument": file.identifier(),
    }),
  );

  assert_eq!(client.perf().measure_count(CLASSIFICATIONS_MEASURE), 2,);

  // Repeat the original range request; it should now be answered from
  // the cache populated by the full request.
  let cached_range_tokens = client
    .write_request("textDocument/semanticTokens/range", range_params());

  // The measure count is unchanged, proving tsc was not consulted again.
  assert_eq!(client.perf().measure_count(CLASSIFICATIONS_MEASURE), 2,);

  // Cached and freshly-computed results must agree.
  assert_eq!(first_range_tokens, cached_range_tokens);
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Might be nice to add something like
`CloneCell<T: Send + Clone>`
to `cli::util::sync`
that has methods like `set(value: T)`
and `get_cloned(&self): T`
in order to bury the mutex there.

There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.

That said, should this use something like `OnceCell` instead? I guess not, because of async.