diff options
author | Bartek Iwańczuk <biwanczuk@gmail.com> | 2023-05-26 06:31:54 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-05-26 04:31:54 +0000 |
commit | 7ae55e75d812edc93251357465f8d49fc2fb5d26 (patch) | |
tree | 9d9a803cec2c5fd0dc6709f43115b82515c2dc32 /cli/lsp/diagnostics.rs | |
parent | 89026abe395c22eb2ace4ea5f948189daa1dadf1 (diff) |
Revert "chore(lsp/tests): diagnostic synchronization (#19264)" (#19268)
This reverts commit 89026abe395c22eb2ace4ea5f948189daa1dadf1.
This change caused LSP benchmarks to fail on `main`.
Diffstat (limited to 'cli/lsp/diagnostics.rs')
-rw-r--r-- | cli/lsp/diagnostics.rs | 134 |
1 file changed, 13 insertions, 121 deletions
diff --git a/cli/lsp/diagnostics.rs b/cli/lsp/diagnostics.rs index a5e9d7bf8..1a57ad03b 100644 --- a/cli/lsp/diagnostics.rs +++ b/cli/lsp/diagnostics.rs @@ -16,7 +16,6 @@ use super::tsc::TsServer; use crate::args::LintOptions; use crate::graph_util; use crate::graph_util::enhanced_resolution_error_message; -use crate::lsp::lsp_custom::DiagnosticBatchNotificationParams; use crate::tools::lint::get_configured_rules; use deno_ast::MediaType; @@ -38,7 +37,6 @@ use deno_runtime::tokio_util::create_basic_runtime; use deno_semver::npm::NpmPackageReqReference; use log::error; use std::collections::HashMap; -use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::thread; use tokio::sync::mpsc; @@ -47,13 +45,8 @@ use tokio::time::Duration; use tokio_util::sync::CancellationToken; use tower_lsp::lsp_types as lsp; -#[derive(Debug)] -pub struct DiagnosticServerUpdateMessage { - pub snapshot: Arc<StateSnapshot>, - pub config: Arc<ConfigSnapshot>, - pub lint_options: LintOptions, -} - +pub type SnapshotForDiagnostics = + (Arc<StateSnapshot>, Arc<ConfigSnapshot>, LintOptions); pub type DiagnosticRecord = (ModuleSpecifier, Option<i32>, Vec<lsp::Diagnostic>); pub type DiagnosticVec = Vec<DiagnosticRecord>; @@ -152,55 +145,13 @@ impl TsDiagnosticsStore { } } -pub fn should_send_diagnostic_batch_index_notifications() -> bool { - crate::args::has_flag_env_var( - "DENO_DONT_USE_INTERNAL_LSP_DIAGNOSTIC_SYNC_FLAG", - ) -} - -#[derive(Clone, Debug)] -struct DiagnosticBatchCounter(Option<Arc<AtomicUsize>>); - -impl Default for DiagnosticBatchCounter { - fn default() -> Self { - if should_send_diagnostic_batch_index_notifications() { - Self(Some(Default::default())) - } else { - Self(None) - } - } -} - -impl DiagnosticBatchCounter { - pub fn inc(&self) -> Option<usize> { - self - .0 - .as_ref() - .map(|value| value.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1) - } - - pub fn get(&self) -> Option<usize> { - self - .0 - .as_ref() - .map(|value| 
value.load(std::sync::atomic::Ordering::SeqCst)) - } -} - -#[derive(Debug)] -struct ChannelMessage { - message: DiagnosticServerUpdateMessage, - batch_index: Option<usize>, -} - #[derive(Debug)] pub struct DiagnosticsServer { - channel: Option<mpsc::UnboundedSender<ChannelMessage>>, + channel: Option<mpsc::UnboundedSender<SnapshotForDiagnostics>>, ts_diagnostics: TsDiagnosticsStore, client: Client, performance: Arc<Performance>, ts_server: Arc<TsServer>, - batch_counter: DiagnosticBatchCounter, } impl DiagnosticsServer { @@ -215,7 +166,6 @@ impl DiagnosticsServer { client, performance, ts_server, - batch_counter: Default::default(), } } @@ -237,7 +187,7 @@ impl DiagnosticsServer { #[allow(unused_must_use)] pub fn start(&mut self) { - let (tx, mut rx) = mpsc::unbounded_channel::<ChannelMessage>(); + let (tx, mut rx) = mpsc::unbounded_channel::<SnapshotForDiagnostics>(); self.channel = Some(tx); let client = self.client.clone(); let performance = self.performance.clone(); @@ -258,17 +208,7 @@ impl DiagnosticsServer { match rx.recv().await { // channel has closed None => break, - Some(message) => { - let ChannelMessage { - message: - DiagnosticServerUpdateMessage { - snapshot, - config, - lint_options, - }, - batch_index, - } = message; - + Some((snapshot, config, lint_options)) => { // cancel the previous run token.cancel(); token = CancellationToken::new(); @@ -315,7 +255,6 @@ impl DiagnosticsServer { }) .unwrap_or_default(); - let messages_len = diagnostics.len(); if !token.is_cancelled() { ts_diagnostics_store.update(&diagnostics); diagnostics_publisher.publish(diagnostics, &token).await; @@ -324,17 +263,6 @@ impl DiagnosticsServer { performance.measure(mark); } } - - if let Some(batch_index) = batch_index { - diagnostics_publisher - .client - .send_diagnostic_batch_notification( - DiagnosticBatchNotificationParams { - batch_index, - messages_len, - }, - ); - } } })); @@ -358,24 +286,10 @@ impl DiagnosticsServer { ) .await; - let messages_len = diagnostics.len(); 
- if !token.is_cancelled() { - diagnostics_publisher.publish(diagnostics, &token).await; + diagnostics_publisher.publish(diagnostics, &token).await; - if !token.is_cancelled() { - performance.measure(mark); - } - } - - if let Some(batch_index) = batch_index { - diagnostics_publisher - .client - .send_diagnostic_batch_notification( - DiagnosticBatchNotificationParams { - batch_index, - messages_len, - }, - ); + if !token.is_cancelled() { + performance.measure(mark); } } })); @@ -401,24 +315,10 @@ impl DiagnosticsServer { ) .await; - let messages_len = diagnostics.len(); - if !token.is_cancelled() { - diagnostics_publisher.publish(diagnostics, &token).await; - - if !token.is_cancelled() { - performance.measure(mark); - } - } + diagnostics_publisher.publish(diagnostics, &token).await; - if let Some(batch_index) = batch_index { - diagnostics_publisher - .client - .send_diagnostic_batch_notification( - DiagnosticBatchNotificationParams { - batch_index, - messages_len, - }, - ); + if !token.is_cancelled() { + performance.measure(mark); } } })); @@ -429,23 +329,15 @@ impl DiagnosticsServer { }); } - pub fn latest_batch_index(&self) -> Option<usize> { - self.batch_counter.get() - } - pub fn update( &self, - message: DiagnosticServerUpdateMessage, + message: SnapshotForDiagnostics, ) -> Result<(), AnyError> { // todo(dsherret): instead of queuing up messages, it would be better to // instead only store the latest message (ex. maybe using a // tokio::sync::watch::channel) if let Some(tx) = &self.channel { - tx.send(ChannelMessage { - message, - batch_index: self.batch_counter.inc(), - }) - .map_err(|err| err.into()) + tx.send(message).map_err(|err| err.into()) } else { Err(anyhow!("diagnostics server not started")) } |