From fdb437a73278e29c3a08d35432183331c3993a4a Mon Sep 17 00:00:00 2001
From: orangels
Date: Wed, 6 May 2026 23:15:33 +0800
Subject: [PATCH] fix: correct streaming bugs and RPM diagnose gaps

- Start the TTFT timer before .send().await so the measurement includes
  network and server processing time (standard TTFT semantics)
- Add a done flag to break the outer while loop when the stream signals
  completion ([DONE] for OpenAI, message_stop for Anthropic)
- Rewrite SseLineBuffer to buffer raw bytes (Vec<u8>) with a 1 MiB cap,
  fixing UTF-8 corruption when a multi-byte character is split across
  chunks
- Add sliding_window_schedule probes to Diagnose mode so the
  SlidingWindow inference branch is no longer dead code
- Move build_rpm_mode_plan after AppConfig::load so config I/O no
  longer adds drift between the plan's Utc::now() window-boundary
  snapshot and probe execution
---
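Notes:

The multi-byte fix is easiest to see with a split code point. Below is
a minimal regression-test sketch, assuming SseLineBuffer::feed stays
crate-visible so the test module can live in src/protocols/mod.rs (the
module name, test name, and sample bytes are illustrative and not part
of the diff):

    #[cfg(test)]
    mod sse_line_buffer_tests {
        use super::SseLineBuffer;

        #[test]
        fn multibyte_char_split_across_chunks() {
            // "\u{e9}" is 0xC3 0xA9 in UTF-8; end the first chunk between
            // those two bytes to mimic a network read landing mid-character.
            let mut buf = SseLineBuffer::new();
            assert!(buf.feed(b"data: caf\xC3").is_empty()); // no newline yet
            let lines = buf.feed(b"\xA9\n");
            assert_eq!(lines, vec!["data: caf\u{e9}".to_string()]);
        }
    }

The old String-based buffer ran from_utf8_lossy on each chunk in
isolation, so both halves of the split character decoded to U+FFFD and
the assertion above would fail.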
 src/cli.rs                 |  3 ++-
 src/protocols/anthropic.rs |  7 ++++++-
 src/protocols/mod.rs       | 28 ++++++++++++++++++----------
 src/protocols/openai.rs    |  7 ++++++-
 4 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/src/cli.rs b/src/cli.rs
index 79610db..9541069 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -400,6 +400,7 @@ struct RpmCommandOptions {
 }
 
 async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
+    let config = AppConfig::load(&config_path)?;
     let mode_plan = build_rpm_mode_plan(
         options.mode,
         options.rpm,
@@ -408,7 +409,6 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
         options.probe_seconds,
         options.window_offset_ms,
     )?;
-    let config = AppConfig::load(&config_path)?;
     let provider_name = provider_name(&config, options.provider.as_deref())?;
     let provider_config = config.resolved_provider(Some(&provider_name))?;
     let model = options
@@ -536,6 +536,7 @@ fn build_rpm_mode_plan(
     let rpm = require_positive("rpm", rpm)?;
     let burst = require_positive_value("burst", burst)?;
     let mut probes = token_bucket_schedule(rpm, burst, probe_seconds.unwrap_or(90));
+    probes.extend(sliding_window_schedule(burst, probe_seconds.unwrap_or(90)));
     probes.extend(window_boundary_plan(Utc::now(), burst, window_offset_ms).probes);
     probes
 }
diff --git a/src/protocols/anthropic.rs b/src/protocols/anthropic.rs
index ac240b0..03ad004 100644
--- a/src/protocols/anthropic.rs
+++ b/src/protocols/anthropic.rs
@@ -57,6 +57,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result
 
 pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<StreamMetrics> {
     let url = super::endpoint_url(&request.base_url, "/v1/messages")?;
+    let started = Instant::now();
     let response = client
         .post(url)
         .header("x-api-key", &request.api_token)
@@ -86,12 +87,12 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
         .send()
         .await
         .context("Anthropic stream request failed")?;
     super::ensure_success(&response).await?;
-    let started = Instant::now();
     let mut stream = response.bytes_stream();
     let mut line_buffer = SseLineBuffer::new();
     let mut first_chunk_at: Option<Instant> = None;
     let mut current_event = String::new();
+    let mut done = false;
 
     while let Some(chunk) = stream.next().await {
         let chunk = chunk.context("Anthropic stream interrupted")?;
@@ -113,6 +114,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
                 current_event = event.to_string();
                 continue;
             }
             if current_event == "message_stop" {
+                done = true;
                 break;
             }
@@ -131,6 +133,9 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
                 }
             }
         }
+        if done {
+            break;
+        }
     }
 
     let ttft = first_chunk_at
diff --git a/src/protocols/mod.rs b/src/protocols/mod.rs
index 8cd6a31..79b186a 100644
--- a/src/protocols/mod.rs
+++ b/src/protocols/mod.rs
@@ -140,27 +140,35 @@ pub struct StreamMetrics {
     pub ttft: Duration,
     pub total: Duration,
     pub chunks: usize,
 }
 
+const MAX_SSE_BUFFER_SIZE: usize = 1_048_576;
+
 pub(crate) struct SseLineBuffer {
-    buffer: String,
+    buffer: Vec<u8>,
 }
 
 impl SseLineBuffer {
     pub fn new() -> Self {
-        Self {
-            buffer: String::new(),
-        }
+        Self { buffer: Vec::new() }
     }
 
     pub fn feed(&mut self, chunk: &[u8]) -> Vec<String> {
-        let text = String::from_utf8_lossy(chunk);
-        self.buffer.push_str(&text);
+        self.buffer.extend_from_slice(chunk);
+        if self.buffer.len() > MAX_SSE_BUFFER_SIZE {
+            self.buffer.clear();
+            return Vec::new();
+        }
         let mut lines = Vec::new();
-        while let Some(pos) = self.buffer.find('\n') {
-            let line = self.buffer[..pos].trim_end_matches('\r').to_string();
-            self.buffer.drain(..=pos);
-            lines.push(line);
+        while let Some(pos) = self.buffer.iter().position(|&b| b == b'\n') {
+            let line_bytes: Vec<u8> = self.buffer.drain(..=pos).collect();
+            let raw = &line_bytes[..line_bytes.len() - 1];
+            let raw = if raw.last() == Some(&b'\r') {
+                &raw[..raw.len() - 1]
+            } else {
+                raw
+            };
+            lines.push(String::from_utf8_lossy(raw).into_owned());
         }
         lines
     }
diff --git a/src/protocols/openai.rs b/src/protocols/openai.rs
index c61871d..f3a2601 100644
--- a/src/protocols/openai.rs
+++ b/src/protocols/openai.rs
@@ -55,6 +55,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result
 
 pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<StreamMetrics> {
     let url = super::endpoint_url(&request.base_url, "/chat/completions")?;
+    let started = Instant::now();
     let response = client
         .post(url)
         .bearer_auth(&request.api_token)
@@ -83,11 +84,11 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
         .send()
         .await
         .context("OpenAI stream request failed")?;
     super::ensure_success(&response).await?;
-    let started = Instant::now();
     let mut stream = response.bytes_stream();
     let mut line_buffer = SseLineBuffer::new();
     let mut first_chunk_at: Option<Instant> = None;
+    let mut done = false;
 
     while let Some(chunk) = stream.next().await {
         let chunk = chunk.context("OpenAI stream interrupted")?;
@@ -96,6 +97,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
                 continue;
             };
             if data == "[DONE]" {
+                done = true;
                 continue;
             }
             let event = match serde_json::from_str::<StreamEvent>(data) {
@@ -110,6 +112,9 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result
                 }
             }
         }
+        if done {
+            break;
+        }
     }
 
     let ttft = first_chunk_at