
fix: correct streaming bugs and RPM diagnose gaps

- Move TTFT timer start before .send().await so it includes server
  processing time (standard TTFT semantics)
- Add done flag to break outer while loop when stream ends
- Rewrite SseLineBuffer to use Vec<u8> with 1MB cap, fixing UTF-8
  corruption on multi-byte chars split across chunks
- Add sliding_window_schedule probes to Diagnose mode so the
  SlidingWindow inference branch is no longer dead code
- Move build_rpm_mode_plan after AppConfig::load to eliminate
  window-boundary timing drift
main
orangels, 1 week ago
Parent commit: fdb437a732
4 changed files with 32 additions and 13 deletions
  1. src/cli.rs (+2 / -1)
  2. src/protocols/anthropic.rs (+6 / -1)
  3. src/protocols/mod.rs (+18 / -10)
  4. src/protocols/openai.rs (+6 / -1)

src/cli.rs (+2 / -1)

@@ -400,6 +400,7 @@ struct RpmCommandOptions {
 }
 
 async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
+    let config = AppConfig::load(&config_path)?;
     let mode_plan = build_rpm_mode_plan(
         options.mode,
         options.rpm,
@@ -408,7 +409,6 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
         options.probe_seconds,
         options.window_offset_ms,
     )?;
-    let config = AppConfig::load(&config_path)?;
     let provider_name = provider_name(&config, options.provider.as_deref())?;
     let provider_config = config.resolved_provider(Some(&provider_name))?;
     let model = options
@@ -536,6 +536,7 @@ fn build_rpm_mode_plan(
             let rpm = require_positive("rpm", rpm)?;
             let burst = require_positive_value("burst", burst)?;
             let mut probes = token_bucket_schedule(rpm, burst, probe_seconds.unwrap_or(90));
+            probes.extend(sliding_window_schedule(burst, probe_seconds.unwrap_or(90)));
             probes.extend(window_boundary_plan(Utc::now(), burst, window_offset_ms).probes);
             probes
         }

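Why the reordering matters: window_boundary_plan anchors its probe schedule to Utc::now(), so any work that runs between building the plan and firing the first probe shifts every probe relative to the window boundary it was aimed at. A minimal std-only sketch of that drift, assuming (as an illustration, not the crate's actual replay logic) that probes are replayed as sleeps measured from execution start:

use std::thread;
use std::time::{Duration, Instant};

fn main() {
    // The plan wants a probe 500 ms past the window boundary, measured from "now".
    let planned_at = Instant::now();
    let intended_offset = Duration::from_millis(500);

    // Stand-in for work done *after* planning under the old ordering,
    // e.g. reading and parsing the config file.
    thread::sleep(Duration::from_millis(300));

    // Sleeping `intended_offset` from here lands the probe roughly 800 ms after
    // `planned_at`, not the 500 ms the plan assumed.
    let drift = planned_at.elapsed();
    println!("every probe drifts by ~{} ms (intended offset {:?})", drift.as_millis(), intended_offset);
}

Loading the config first keeps the Utc::now() snapshot inside build_rpm_mode_plan as close as possible to the moment the probes actually run.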

src/protocols/anthropic.rs (+6 / -1)

@@ -57,6 +57,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
 
 pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
     let url = super::endpoint_url(&request.base_url, "/v1/messages")?;
+    let started = Instant::now();
     let response = client
         .post(url)
         .header("x-api-key", &request.api_token)
@@ -86,12 +87,12 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
         );
     }
 
-    let started = Instant::now();
     let mut stream = response.bytes_stream();
     let mut buffer = super::SseLineBuffer::new();
     let mut text = String::new();
     let mut first_token_ms: Option<u128> = None;
     let mut current_event = String::new();
+    let mut done = false;
 
     while let Some(chunk) = stream.next().await {
         let chunk = chunk.context("Anthropic stream interrupted")?;
@@ -113,6 +114,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
                         text.push_str(&content);
                     }
                 } else if current_event == "message_stop" {
+                    done = true;
                     break;
                 }
             }
@@ -120,6 +122,9 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
                 current_event.clear();
             }
         }
+        if done {
+            break;
+        }
     }
 
     if text.is_empty() {

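Two of the commit-message bullets land in this file: starting `started` before `.send().await` makes first_token_ms cover request upload and server-side processing (the usual time-to-first-token definition), and the new done flag is what lets "message_stop" actually terminate the stream. The sketch below, plain synchronous Rust with made-up data, shows why a bare break is not enough when the line loop is nested inside the chunk loop:

fn main() {
    // Outer loop stands in for `while let Some(chunk) = stream.next().await`,
    // inner loop for the lines returned by `buffer.feed(&chunk)`.
    let chunks = vec![vec!["a", "b"], vec!["message_stop", "leftover"], vec!["late"]];

    let mut done = false;
    let mut seen = Vec::new();
    for chunk in chunks {
        for line in chunk {
            if line == "message_stop" {
                done = true;
                break; // exits the inner loop only
            }
            seen.push(line);
        }
        if done {
            break; // without this, "late" would still be polled and processed
        }
    }
    assert_eq!(seen, vec!["a", "b"]);
}

A labeled break ('outer: while … { break 'outer; }) would achieve the same thing without the flag; the flag keeps the existing loop structure untouched.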

src/protocols/mod.rs (+18 / -10)

@@ -97,25 +97,33 @@ struct ErrorBody {
     message: Option<String>,
 }
 
+const MAX_SSE_BUFFER_SIZE: usize = 1_048_576;
+
 pub(crate) struct SseLineBuffer {
-    buffer: String,
+    buffer: Vec<u8>,
 }
 
 impl SseLineBuffer {
     pub fn new() -> Self {
-        Self {
-            buffer: String::new(),
-        }
+        Self { buffer: Vec::new() }
     }
 
     pub fn feed(&mut self, chunk: &[u8]) -> Vec<String> {
-        let text = String::from_utf8_lossy(chunk);
-        self.buffer.push_str(&text);
+        self.buffer.extend_from_slice(chunk);
+        if self.buffer.len() > MAX_SSE_BUFFER_SIZE {
+            self.buffer.clear();
+            return Vec::new();
+        }
         let mut lines = Vec::new();
-        while let Some(pos) = self.buffer.find('\n') {
-            let line = self.buffer[..pos].trim_end_matches('\r').to_string();
-            self.buffer.drain(..=pos);
-            lines.push(line);
+        while let Some(pos) = self.buffer.iter().position(|&b| b == b'\n') {
+            let line_bytes: Vec<u8> = self.buffer.drain(..=pos).collect();
+            let raw = &line_bytes[..line_bytes.len() - 1];
+            let raw = if raw.last() == Some(&b'\r') {
+                &raw[..raw.len() - 1]
+            } else {
+                raw
+            };
+            lines.push(String::from_utf8_lossy(raw).into_owned());
         }
         lines
     }

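The switch from a String buffer to Vec<u8> is what fixes the multi-byte corruption: the old code ran String::from_utf8_lossy on each network chunk in isolation, so a code point split across two chunks decoded to U+FFFD on both sides of the split. A self-contained illustration of the failure mode and of the byte-buffering fix (the split point is contrived for the example):

fn main() {
    // "é" is the two bytes 0xC3 0xA9; split the SSE line between them.
    let line = "data: caf\u{e9}\n".as_bytes();
    let (chunk1, chunk2) = line.split_at(10);

    // Old behaviour: decode each chunk as it arrives, then concatenate.
    // Both halves of the split character become U+FFFD.
    let per_chunk = format!(
        "{}{}",
        String::from_utf8_lossy(chunk1),
        String::from_utf8_lossy(chunk2)
    );
    assert!(per_chunk.contains('\u{FFFD}'));

    // New behaviour: accumulate raw bytes and decode only complete lines.
    let mut buffer: Vec<u8> = Vec::new();
    buffer.extend_from_slice(chunk1);
    buffer.extend_from_slice(chunk2);
    let pos = buffer.iter().position(|&b| b == b'\n').unwrap();
    let decoded = String::from_utf8_lossy(&buffer[..pos]).into_owned();
    assert_eq!(decoded, "data: caf\u{e9}");
}

The 1 MB MAX_SSE_BUFFER_SIZE cap bounds memory if a server never sends a newline; clearing the buffer in that case trades one lost line for not growing without limit.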

src/protocols/openai.rs (+6 / -1)

@@ -55,6 +55,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
 
 pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
     let url = super::endpoint_url(&request.base_url, "/chat/completions")?;
+    let started = Instant::now();
     let response = client
         .post(url)
         .bearer_auth(&request.api_token)
@@ -83,11 +84,11 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
         );
     }
 
-    let started = Instant::now();
     let mut stream = response.bytes_stream();
     let mut buffer = super::SseLineBuffer::new();
     let mut text = String::new();
     let mut first_token_ms: Option<u128> = None;
+    let mut done = false;
 
     while let Some(chunk) = stream.next().await {
         let chunk = chunk.context("OpenAI stream interrupted")?;
@@ -96,6 +97,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
                 continue;
             };
             if data == "[DONE]" {
+                done = true;
                 break;
             }
             if let Some(content) = serde_json::from_str::<OpenAiStreamChunk>(data)
@@ -110,6 +112,9 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
                 text.push_str(&content);
             }
         }
+        if done {
+            break;
+        }
     }
 
     if text.is_empty() {

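The same pair of fixes as in anthropic.rs, with the timer move being the subtler one: starting the Instant before .send().await changes first_token_ms from "headers received → first delta" to "request sent → first delta", which includes connection setup, upload, and server-side processing. A rough sketch of the difference; send() and first_delta() are placeholders, not functions from this crate:

use std::thread;
use std::time::{Duration, Instant};

// Placeholders: "send the request and wait for the response headers" and
// "wait for the first content delta on the stream".
fn send() { thread::sleep(Duration::from_millis(30)); }
fn first_delta() { thread::sleep(Duration::from_millis(5)); }

fn main() {
    let started = Instant::now();    // new placement: before the request goes out
    send();
    let headers_at = Instant::now(); // old placement started the clock here
    first_delta();

    let ttft_standard = started.elapsed(); // includes server processing time
    let ttft_old = headers_at.elapsed();   // only the headers-to-first-token gap
    assert!(ttft_standard >= ttft_old);
    println!("standard TTFT: {ttft_standard:?}, old measurement: {ttft_old:?}");
}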
