diff --git a/Cargo.lock b/Cargo.lock
index c558014..4304f95 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1252,6 +1252,7 @@ dependencies = [
  "base64 0.22.1",
  "bytes",
  "futures-core",
+ "futures-util",
  "http",
  "http-body",
  "http-body-util",
@@ -1271,12 +1272,14 @@ dependencies = [
  "sync_wrapper",
  "tokio",
  "tokio-rustls",
+ "tokio-util",
  "tower",
  "tower-http",
  "tower-service",
  "url",
  "wasm-bindgen",
  "wasm-bindgen-futures",
+ "wasm-streams",
  "web-sys",
  "webpki-roots",
 ]
@@ -1948,6 +1951,19 @@ dependencies = [
  "wasmparser",
 ]
 
+[[package]]
+name = "wasm-streams"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
+dependencies = [
+ "futures-util",
+ "js-sys",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+]
+
 [[package]]
 name = "wasmparser"
 version = "0.244.0"
diff --git a/Cargo.toml b/Cargo.toml
index 498f096..af6810b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,7 +11,7 @@ futures = "0.3"
 hdrhistogram = "7"
 indicatif = "0.17"
 regex = "1"
-reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
+reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "stream"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 serde_yaml = "0.9"
diff --git a/config.example.yaml b/config.example.yaml
index 0f52a3f..5f70a9f 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -6,12 +6,14 @@ providers:
     base_url: "https://relay.example.com/v1"
     api_token: "${OPENAI_RELAY_TOKEN}"
     default_model: "gpt-4o-mini"
+    stream: false
 
   anthropic:
     protocol: anthropic
     base_url: "https://relay.example.com"
     api_token: "${ANTHROPIC_RELAY_TOKEN}"
     default_model: "claude-3-5-sonnet-latest"
+    stream: true
 
 benchmarks:
   data_dir: "data/benchmarks"
diff --git a/src/cli.rs b/src/cli.rs
index 85e6524..79610db 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -45,6 +45,8 @@ pub enum Command {
         model: Option<String>,
         #[arg(long)]
         prompt: String,
+        #[arg(long)]
+        stream: Option<bool>,
     },
     Dataset {
         #[command(subcommand)]
@@ -77,6 +79,8 @@ pub enum Command {
         concurrency: Option<usize>,
         #[arg(long)]
         prompt: String,
+        #[arg(long)]
+        stream: Option<bool>,
     },
 }
 
@@ -98,6 +102,8 @@ pub enum BenchCommand {
         concurrency: usize,
         #[arg(long)]
         limit: Option<usize>,
+        #[arg(long)]
+        stream: Option<bool>,
     },
     GpqaDiamond {
         #[arg(long, default_value = "config.yaml")]
@@ -110,6 +116,8 @@ pub enum BenchCommand {
         concurrency: usize,
         #[arg(long)]
         limit: Option<usize>,
+        #[arg(long)]
+        stream: Option<bool>,
     },
 }
 
@@ -120,6 +128,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
             provider,
             model,
             prompt,
+            stream,
         } => {
             let config = AppConfig::load(&config)?;
             let provider = config.resolved_provider(provider.as_deref())?;
@@ -130,11 +139,15 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
                 prompt,
                 temperature: 0.0,
                 max_tokens: 1024,
+                stream: stream.unwrap_or(provider.stream),
             };
 
             let response = run_model_request(provider.protocol, request).await?;
             println!("status: {}", response.status);
             println!("elapsed_ms: {}", response.elapsed_ms);
+            if let Some(ttft) = response.first_token_ms {
+                println!("first_token_ms: {}", ttft);
+            }
             println!("{}", response.text);
 
             Ok(())
@@ -160,6 +173,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
             window_offset_ms,
             concurrency,
             prompt,
+            stream,
         } => {
             run_rpm(
                 config,
@@ -174,6 +188,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
                     window_offset_ms,
                     concurrency,
                     prompt,
+                    stream,
                 },
             )
             .await
@@ -189,14 +204,16 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
             model,
             concurrency,
             limit,
-        } => run_aime_benchmark(config, provider, model, concurrency, limit).await,
+            stream,
+        } => run_aime_benchmark(config, provider, model, concurrency, limit, stream).await,
         BenchCommand::GpqaDiamond {
             config,
             provider,
             model,
             concurrency,
             limit,
-        } => run_gpqa_benchmark(config, provider, model, concurrency, limit).await,
+            stream,
+        } => run_gpqa_benchmark(config, provider, model, concurrency, limit, stream).await,
     }
 }
 
@@ -206,6 +223,7 @@ async fn run_aime_benchmark(
     model: Option<String>,
     concurrency: usize,
     limit: Option<usize>,
+    stream: Option<bool>,
 ) -> Result<()> {
     let config = AppConfig::load(&config_path)?;
     let provider_name = provider_name(&config, provider.as_deref())?;
@@ -223,7 +241,8 @@ async fn run_aime_benchmark(
     let cases = apply_limit(loaded.cases, limit);
     let started_at = Utc::now();
     let started = Instant::now();
-    let base_request = request_template(&provider_config, &model, 0.0, 1024);
+    let mut base_request = request_template(&provider_config, &model, 0.0, 1024);
+    base_request.stream = stream.unwrap_or(provider_config.stream);
     let protocol = provider_config.protocol;
 
     let results = stream::iter(cases)
@@ -244,7 +263,7 @@ async fn run_aime_benchmark(
     for (case, result) in results {
         match result {
             Ok(response) => {
-                metrics.record_success(response.status, response.elapsed_ms as u64);
+                metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64));
                 let actual = judge::extract_final_integer(&response.text)
                     .unwrap_or_else(|| "no_answer".to_string());
                 let correct = judge::judge_integer(&response.text, &case.answer);
@@ -287,6 +306,7 @@ async fn run_gpqa_benchmark(
     model: Option<String>,
     concurrency: usize,
     limit: Option<usize>,
+    stream: Option<bool>,
 ) -> Result<()> {
     let config = AppConfig::load(&config_path)?;
     let provider_name = provider_name(&config, provider.as_deref())?;
@@ -304,7 +324,8 @@ async fn run_gpqa_benchmark(
     let cases = apply_limit(loaded.cases, limit);
     let started_at = Utc::now();
     let started = Instant::now();
-    let base_request = request_template(&provider_config, &model, 0.0, 1024);
+    let mut base_request = request_template(&provider_config, &model, 0.0, 1024);
+    base_request.stream = stream.unwrap_or(provider_config.stream);
     let protocol = provider_config.protocol;
 
     let results = stream::iter(cases)
@@ -325,7 +346,7 @@ async fn run_gpqa_benchmark(
     for (case, result) in results {
         match result {
             Ok(response) => {
-                metrics.record_success(response.status, response.elapsed_ms as u64);
+                metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64));
                 let actual = judge::extract_choice(&response.text)
                     .map(|choice| choice.to_string())
                     .unwrap_or_else(|| "no_answer".to_string());
@@ -375,6 +396,7 @@ struct RpmCommandOptions {
     window_offset_ms: u64,
     concurrency: Option<usize>,
     prompt: String,
+    stream: Option<bool>,
 }
 
 async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
@@ -394,6 +416,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
         .unwrap_or_else(|| provider_config.default_model.clone());
     let request = ModelRequest {
         prompt: options.prompt,
+        stream: options.stream.unwrap_or(provider_config.stream),
         ..request_template(&provider_config, &model, 0.0, 1024)
     };
     let started_at = Utc::now();
@@ -413,7 +436,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
         let success = result.result.is_ok();
         mode_summary.record(result.phase, result.second, success);
         match result.result {
-            Ok(response) => metrics.record_success(response.status, response.elapsed_ms as u64),
+            Ok(response) => metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64)),
             Err(error) => metrics.record_failure(error_code(&error)),
         }
     }
@@ -436,6 +459,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
             success: summary.success,
             failure: summary.failed,
             latency_ms: latency_report(&summary.latency_ms),
+            ttft_ms: latency_report(&summary.ttft_ms),
         },
         mode: mode_plan.mode_name.to_string(),
         mode_detail: mode_summary.into_report(options.mode),
@@ -798,6 +822,7 @@ fn request_template(
         prompt: String::new(),
         temperature,
         max_tokens,
+        stream: provider.stream,
     }
 }
 
@@ -853,6 +878,7 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport {
             wrong: input.summary.wrong,
             failed: input.summary.failed,
             latency_ms: latency_report(&input.summary.latency_ms),
+            ttft_ms: latency_report(&input.summary.ttft_ms),
         },
         errors: input.summary.errors,
         wrong_cases: input.wrong_cases,
@@ -887,6 +913,14 @@ fn print_benchmark_report(report: &BenchmarkReport, report_path: &Path) {
         format_optional_latency(report.summary.latency_ms.p95),
         format_optional_latency(report.summary.latency_ms.p99)
     );
+    if report.summary.ttft_ms.p50.is_some() {
+        println!(
+            "ttft_ms: p50={} p95={} p99={}",
+            format_optional_latency(report.summary.ttft_ms.p50),
+            format_optional_latency(report.summary.ttft_ms.p95),
+            format_optional_latency(report.summary.ttft_ms.p99)
+        );
+    }
     println!("errors:");
     if report.errors.is_empty() {
         println!(" none");
@@ -931,6 +965,14 @@ fn print_rpm_report(report: &RpmReport, report_path: &Path) {
         format_optional_latency(report.summary.latency_ms.p95),
         format_optional_latency(report.summary.latency_ms.p99)
     );
+    if report.summary.ttft_ms.p50.is_some() {
+        println!(
+            "ttft_ms: p50={} p95={} p99={}",
+            format_optional_latency(report.summary.ttft_ms.p50),
+            format_optional_latency(report.summary.ttft_ms.p95),
+            format_optional_latency(report.summary.ttft_ms.p99)
+        );
+    }
     println!("errors:");
     if report.errors.is_empty() {
         println!(" none");
diff --git a/src/config.rs b/src/config.rs
index bd8ee18..9c3ab0b 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -40,6 +40,8 @@ pub struct ProviderConfig {
     pub base_url: String,
     pub api_token: String,
     pub default_model: String,
+    #[serde(default)]
+    pub stream: bool,
 }
 
 impl fmt::Debug for ProviderConfig {
@@ -64,6 +66,7 @@ impl ProviderConfig {
             base_url: expand_env_refs_with(&self.base_url, &env)?,
             api_token: expand_env_refs_with(&self.api_token, &env)?,
             default_model: expand_env_refs_with(&self.default_model, &env)?,
+            stream: self.stream,
         })
     }
 }
@@ -331,6 +334,43 @@ providers:
         assert_eq!(provider.api_token, "anthropic-secret");
     }
 
+    #[test]
+    fn provider_stream_defaults_false_and_can_be_enabled() {
+        let config = AppConfig::load_from_str_with_env(
+            r#"
+default_provider: openai
+providers:
+  openai:
+    protocol: openai
+    base_url: https://api.openai.test/v1
+    api_token: literal-token
+    default_model: gpt-test
+  anthropic:
+    protocol: anthropic
+    base_url: https://api.anthropic.test
+    api_token: literal-token
+    default_model: claude-test
+    stream: true
+"#,
+            |_| -> Result<String> { unreachable!("config has no env refs") },
+        )
+        .expect("load config");
+
+        let openai = config
+            .resolved_provider_with_env(Some("openai"), |_| -> Result<String> {
+                unreachable!("config has no env refs")
+            })
+            .expect("openai provider");
+        let anthropic = config
+            .resolved_provider_with_env(Some("anthropic"), |_| -> Result<String> {
unreachable!("config has no env refs") + }) + .expect("anthropic provider"); + + assert!(!openai.stream); + assert!(anthropic.stream); + } + #[test] fn load_reads_yaml_from_path_without_env_refs() { let temp_dir = tempfile::tempdir().expect("create temp dir"); diff --git a/src/metrics.rs b/src/metrics.rs index 2fb3164..8f5f5ae 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -10,6 +10,7 @@ pub struct Metrics { total_judged: u64, errors: BTreeMap, latency: Histogram, + ttft: Histogram, } #[derive(Debug, Clone, PartialEq, Serialize)] @@ -22,6 +23,7 @@ pub struct MetricsSummary { pub total_judged: u64, pub accuracy: Option, pub latency_ms: LatencySummary, + pub ttft_ms: LatencySummary, pub errors: Vec, } @@ -56,12 +58,16 @@ impl Metrics { total_judged: 0, errors: BTreeMap::new(), latency: Histogram::new(3).expect("valid histogram precision"), + ttft: Histogram::new(3).expect("valid histogram precision"), } } - pub fn record_success(&mut self, _status: u16, latency_ms: u64) { + pub fn record_success(&mut self, _status: u16, latency_ms: u64, first_token_ms: Option) { self.success += 1; let _ = self.latency.record(latency_ms); + if let Some(ttft) = first_token_ms { + let _ = self.ttft.record(ttft); + } } pub fn record_failure(&mut self, code: impl Into) { @@ -97,6 +103,20 @@ impl Metrics { } }; + let ttft_ms = if self.ttft.is_empty() { + LatencySummary { + p50: None, + p95: None, + p99: None, + } + } else { + LatencySummary { + p50: Some(self.ttft.value_at_quantile(0.50)), + p95: Some(self.ttft.value_at_quantile(0.95)), + p99: Some(self.ttft.value_at_quantile(0.99)), + } + }; + MetricsSummary { success: self.success, failed: self.failed, @@ -106,6 +126,7 @@ impl Metrics { total_judged: self.total_judged, accuracy, latency_ms, + ttft_ms, errors: self .errors .iter() @@ -129,7 +150,7 @@ mod tests { fn tracks_success_failure_and_error_counts() { let mut metrics = Metrics::new(); - metrics.record_success(200, 120); + metrics.record_success(200, 120, None); metrics.record_failure("request_error"); metrics.record_failure("429"); metrics.record_failure("429"); @@ -169,7 +190,7 @@ mod tests { let mut metrics = Metrics::new(); for latency in 1..=100 { - metrics.record_success(200, latency); + metrics.record_success(200, latency, None); } let summary = metrics.summary(); @@ -178,4 +199,33 @@ mod tests { assert_eq!(summary.latency_ms.p95, Some(95)); assert_eq!(summary.latency_ms.p99, Some(99)); } + + #[test] + fn computes_ttft_percentiles_when_present() { + let mut metrics = Metrics::new(); + + for i in 1..=100 { + metrics.record_success(200, i * 2, Some(i)); + } + + let summary = metrics.summary(); + + assert_eq!(summary.ttft_ms.p50, Some(50)); + assert_eq!(summary.ttft_ms.p95, Some(95)); + assert_eq!(summary.ttft_ms.p99, Some(99)); + } + + #[test] + fn ttft_is_none_when_no_streaming_requests() { + let mut metrics = Metrics::new(); + + metrics.record_success(200, 100, None); + metrics.record_success(200, 200, None); + + let summary = metrics.summary(); + + assert_eq!(summary.ttft_ms.p50, None); + assert_eq!(summary.ttft_ms.p95, None); + assert_eq!(summary.ttft_ms.p99, None); + } } diff --git a/src/protocols/anthropic.rs b/src/protocols/anthropic.rs index f54cc09..ac240b0 100644 --- a/src/protocols/anthropic.rs +++ b/src/protocols/anthropic.rs @@ -1,8 +1,10 @@ use crate::runner::{ModelRequest, ModelResponse}; use anyhow::{Context, Result, bail}; +use futures::StreamExt; use reqwest::Client; use serde::Deserialize; use serde_json::json; +use std::time::Instant; pub async fn send(client: &Client, request: 
&ModelRequest) -> Result { let url = super::endpoint_url(&request.base_url, "/v1/messages")?; @@ -49,6 +51,86 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result Result { + let url = super::endpoint_url(&request.base_url, "/v1/messages")?; + let response = client + .post(url) + .header("x-api-key", &request.api_token) + .header("anthropic-version", "2023-06-01") + .json(&json!({ + "model": request.model, + "messages": [{"role": "user", "content": request.prompt}], + "temperature": request.temperature, + "max_tokens": request.max_tokens, + "stream": true + })) + .send() + .await + .context("failed to send Anthropic streaming request")?; + + let status = response.status(); + let status_code = status.as_u16(); + + if !status.is_success() { + let body = response + .text() + .await + .context("failed to read Anthropic error response body")?; + bail!( + "{}", + super::upstream_error_message("Anthropic", status_code, &body) + ); + } + + let started = Instant::now(); + let mut stream = response.bytes_stream(); + let mut buffer = super::SseLineBuffer::new(); + let mut text = String::new(); + let mut first_token_ms: Option = None; + let mut current_event = String::new(); + + while let Some(chunk) = stream.next().await { + let chunk = chunk.context("Anthropic stream interrupted")?; + for line in buffer.feed(&chunk) { + if let Some(event_type) = line.strip_prefix("event: ") { + current_event = event_type.to_string(); + continue; + } + if let Some(data) = line.strip_prefix("data: ") { + if current_event == "content_block_delta" { + if let Some(content) = serde_json::from_str::(data) + .ok() + .and_then(|p| p.delta.text) + .filter(|c| !c.is_empty()) + { + if first_token_ms.is_none() { + first_token_ms = Some(started.elapsed().as_millis()); + } + text.push_str(&content); + } + } else if current_event == "message_stop" { + break; + } + } + if line.is_empty() { + current_event.clear(); + } + } + } + + if text.is_empty() { + bail!("Anthropic stream completed without producing any content"); + } + + Ok(ModelResponse { + text, + status: status_code, + elapsed_ms: 0, + first_token_ms, }) } @@ -66,6 +148,16 @@ enum AnthropicContentBlock { Other, } +#[derive(Debug, Deserialize)] +struct AnthropicStreamDelta { + delta: AnthropicDeltaContent, +} + +#[derive(Debug, Deserialize)] +struct AnthropicDeltaContent { + text: Option, +} + #[cfg(test)] mod tests { use crate::runner::ModelRequest; @@ -95,13 +187,14 @@ mod tests { .await; let request = ModelRequest { - base_url: server.uri(), - api_token: "test-token".to_string(), - model: "claude-test".to_string(), - prompt: "hello".to_string(), - temperature: 0.0, - max_tokens: 1024, - }; + base_url: server.uri(), + api_token: "test-token".to_string(), + model: "claude-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 1024, + stream: false +}; let response = super::send(&Client::new(), &request) .await @@ -125,13 +218,14 @@ mod tests { .await; let request = ModelRequest { - base_url: server.uri(), - api_token: "test-token".to_string(), - model: "claude-test".to_string(), - prompt: "hello".to_string(), - temperature: 0.0, - max_tokens: 1024, - }; + base_url: server.uri(), + api_token: "test-token".to_string(), + model: "claude-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 1024, + stream: false +}; let error = super::send(&Client::new(), &request) .await @@ -155,13 +249,14 @@ mod tests { .await; let request = ModelRequest { - base_url: server.uri(), - api_token: 
"test-token".to_string(), - model: "claude-test".to_string(), - prompt: "hello".to_string(), - temperature: 0.0, - max_tokens: 1024, - }; + base_url: server.uri(), + api_token: "test-token".to_string(), + model: "claude-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 1024, + stream: false +}; let message = super::send(&Client::new(), &request) .await @@ -189,13 +284,14 @@ mod tests { .await; let request = ModelRequest { - base_url: format!("{}/v1", server.uri()), - api_token: "test-token".to_string(), - model: "claude-test".to_string(), - prompt: "hello".to_string(), - temperature: 0.0, - max_tokens: 1024, - }; + base_url: format!("{}/v1", server.uri()), + api_token: "test-token".to_string(), + model: "claude-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 1024, + stream: false +}; let response = super::send(&Client::new(), &request) .await diff --git a/src/protocols/mod.rs b/src/protocols/mod.rs index 8dd6b78..e8d6e9f 100644 --- a/src/protocols/mod.rs +++ b/src/protocols/mod.rs @@ -96,3 +96,27 @@ struct ErrorEnvelope { struct ErrorBody { message: Option, } + +pub(crate) struct SseLineBuffer { + buffer: String, +} + +impl SseLineBuffer { + pub fn new() -> Self { + Self { + buffer: String::new(), + } + } + + pub fn feed(&mut self, chunk: &[u8]) -> Vec { + let text = String::from_utf8_lossy(chunk); + self.buffer.push_str(&text); + let mut lines = Vec::new(); + while let Some(pos) = self.buffer.find('\n') { + let line = self.buffer[..pos].trim_end_matches('\r').to_string(); + self.buffer.drain(..=pos); + lines.push(line); + } + lines + } +} diff --git a/src/protocols/openai.rs b/src/protocols/openai.rs index f371424..c61871d 100644 --- a/src/protocols/openai.rs +++ b/src/protocols/openai.rs @@ -1,8 +1,10 @@ use crate::runner::{ModelRequest, ModelResponse}; use anyhow::{Context, Result, bail}; +use futures::StreamExt; use reqwest::Client; use serde::Deserialize; use serde_json::json; +use std::time::Instant; pub async fn send(client: &Client, request: &ModelRequest) -> Result { let url = super::endpoint_url(&request.base_url, "/chat/completions")?; @@ -47,6 +49,78 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result Result { + let url = super::endpoint_url(&request.base_url, "/chat/completions")?; + let response = client + .post(url) + .bearer_auth(&request.api_token) + .json(&json!({ + "model": request.model, + "messages": [{"role": "user", "content": request.prompt}], + "temperature": request.temperature, + "max_tokens": request.max_tokens, + "stream": true + })) + .send() + .await + .context("failed to send OpenAI streaming request")?; + + let status = response.status(); + let status_code = status.as_u16(); + + if !status.is_success() { + let body = response + .text() + .await + .context("failed to read OpenAI error response body")?; + bail!( + "{}", + super::upstream_error_message("OpenAI", status_code, &body) + ); + } + + let started = Instant::now(); + let mut stream = response.bytes_stream(); + let mut buffer = super::SseLineBuffer::new(); + let mut text = String::new(); + let mut first_token_ms: Option = None; + + while let Some(chunk) = stream.next().await { + let chunk = chunk.context("OpenAI stream interrupted")?; + for line in buffer.feed(&chunk) { + let Some(data) = line.strip_prefix("data: ") else { + continue; + }; + if data == "[DONE]" { + break; + } + if let Some(content) = serde_json::from_str::(data) + .ok() + .and_then(|p| p.choices.into_iter().next()) + .and_then(|d| d.delta.content) 
+                .filter(|c| !c.is_empty())
+            {
+                if first_token_ms.is_none() {
+                    first_token_ms = Some(started.elapsed().as_millis());
+                }
+                text.push_str(&content);
+            }
+        }
+    }
+
+    if text.is_empty() {
+        bail!("OpenAI stream completed without producing any content");
+    }
+
+    Ok(ModelResponse {
+        text,
+        status: status_code,
+        elapsed_ms: 0,
+        first_token_ms,
     })
 }
 
@@ -65,6 +139,21 @@ struct OpenAiMessage {
     content: Option<String>,
 }
 
+#[derive(Debug, Deserialize)]
+struct OpenAiStreamChunk {
+    choices: Vec<OpenAiStreamChoice>,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAiStreamChoice {
+    delta: OpenAiStreamDelta,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAiStreamDelta {
+    content: Option<String>,
+}
+
 #[cfg(test)]
 mod tests {
     use crate::runner::ModelRequest;
@@ -95,13 +184,14 @@ mod tests {
             .await;
 
         let request = ModelRequest {
-            base_url: server.uri(),
-            api_token: "test-token".to_string(),
-            model: "gpt-test".to_string(),
-            prompt: "hello".to_string(),
-            temperature: 0.0,
-            max_tokens: 1024,
-        };
+            base_url: server.uri(),
+            api_token: "test-token".to_string(),
+            model: "gpt-test".to_string(),
+            prompt: "hello".to_string(),
+            temperature: 0.0,
+            max_tokens: 1024,
+            stream: false,
+        };
 
         let response = super::send(&Client::new(), &request)
             .await
@@ -125,13 +215,14 @@ mod tests {
             .await;
 
         let request = ModelRequest {
-            base_url: format!("{}/", server.uri()),
-            api_token: "test-token".to_string(),
-            model: "gpt-test".to_string(),
-            prompt: "hello".to_string(),
-            temperature: 0.0,
-            max_tokens: 1024,
-        };
+            base_url: format!("{}/", server.uri()),
+            api_token: "test-token".to_string(),
+            model: "gpt-test".to_string(),
+            prompt: "hello".to_string(),
+            temperature: 0.0,
+            max_tokens: 1024,
+            stream: false,
+        };
 
         let error = super::send(&Client::new(), &request)
             .await
@@ -154,13 +245,14 @@ mod tests {
             .await;
 
         let request = ModelRequest {
-            base_url: server.uri(),
-            api_token: "test-token".to_string(),
-            model: "gpt-test".to_string(),
-            prompt: "hello".to_string(),
-            temperature: 0.0,
-            max_tokens: 1024,
-        };
+            base_url: server.uri(),
+            api_token: "test-token".to_string(),
+            model: "gpt-test".to_string(),
+            prompt: "hello".to_string(),
+            temperature: 0.0,
+            max_tokens: 1024,
+            stream: false,
+        };
 
         let message = super::send(&Client::new(), &request)
             .await
@@ -190,13 +282,14 @@ mod tests {
             .await;
 
         let request = ModelRequest {
-            base_url: format!("{}/v1", server.uri()),
-            api_token: "test-token".to_string(),
-            model: "gpt-test".to_string(),
-            prompt: "hello".to_string(),
-            temperature: 0.0,
-            max_tokens: 1024,
-        };
+            base_url: format!("{}/v1", server.uri()),
+            api_token: "test-token".to_string(),
+            model: "gpt-test".to_string(),
+            prompt: "hello".to_string(),
+            temperature: 0.0,
+            max_tokens: 1024,
+            stream: false,
+        };
 
         let response = super::send(&Client::new(), &request)
             .await
diff --git a/src/report.rs b/src/report.rs
index dafc3d6..c430a4b 100644
--- a/src/report.rs
+++ b/src/report.rs
@@ -38,6 +38,7 @@ pub struct BenchmarkSummaryReport {
     pub wrong: u64,
     pub failed: u64,
     pub latency_ms: LatencyReport,
+    pub ttft_ms: LatencyReport,
 }
 
 #[derive(Debug, Clone, Serialize)]
@@ -77,6 +78,7 @@ pub struct RpmSummaryReport {
     pub success: u64,
     pub failure: u64,
     pub latency_ms: LatencyReport,
+    pub ttft_ms: LatencyReport,
 }
 
 #[derive(Debug, Clone, Serialize)]
@@ -259,6 +261,11 @@ mod tests {
                     p95: Some(10),
                     p99: Some(10),
                 },
+                ttft_ms: LatencyReport {
+                    p50: None,
+                    p95: None,
+                    p99: None,
+                },
             },
             errors: vec![],
             wrong_cases: vec![],
@@ -293,6 +300,11 @@ mod tests {
                     p95: Some(10),
                     p99: Some(10),
                 },
+                ttft_ms: LatencyReport {
+                    p50: None,
+                    p95: None,
+                    p99: None,
+                },
             },
             mode: "sustained".to_string(),
             mode_detail: None,
@@ -329,6 +341,11 @@ mod tests {
                     p95: Some(20),
                     p99: Some(30),
                 },
+                ttft_ms: LatencyReport {
+                    p50: None,
+                    p95: None,
+                    p99: None,
+                },
             },
             mode_detail: Some(RpmModeDetailReport {
                 burst: Some(PhaseSummaryReport {
diff --git a/src/runner.rs b/src/runner.rs
index 9916812..e9bf75b 100644
--- a/src/runner.rs
+++ b/src/runner.rs
@@ -13,6 +13,7 @@ pub struct ModelRequest {
     pub prompt: String,
     pub temperature: f32,
     pub max_tokens: u32,
+    pub stream: bool,
 }
 
 impl fmt::Debug for ModelRequest {
@@ -25,6 +26,7 @@ impl fmt::Debug for ModelRequest {
             .field("prompt", &self.prompt)
             .field("temperature", &self.temperature)
             .field("max_tokens", &self.max_tokens)
+            .field("stream", &self.stream)
             .finish()
     }
 }
@@ -34,6 +36,7 @@ pub struct ModelResponse {
     pub text: String,
     pub status: u16,
     pub elapsed_ms: u128,
+    pub first_token_ms: Option<u128>,
 }
 
 pub async fn run_model_request(
@@ -50,9 +53,16 @@ pub async fn run_model_request_with_client(
     request: &ModelRequest,
 ) -> Result<ModelResponse> {
     let started = Instant::now();
-    let mut response = match protocol {
-        ProtocolKind::Openai => protocols::openai::send(client, request).await?,
-        ProtocolKind::Anthropic => protocols::anthropic::send(client, request).await?,
+    let mut response = if request.stream {
+        match protocol {
+            ProtocolKind::Openai => protocols::openai::send_stream(client, request).await?,
+            ProtocolKind::Anthropic => protocols::anthropic::send_stream(client, request).await?,
+        }
+    } else {
+        match protocol {
+            ProtocolKind::Openai => protocols::openai::send(client, request).await?,
+            ProtocolKind::Anthropic => protocols::anthropic::send(client, request).await?,
+        }
     };
     response.elapsed_ms = started.elapsed().as_millis();
     Ok(response)
@@ -71,6 +81,7 @@ mod tests {
             prompt: "hello".to_string(),
             temperature: 0.0,
             max_tokens: 1024,
+            stream: false,
         };
 
         let debug = format!("{request:?}");