From 739524628c3a2830e67a8371a9a9384af8415aa0 Mon Sep 17 00:00:00 2001 From: orangels Date: Sat, 9 May 2026 20:05:33 +0800 Subject: [PATCH 1/8] feat: report effective benchmark request params --- README.md | 2 +- docs/USAGE.zh-CN.md | 1 + docs/testing-guide.md | 1 + src/cli.rs | 187 +++++++++++++++++++++++++++++++++++++++++- src/report.rs | 15 ++++ 5 files changed, 202 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 80a8b4f..8cad482 100644 --- a/README.md +++ b/README.md @@ -305,7 +305,7 @@ Real LLM services often combine multiple limiters, such as RPM, TPM, maximum con Benchmark and RPM commands print a terminal summary with success counts, failures, latency percentiles, errors, and the report path. JSON reports are written under `reports/*.json`; the `reports` directory is ignored by git. -Benchmark reports include `wrong_cases`, with each wrong case containing the case id, question, expected answer, extracted actual answer, and raw model output. RPM reports include request counts, mode, target RPM, observed RPM, latency, error counts, and mode-specific details such as burst summaries, probe summaries, window-boundary summaries, and optional limiter inference. +Benchmark reports include `params.request`, a non-sensitive summary of the protocol-specific request body parameters that are actually sent upstream, excluding prompts and tokens. They also include `wrong_cases`, with each wrong case containing the case id, question, expected answer, extracted actual answer, and raw model output. RPM reports include request counts, mode, target RPM, observed RPM, latency, error counts, and mode-specific details such as burst summaries, probe summaries, window-boundary summaries, and optional limiter inference. When an upstream request returns a non-success HTTP status such as 400, 429, or 504, `check`, `bench`, and `rpm` automatically write a request/response debug JSON file under `outputs/debug/`. The debug file includes the full request URL, redacted request headers, full request body including the prompt, response status, response headers, and full response body. If the request fails before an HTTP response is available, for example a connect timeout, read failure, or streaming interruption counted as `request_error`, the same directory gets a `*-request-error` debug JSON with `response.status: null`, `response.error_kind: "request_error"`, and the local error message. API tokens are redacted, but prompts and model outputs are preserved for troubleshooting. 
diff --git a/docs/USAGE.zh-CN.md b/docs/USAGE.zh-CN.md index e400598..bd36962 100644 --- a/docs/USAGE.zh-CN.md +++ b/docs/USAGE.zh-CN.md @@ -451,6 +451,7 @@ benchmark report 包含: - benchmark - provider - model +- params.request:真实发送给上游的协议参数摘要,不包含 prompt 和 token - dataset - run 参数 - accuracy diff --git a/docs/testing-guide.md b/docs/testing-guide.md index 53d153a..5da761e 100644 --- a/docs/testing-guide.md +++ b/docs/testing-guide.md @@ -211,6 +211,7 @@ GPQA-Diamond 的 prompt 和评分按 OpenAI `simple-evals` 风格处理:要求 报告自动写入 `reports/` 目录,JSON 格式,包含: - 总体准确率(accuracy) +- 真实发送给上游的协议参数摘要(params.request,不含 prompt/token) - 每道题的对错明细(wrong_cases) - 延迟百分位(latency_ms、ttft_ms) - 错误统计(errors) diff --git a/src/cli.rs b/src/cli.rs index 79243e5..0e222e4 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1,13 +1,13 @@ use crate::benchmarks; use crate::benchmarks::judge; -use crate::config::{AppConfig, ProviderThinkingConfig}; +use crate::config::{AppConfig, ProtocolKind, ProviderThinkingConfig}; use crate::metrics::{LatencySummary, Metrics, MetricsSummary}; use crate::report::{ BenchmarkParamsReport, BenchmarkReport, BenchmarkSummaryReport, CorrectCaseReport, DatasetReport, LatencyReport, LimiterInferenceKind, LimiterInferenceReport, PhaseSummaryReport, ProbeSecondReport, RpmModeDetailReport, RpmParamsReport, RpmReport, RpmRunReport, - RpmSummaryReport, RunReport, ThinkingParamsReport, WindowBoundaryReport, WrongCaseReport, - write_benchmark_report, write_rpm_report, + RpmSummaryReport, RunReport, SentRequestParamsReport, ThinkingParamsReport, + WindowBoundaryReport, WrongCaseReport, write_benchmark_report, write_rpm_report, }; use crate::rpm_modes::{ ProbePhase, RpmMode, ScheduledProbe, burst_schedule, sliding_window_schedule, @@ -20,6 +20,7 @@ use clap::{Parser, Subcommand}; use futures::{StreamExt, stream}; use indicatif::{ProgressBar, ProgressStyle}; use regex::Regex; +use serde_json::{Value, json}; use std::collections::BTreeMap; use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; @@ -492,6 +493,7 @@ async fn run_aime_benchmark(options: BenchmarkCommandOptions) -> Result<()> { model, stream: base_request.stream, thinking: thinking_report(base_request.thinking.as_ref()), + request: sent_request_params(protocol, &base_request), dataset, started_at, duration_ms: started.elapsed().as_millis(), @@ -603,6 +605,7 @@ async fn run_gpqa_benchmark(options: BenchmarkCommandOptions) -> Result<()> { model, stream: base_request.stream, thinking: thinking_report(base_request.thinking.as_ref()), + request: sent_request_params(protocol, &base_request), dataset, started_at, duration_ms: started.elapsed().as_millis(), @@ -1237,6 +1240,7 @@ struct BenchmarkReportInput { model: String, stream: bool, thinking: Option, + request: SentRequestParamsReport, dataset: DatasetReport, started_at: chrono::DateTime, duration_ms: u128, @@ -1256,6 +1260,7 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport { params: BenchmarkParamsReport { stream: input.stream, thinking: input.thinking, + request: input.request, }, dataset: input.dataset, run: RunReport { @@ -1282,6 +1287,106 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport { } } +fn sent_request_params(protocol: ProtocolKind, request: &ModelRequest) -> SentRequestParamsReport { + let protocol_name = match protocol { + ProtocolKind::Openai => "openai", + ProtocolKind::Anthropic => "anthropic", + ProtocolKind::Google => "google", + }; + SentRequestParamsReport { + protocol: protocol_name.to_string(), + body: sent_request_body_params(protocol, 
request), + } +} + +fn sent_request_body_params(protocol: ProtocolKind, request: &ModelRequest) -> Value { + match protocol { + ProtocolKind::Openai => { + let mut body = serde_json::Map::new(); + body.insert("temperature".to_string(), json!(request.temperature)); + body.insert("max_tokens".to_string(), json!(request.max_tokens)); + if request.stream { + body.insert("stream".to_string(), json!(true)); + } + if let Some(thinking) = &request.thinking + && thinking.enabled + { + if let Some(reasoning_effort) = thinking + .reasoning_effort + .as_ref() + .or(thinking.effort.as_ref()) + { + body.insert("reasoning_effort".to_string(), json!(reasoning_effort)); + } + if let Some(reasoning_summary) = &thinking.reasoning_summary { + body.insert("reasoning_summary".to_string(), json!(reasoning_summary)); + } + } + Value::Object(body) + } + ProtocolKind::Anthropic => { + let mut body = serde_json::Map::new(); + body.insert("max_tokens".to_string(), json!(request.max_tokens)); + if request.stream { + body.insert("stream".to_string(), json!(true)); + } + if let Some(thinking) = &request.thinking + && thinking.enabled + { + let mut thinking_body = serde_json::Map::new(); + let thinking_type = thinking.kind.as_deref().unwrap_or("enabled"); + thinking_body.insert("type".to_string(), json!(thinking_type)); + if thinking_type == "adaptive" { + if let Some(effort) = &thinking.effort { + thinking_body.insert("effort".to_string(), json!(effort)); + } + } else if let Some(budget_tokens) = thinking.budget_tokens { + thinking_body.insert("budget_tokens".to_string(), json!(budget_tokens)); + } + if let Some(display) = &thinking.display { + thinking_body.insert("display".to_string(), json!(display)); + } + body.insert("thinking".to_string(), Value::Object(thinking_body)); + } else { + body.insert("temperature".to_string(), json!(request.temperature)); + } + Value::Object(body) + } + ProtocolKind::Google => { + let mut generation_config = serde_json::Map::new(); + generation_config.insert("temperature".to_string(), json!(request.temperature)); + generation_config.insert("maxOutputTokens".to_string(), json!(request.max_tokens)); + if let Some(thinking) = &request.thinking + && thinking.enabled + { + let mut thinking_config = serde_json::Map::new(); + if let Some(display) = &thinking.display { + thinking_config.insert( + "includeThoughts".to_string(), + json!(display != "omitted" && display != "false"), + ); + } + if let Some(budget_tokens) = thinking.budget_tokens { + thinking_config.insert("thinkingBudget".to_string(), json!(budget_tokens)); + } + if let Some(effort) = &thinking.effort { + thinking_config.insert("thinkingLevel".to_string(), json!(effort)); + } + if !thinking_config.is_empty() { + generation_config + .insert("thinkingConfig".to_string(), Value::Object(thinking_config)); + } + } + let mut body = serde_json::Map::new(); + body.insert( + "generationConfig".to_string(), + Value::Object(generation_config), + ); + Value::Object(body) + } + } +} + fn latency_report(summary: &LatencySummary) -> LatencyReport { LatencyReport { p50: summary.p50, @@ -1712,6 +1817,82 @@ mod tests { assert_eq!(merged.budget_tokens, Some(20000)); } + #[test] + fn sent_anthropic_adaptive_request_params_omit_temperature_and_nulls() { + let request = ModelRequest { + base_url: "https://example.test".to_string(), + api_token: "secret".to_string(), + model: "claude-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 32_768, + stream: true, + raw_debug: None, + thinking: Some(ThinkingConfig { + enabled: 
true, + kind: Some("adaptive".to_string()), + budget_tokens: None, + effort: Some("high".to_string()), + display: Some("summarized".to_string()), + reasoning_effort: None, + reasoning_summary: None, + }), + }; + + let params = sent_request_params(ProtocolKind::Anthropic, &request); + + assert_eq!(params.protocol, "anthropic"); + assert_eq!(params.body["max_tokens"], 32_768); + assert_eq!(params.body["stream"], true); + assert_eq!(params.body["thinking"]["type"], "adaptive"); + assert_eq!(params.body["thinking"]["effort"], "high"); + assert_eq!(params.body["thinking"]["display"], "summarized"); + assert!(params.body.get("temperature").is_none()); + assert!(params.body["thinking"].get("budget_tokens").is_none()); + assert!(params.body["thinking"].get("reasoning_effort").is_none()); + } + + #[test] + fn sent_google_request_params_use_generation_config_names() { + let request = ModelRequest { + base_url: "https://example.test".to_string(), + api_token: "secret".to_string(), + model: "gemini-test".to_string(), + prompt: "hello".to_string(), + temperature: 0.0, + max_tokens: 32_768, + stream: true, + raw_debug: None, + thinking: Some(ThinkingConfig { + enabled: true, + kind: None, + budget_tokens: Some(5000), + effort: Some("high".to_string()), + display: Some("summarized".to_string()), + reasoning_effort: None, + reasoning_summary: None, + }), + }; + + let params = sent_request_params(ProtocolKind::Google, &request); + + assert_eq!(params.protocol, "google"); + assert_eq!(params.body["generationConfig"]["temperature"], 0.0); + assert_eq!(params.body["generationConfig"]["maxOutputTokens"], 32_768); + assert_eq!( + params.body["generationConfig"]["thinkingConfig"]["thinkingBudget"], + 5000 + ); + assert_eq!( + params.body["generationConfig"]["thinkingConfig"]["thinkingLevel"], + "high" + ); + assert_eq!( + params.body["generationConfig"]["thinkingConfig"]["includeThoughts"], + true + ); + } + #[test] fn rpm_command_parses_window_boundary_offset() { let cli = Cli::try_parse_from([ diff --git a/src/report.rs b/src/report.rs index 19eef7a..1371575 100644 --- a/src/report.rs +++ b/src/report.rs @@ -2,6 +2,7 @@ use crate::metrics::ErrorCount; use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; use serde::Serialize; +use serde_json::Value; use std::path::{Path, PathBuf}; #[derive(Debug, Clone, Serialize)] @@ -76,6 +77,13 @@ pub struct BenchmarkReport { pub struct BenchmarkParamsReport { pub stream: bool, pub thinking: Option, + pub request: SentRequestParamsReport, +} + +#[derive(Debug, Clone, Serialize)] +pub struct SentRequestParamsReport { + pub protocol: String, + pub body: Value, } #[derive(Debug, Clone, Serialize)] @@ -279,6 +287,13 @@ mod tests { params: BenchmarkParamsReport { stream: false, thinking: None, + request: SentRequestParamsReport { + protocol: "openai".to_string(), + body: serde_json::json!({ + "temperature": 0.0, + "max_tokens": 1024 + }), + }, }, dataset: DatasetReport { source: "local".to_string(), From 1243558bc8f73eedf37e71e5a6184de5cf1d5994 Mon Sep 17 00:00:00 2001 From: orangels Date: Sat, 9 May 2026 20:14:03 +0800 Subject: [PATCH 2/8] fix: include partial stream in interrupted debug logs --- src/protocols/anthropic.rs | 105 ++++++++++++++++++++++++++++++++++--- src/protocols/google.rs | 20 ++++--- src/protocols/openai.rs | 20 ++++--- 3 files changed, 121 insertions(+), 24 deletions(-) diff --git a/src/protocols/anthropic.rs b/src/protocols/anthropic.rs index 3175a35..a8164a2 100644 --- a/src/protocols/anthropic.rs +++ b/src/protocols/anthropic.rs @@ -191,18 +191,22 
@@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result chunk, + Err(error) => { + write_response_request_error_debug( request, &debug_url, request_body.clone(), + status_code, + &response_headers, + &raw_stream, format!("Anthropic stream interrupted: {error}"), - "anthropic-request-error", - ); - error - }) - .context("Anthropic stream interrupted")?; + ) + .await?; + return Err(error).context("Anthropic stream interrupted"); + } + }; for line in buffer.feed(&chunk) { raw_stream.push_str(&line); raw_stream.push('\n'); @@ -891,6 +895,91 @@ mod tests { assert!(!raw.contains("anthropic-secret-token")); } + #[tokio::test] + async fn interrupted_stream_debug_records_status_headers_and_partial_stream() { + use tokio::io::{AsyncReadExt, AsyncWriteExt}; + use tokio::net::TcpListener; + + let listener = TcpListener::bind("127.0.0.1:0") + .await + .expect("bind listener"); + let addr = listener.local_addr().expect("listener addr"); + let server = tokio::spawn(async move { + let (mut socket, _) = listener.accept().await.expect("accept request"); + let mut buffer = [0_u8; 4096]; + let _ = socket.read(&mut buffer).await.expect("read request"); + let chunk = "event: ping\ndata: {\"type\":\"ping\"}\n\n"; + let response = format!( + "HTTP/1.1 200 OK\r\n\ + content-type: text/event-stream\r\n\ + transfer-encoding: chunked\r\n\ + x-request-id: broken-stream\r\n\ + \r\n\ + {:x}\r\n{}\r\n\ + zz\r\nbroken\r\n", + chunk.len(), + chunk + ); + socket + .write_all(response.as_bytes()) + .await + .expect("write malformed response"); + }); + + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let request = ModelRequest { + base_url: format!("http://{addr}"), + api_token: "anthropic-secret-token".to_string(), + model: "claude-test".to_string(), + prompt: "prompt before broken stream".to_string(), + temperature: 0.0, + max_tokens: 1024, + stream: true, + raw_debug: Some( + RawDebugConfig::new( + temp_dir.path().to_path_buf(), + "anthropic-claude-test".to_string(), + ) + .with_success_raw(false), + ), + thinking: None, + }; + + let error = super::send_stream(&Client::new(), &request) + .await + .expect_err("malformed chunked stream should fail"); + server.await.expect("server task"); + assert!(error.to_string().contains("stream interrupted")); + + let debug_files = std::fs::read_dir(temp_dir.path()) + .expect("read debug dir") + .collect::, _>>() + .expect("debug entries"); + assert_eq!(debug_files.len(), 1); + let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); + + assert_eq!(debug["response"]["status"], 200); + assert_eq!( + debug["response"]["headers"]["x-request-id"], + "broken-stream" + ); + assert_eq!(debug["response"]["error_kind"], "request_error"); + assert!( + debug["response"]["body"] + .as_str() + .expect("partial stream") + .contains("event: ping") + ); + assert!( + debug["response"]["error"] + .as_str() + .expect("stream error") + .contains("Anthropic stream interrupted") + ); + assert!(!raw.contains("anthropic-secret-token")); + } + fn anthropic_request(base_url: String) -> ModelRequest { ModelRequest { base_url, diff --git a/src/protocols/google.rs b/src/protocols/google.rs index 0b892cc..228d501 100644 --- a/src/protocols/google.rs +++ b/src/protocols/google.rs @@ -184,18 +184,22 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result = None; while let Some(chunk) = stream.next().await { - let chunk = chunk - .map_err(|error| 
{ - write_request_error_debug_blocking( + let chunk = match chunk { + Ok(chunk) => chunk, + Err(error) => { + write_response_request_error_debug( request, &debug_url, request_body.clone(), + status_code, + &response_headers, + &raw_stream, format!("Google stream interrupted: {error}"), - "google-request-error", - ); - error - }) - .context("Google stream interrupted")?; + ) + .await?; + return Err(error).context("Google stream interrupted"); + } + }; for line in buffer.feed(&chunk) { raw_stream.push_str(&line); raw_stream.push('\n'); diff --git a/src/protocols/openai.rs b/src/protocols/openai.rs index 850b6e4..40b2d19 100644 --- a/src/protocols/openai.rs +++ b/src/protocols/openai.rs @@ -181,18 +181,22 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result chunk, + Err(error) => { + write_response_request_error_debug( request, &debug_url, request_body.clone(), + status_code, + &response_headers, + &raw_stream, format!("OpenAI stream interrupted: {error}"), - "openai-request-error", - ); - error - }) - .context("OpenAI stream interrupted")?; + ) + .await?; + return Err(error).context("OpenAI stream interrupted"); + } + }; for line in buffer.feed(&chunk) { raw_stream.push_str(&line); raw_stream.push('\n'); From a15a62264a65657caf31307f3833e6bd8c672054 Mon Sep 17 00:00:00 2001 From: orangels Date: Sat, 9 May 2026 21:26:42 +0800 Subject: [PATCH 3/8] fix: set bench max tokens default to 64k --- src/cli.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 0e222e4..e07a2cc 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -139,7 +139,7 @@ pub enum BenchCommand { limit: Option, #[arg(long, num_args = 0..=1, default_missing_value = "true")] stream: Option, - #[arg(long, default_value_t = 32_768)] + #[arg(long, default_value_t = 64_000)] max_tokens: u32, #[arg(long)] debug_raw: bool, @@ -171,7 +171,7 @@ pub enum BenchCommand { limit: Option, #[arg(long, num_args = 0..=1, default_missing_value = "true")] stream: Option, - #[arg(long, default_value_t = 32_768)] + #[arg(long, default_value_t = 64_000)] max_tokens: u32, #[arg(long)] debug_raw: bool, @@ -1681,7 +1681,7 @@ mod tests { } #[test] - fn bench_command_defaults_max_tokens_to_32768() { + fn bench_command_defaults_max_tokens_to_64000() { let cli = Cli::try_parse_from(["lq_token_test", "bench", "gpqa-diamond"]) .expect("parse gpqa bench"); @@ -1692,7 +1692,7 @@ mod tests { panic!("expected gpqa-diamond bench command"); }; - assert_eq!(max_tokens, 32_768); + assert_eq!(max_tokens, 64_000); } #[test] From 70ced011121d0183e8ba7442049afe9a3a4fc513 Mon Sep 17 00:00:00 2001 From: orangels Date: Sun, 10 May 2026 11:43:09 +0800 Subject: [PATCH 4/8] feat: record response debug cases in reports --- src/cli.rs | 200 +++++++++++++++++++++++++++++++++++++++++++++++--- src/report.rs | 33 +++++++++ 2 files changed, 224 insertions(+), 9 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index e07a2cc..2493931 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -5,15 +5,17 @@ use crate::metrics::{LatencySummary, Metrics, MetricsSummary}; use crate::report::{ BenchmarkParamsReport, BenchmarkReport, BenchmarkSummaryReport, CorrectCaseReport, DatasetReport, LatencyReport, LimiterInferenceKind, LimiterInferenceReport, PhaseSummaryReport, - ProbeSecondReport, RpmModeDetailReport, RpmParamsReport, RpmReport, RpmRunReport, - RpmSummaryReport, RunReport, SentRequestParamsReport, ThinkingParamsReport, + ProbeSecondReport, RequestDebugReport, RpmModeDetailReport, RpmParamsReport, RpmReport, + RpmRunReport, 
RpmSummaryReport, RunReport, SentRequestParamsReport, ThinkingParamsReport, WindowBoundaryReport, WrongCaseReport, write_benchmark_report, write_rpm_report, }; use crate::rpm_modes::{ ProbePhase, RpmMode, ScheduledProbe, burst_schedule, sliding_window_schedule, sustained_schedule, token_bucket_schedule, window_boundary_plan, }; -use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig, run_model_request}; +use crate::runner::{ + ModelRequest, ModelResponse, RawDebugConfig, ThinkingConfig, run_model_request, +}; use anyhow::{Context, Result, bail}; use chrono::Utc; use clap::{Parser, Subcommand}; @@ -449,10 +451,21 @@ async fn run_aime_benchmark(options: BenchmarkCommandOptions) -> Result<()> { let mut metrics = Metrics::new(); let mut wrong_cases = Vec::new(); let mut correct_samples = Vec::new(); + let mut debug_requests = Vec::new(); while let Some((case, result)) = results.next().await { pb.inc(1); match result { Ok(response) => { + let prompt = case.prompt(); + if let Some(debug) = + response_debug_report(Some(case.id.clone()), None, None, prompt, &response) + { + debug_requests.push(debug); + } + if response_has_no_content(&response) { + metrics.record_failure("request_error"); + continue; + } metrics.record_success( response.status, response.elapsed_ms as u64, @@ -503,6 +516,7 @@ async fn run_aime_benchmark(options: BenchmarkCommandOptions) -> Result<()> { summary, correct_samples, wrong_cases, + debug_requests, }); let report_path = write_benchmark_report(Path::new("."), &report)?; print_benchmark_report(&report, &report_path); @@ -559,10 +573,21 @@ async fn run_gpqa_benchmark(options: BenchmarkCommandOptions) -> Result<()> { let mut metrics = Metrics::new(); let mut wrong_cases = Vec::new(); let mut correct_samples = Vec::new(); + let mut debug_requests = Vec::new(); while let Some((case, result)) = results.next().await { pb.inc(1); match result { Ok(response) => { + let prompt = case.prompt(); + if let Some(debug) = + response_debug_report(Some(case.id.clone()), None, None, prompt, &response) + { + debug_requests.push(debug); + } + if response_has_no_content(&response) { + metrics.record_failure("request_error"); + continue; + } metrics.record_success( response.status, response.elapsed_ms as u64, @@ -615,6 +640,7 @@ async fn run_gpqa_benchmark(options: BenchmarkCommandOptions) -> Result<()> { summary, correct_samples, wrong_cases, + debug_requests, }); let report_path = write_benchmark_report(Path::new("."), &report)?; print_benchmark_report(&report, &report_path); @@ -683,6 +709,8 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> let started = Instant::now(); let mut metrics = Metrics::new(); let mut mode_summary = RpmModeSummaryBuilder::default(); + let mut debug_requests = Vec::new(); + let prompt_for_debug = request.prompt.clone(); let results = run_scheduled_requests( provider_config.protocol, @@ -693,14 +721,32 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> .await; for result in results { - let success = result.result.is_ok(); + let success = result + .result + .as_ref() + .is_ok_and(|response| !response_has_no_content(response)); mode_summary.record(result.phase, result.second, success); match result.result { - Ok(response) => metrics.record_success( - response.status, - response.elapsed_ms as u64, - response.first_token_ms.map(|ms| ms as u64), - ), + Ok(response) => { + if let Some(debug) = response_debug_report( + None, + Some(phase_name(result.phase).to_string()), + result.second, + 
prompt_for_debug.clone(), + &response, + ) { + debug_requests.push(debug); + } + if response_has_no_content(&response) { + metrics.record_failure("request_error"); + continue; + } + metrics.record_success( + response.status, + response.elapsed_ms as u64, + response.first_token_ms.map(|ms| ms as u64), + ); + } Err(error) => metrics.record_failure(error_code(&error)), } } @@ -738,6 +784,7 @@ mode: mode_plan.mode_name.to_string(), mode_detail: mode_summary.into_report(options.mode), errors: summary.errors, + debug_requests, }; let report_path = write_rpm_report(Path::new("."), &report)?; print_rpm_report(&report, &report_path); @@ -1250,6 +1297,7 @@ struct BenchmarkReportInput { summary: MetricsSummary, correct_samples: Vec<CorrectCaseReport>, wrong_cases: Vec<WrongCaseReport>, + debug_requests: Vec<RequestDebugReport>, } fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport { @@ -1284,6 +1332,71 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport { errors: input.summary.errors, correct_samples: input.correct_samples, wrong_cases: input.wrong_cases, + debug_requests: input.debug_requests, + } +} + +const NEAR_TTFT_LATENCY_THRESHOLD_MS: u128 = 1_000; + +fn response_debug_report( + id: Option<String>, + phase: Option<String>, + second: Option<u64>, + prompt: String, + response: &ModelResponse, +) -> Option<RequestDebugReport> { + let ttft_latency_delta_ms = response + .first_token_ms + .map(|ttft_ms| response.elapsed_ms.abs_diff(ttft_ms)); + let near_ttft_latency = + ttft_latency_delta_ms.is_some_and(|delta_ms| delta_ms < NEAR_TTFT_LATENCY_THRESHOLD_MS); + let has_think_tags = contains_think_tags(&response.text); + let no_content = response_has_no_content(response); + + let mut reasons = Vec::new(); + if no_content { + reasons.push("no_content"); + } + if near_ttft_latency { + reasons.push("near_ttft_latency"); + } + if has_think_tags { + reasons.push("contains_think_tags"); + } + if reasons.is_empty() { + return None; + } + + Some(RequestDebugReport { + reason: reasons.join(","), + id, + phase, + second, + prompt, + output: response.text.clone(), + latency_ms: response.elapsed_ms, + ttft_ms: response.first_token_ms, + ttft_latency_delta_ms, + has_think_tags, + }) +} + +fn response_has_no_content(response: &ModelResponse) -> bool { + response.text.trim().is_empty() +} + +fn contains_think_tags(text: &str) -> bool { + let lower = text.to_ascii_lowercase(); + lower.contains("<think>") && lower.contains("</think>") +} + +fn phase_name(phase: ProbePhase) -> &'static str { + match phase { + ProbePhase::Burst => "burst", + ProbePhase::RefillProbe => "refill_probe", + ProbePhase::SlidingProbe => "sliding_probe", + ProbePhase::BeforeBoundary => "before_boundary", + ProbePhase::AfterBoundary => "after_boundary", } } @@ -1893,6 +2006,75 @@ mod tests { ); } + #[test] + fn response_debug_report_records_near_ttft_latency_with_full_prompt_and_output() { + let response = ModelResponse { + text: "final answer".to_string(), + status: 200, + elapsed_ms: 1_500, + first_token_ms: Some(750), + }; + + let debug = response_debug_report( + Some("case-1".to_string()), + None, + None, + "full prompt".to_string(), + &response, + ) + .expect("near ttft/latency should be debugged"); + + assert_eq!(debug.reason, "near_ttft_latency"); + assert_eq!(debug.id.as_deref(), Some("case-1")); + assert_eq!(debug.prompt, "full prompt"); + assert_eq!(debug.output, "final answer"); + assert_eq!(debug.latency_ms, 1_500); + assert_eq!(debug.ttft_ms, Some(750)); + assert_eq!(debug.ttft_latency_delta_ms, Some(750)); + 
assert!(!debug.has_think_tags); + } + + #[test] + fn response_debug_report_marks_think_tags() { + let response = ModelResponse { + text: "hidden\nanswer".to_string(), + status: 200, + elapsed_ms: 5_000, + first_token_ms: Some(100), + }; + + let debug = response_debug_report(None, None, None, "prompt".to_string(), &response) + .expect("think tags should be debugged"); + + assert_eq!(debug.reason, "contains_think_tags"); + assert!(debug.has_think_tags); + assert_eq!(debug.ttft_latency_delta_ms, Some(4_900)); + } + + #[test] + fn response_debug_report_records_empty_content_as_request_debug() { + let response = ModelResponse { + text: " ".to_string(), + status: 200, + elapsed_ms: 2_000, + first_token_ms: None, + }; + + let debug = response_debug_report( + None, + Some("burst".to_string()), + Some(0), + "prompt".to_string(), + &response, + ) + .expect("empty content should be debugged"); + + assert_eq!(debug.reason, "no_content"); + assert_eq!(debug.phase.as_deref(), Some("burst")); + assert_eq!(debug.second, Some(0)); + assert!(response_has_no_content(&response)); + } + #[test] fn rpm_command_parses_window_boundary_offset() { let cli = Cli::try_parse_from([ diff --git a/src/report.rs b/src/report.rs index 1371575..33a2eba 100644 --- a/src/report.rs +++ b/src/report.rs @@ -71,6 +71,7 @@ pub struct BenchmarkReport { pub errors: Vec, pub correct_samples: Vec, pub wrong_cases: Vec, + pub debug_requests: Vec, } #[derive(Debug, Clone, Serialize)] @@ -140,6 +141,21 @@ pub struct RpmReport { pub summary: RpmSummaryReport, pub mode_detail: Option, pub errors: Vec, + pub debug_requests: Vec, +} + +#[derive(Debug, Clone, Serialize)] +pub struct RequestDebugReport { + pub reason: String, + pub id: Option, + pub phase: Option, + pub second: Option, + pub prompt: String, + pub output: String, + pub latency_ms: u128, + pub ttft_ms: Option, + pub ttft_latency_delta_ms: Option, + pub has_think_tags: bool, } #[derive(Debug, Clone, Serialize)] @@ -330,6 +346,7 @@ mod tests { errors: vec![], correct_samples: vec![], wrong_cases: vec![], + debug_requests: vec![], }; let path = write_benchmark_report(temp_dir.path(), &report).expect("write report"); @@ -380,6 +397,18 @@ mod tests { mode: "sustained".to_string(), mode_detail: None, errors: vec![], + debug_requests: vec![RequestDebugReport { + reason: "near_ttft_latency".to_string(), + id: None, + phase: Some("refill_probe".to_string()), + second: Some(1), + prompt: "Hi".to_string(), + output: "xdone".to_string(), + latency_ms: 1000, + ttft_ms: Some(1000), + ttft_latency_delta_ms: Some(0), + has_think_tags: true, + }], }; let json = serde_json::to_string(&report).expect("serialize report"); @@ -387,6 +416,9 @@ mod tests { assert!(json.contains("\"prompt\":\"Hi\"")); assert!(json.contains("\"stream\":false")); assert!(json.contains("\"duration\":\"60s\"")); + assert!(json.contains("\"debug_requests\"")); + assert!(json.contains("\"reason\":\"near_ttft_latency\"")); + assert!(json.contains("\"has_think_tags\":true")); } #[test] @@ -458,6 +490,7 @@ mod tests { }), }), errors: vec![], + debug_requests: vec![], }; let json = serde_json::to_string(&report).expect("serialize report"); From 4e0d85205a62fc52d44a7fd065f422f1329a6561 Mon Sep 17 00:00:00 2001 From: orangels Date: Sun, 10 May 2026 11:51:08 +0800 Subject: [PATCH 5/8] feat: group raw debug logs by date --- src/protocols/anthropic.rs | 58 ++++++++++++++++++-------------------- src/protocols/google.rs | 37 ++++++++++++++++-------- src/protocols/openai.rs | 44 +++++++++++++++++------------ src/runner.rs | 34 
+++++++++++++++++++--- 4 files changed, 109 insertions(+), 64 deletions(-) diff --git a/src/protocols/anthropic.rs b/src/protocols/anthropic.rs index a8164a2..fbff5fa 100644 --- a/src/protocols/anthropic.rs +++ b/src/protocols/anthropic.rs @@ -418,9 +418,32 @@ fn write_request_error_debug_blocking( mod tests { use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig}; use reqwest::Client; + use std::path::{Path, PathBuf}; use wiremock::matchers::{body_json, header, method, path}; use wiremock::{Mock, MockServer, ResponseTemplate}; + fn read_single_debug_file(root: &Path) -> String { + let mut files = Vec::new(); + collect_debug_files(root, &mut files); + assert_eq!(files.len(), 1); + std::fs::read_to_string(&files[0]).expect("read raw debug file") + } + + fn collect_debug_files(dir: &Path, files: &mut Vec) { + for entry in std::fs::read_dir(dir) + .expect("read debug dir") + .collect::, _>>() + .expect("debug entries") + { + let path = entry.path(); + if path.is_dir() { + collect_debug_files(&path, files); + } else { + files.push(path); + } + } + } + #[tokio::test] async fn extracts_first_text_block() { let server = MockServer::start().await; @@ -651,12 +674,7 @@ mod tests { .await .expect_err("non-success should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["headers"]["x-api-key"], "[REDACTED]"); @@ -700,12 +718,7 @@ mod tests { .await .expect_err("connection should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["method"], "POST"); @@ -816,12 +829,7 @@ mod tests { assert_eq!(response.text, "hi there"); assert!(response.first_token_ms.is_some()); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); assert!(raw.contains("event: content_block_delta")); assert!(raw.contains("\"text\":\"hi \"")); } @@ -872,12 +880,7 @@ mod tests { .contains("completed without producing any content") ); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!( @@ -951,12 +954,7 @@ mod tests { server.await.expect("server task"); assert!(error.to_string().contains("stream interrupted")); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = 
std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["response"]["status"], 200); diff --git a/src/protocols/google.rs b/src/protocols/google.rs index 228d501..6ebb0d5 100644 --- a/src/protocols/google.rs +++ b/src/protocols/google.rs @@ -415,9 +415,32 @@ struct GooglePart { mod tests { use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig}; use reqwest::Client; + use std::path::{Path, PathBuf}; use wiremock::matchers::{body_json, header, method, path}; use wiremock::{Mock, MockServer, ResponseTemplate}; + fn read_single_debug_file(root: &Path) -> String { + let mut files = Vec::new(); + collect_debug_files(root, &mut files); + assert_eq!(files.len(), 1); + std::fs::read_to_string(&files[0]).expect("read raw debug file") + } + + fn collect_debug_files(dir: &Path, files: &mut Vec) { + for entry in std::fs::read_dir(dir) + .expect("read debug dir") + .collect::, _>>() + .expect("debug entries") + { + let path = entry.path(); + if path.is_dir() { + collect_debug_files(&path, files); + } else { + files.push(path); + } + } + } + #[tokio::test] async fn sends_generate_content_with_thinking_config_and_extracts_text() { let server = MockServer::start().await; @@ -560,12 +583,7 @@ mod tests { .await .expect_err("non-success should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["headers"]["x-goog-api-key"], "[REDACTED]"); @@ -606,12 +624,7 @@ mod tests { .await .expect_err("connection should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["method"], "POST"); diff --git a/src/protocols/openai.rs b/src/protocols/openai.rs index 40b2d19..3290ba4 100644 --- a/src/protocols/openai.rs +++ b/src/protocols/openai.rs @@ -403,9 +403,32 @@ fn write_request_error_debug_blocking( mod tests { use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig}; use reqwest::Client; + use std::path::{Path, PathBuf}; use wiremock::matchers::{body_json, header, method, path}; use wiremock::{Mock, MockServer, ResponseTemplate}; + fn read_single_debug_file(root: &Path) -> String { + let mut files = Vec::new(); + collect_debug_files(root, &mut files); + assert_eq!(files.len(), 1); + std::fs::read_to_string(&files[0]).expect("read raw debug file") + } + + fn collect_debug_files(dir: &Path, files: &mut Vec) { + for entry in std::fs::read_dir(dir) + .expect("read debug dir") + .collect::, _>>() + .expect("debug entries") + { + let path = entry.path(); + if path.is_dir() { + collect_debug_files(&path, files); + } else { + files.push(path); + } + } + } + #[tokio::test] async fn extracts_chat_completion_text() { let server = MockServer::start().await; @@ -595,12 +618,7 @@ mod tests { .await 
.expect_err("non-success should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["method"], "POST"); @@ -651,12 +669,7 @@ mod tests { .await .expect_err("connection should fail"); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); let debug: serde_json::Value = serde_json::from_str(&raw).expect("debug json"); assert_eq!(debug["request"]["method"], "POST"); @@ -768,12 +781,7 @@ mod tests { assert_eq!(response.text, "hi there"); assert!(response.first_token_ms.is_some()); - let debug_files = std::fs::read_dir(temp_dir.path()) - .expect("read debug dir") - .collect::, _>>() - .expect("debug entries"); - assert_eq!(debug_files.len(), 1); - let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file"); + let raw = read_single_debug_file(temp_dir.path()); assert!(raw.contains("data: {\"choices\"")); assert!(raw.contains("data: [DONE]")); } diff --git a/src/runner.rs b/src/runner.rs index 05acc18..41684d7 100644 --- a/src/runner.rs +++ b/src/runner.rs @@ -102,23 +102,25 @@ impl RawDebugConfig { } async fn write_debug_file(&self, response_kind: &str, contents: &str) -> Result { - tokio::fs::create_dir_all(&self.output_dir) + let now = Utc::now(); + let dated_output_dir = self.output_dir.join(now.format("%Y%m%d").to_string()); + tokio::fs::create_dir_all(&dated_output_dir) .await .with_context(|| { format!( "failed to create raw debug dir {}", - self.output_dir.display() + dated_output_dir.display() ) })?; let sequence = self.counter.fetch_add(1, Ordering::Relaxed) + 1; - let timestamp = Utc::now().format("%Y%m%dT%H%M%S%.3fZ"); + let timestamp = now.format("%Y%m%dT%H%M%S%.3fZ"); let filename = format!( "{}-{}-{sequence:06}-{}.txt", self.prefix, timestamp, sanitize_filename_component(response_kind) ); - let path = self.output_dir.join(filename); + let path = dated_output_dir.join(filename); tokio::fs::write(&path, contents) .await .with_context(|| format!("failed to write raw debug response {}", path.display()))?; @@ -298,4 +300,28 @@ mod tests { assert!(debug.contains("[REDACTED]")); assert!(!debug.contains("sk-secret-token")); } + + #[tokio::test] + async fn raw_debug_writes_files_under_date_directory() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let raw_debug = RawDebugConfig::new(temp_dir.path().join("debug"), "model/test".into()); + + let path = raw_debug + .write_response("response-kind", "body") + .await + .expect("write debug response"); + + let relative = path + .strip_prefix(temp_dir.path().join("debug")) + .expect("path under debug dir"); + let components = relative + .components() + .map(|component| component.as_os_str().to_string_lossy().into_owned()) + .collect::>(); + + assert_eq!(components.len(), 2); + assert_eq!(components[0].len(), 8); + assert!(components[0].chars().all(|ch| ch.is_ascii_digit())); + assert!(components[1].ends_with("-response-kind.txt")); + } } From 506434da6a1754bce77e6edf856457702567fa3a 
Mon Sep 17 00:00:00 2001 From: orangels Date: Sun, 10 May 2026 11:55:28 +0800 Subject: [PATCH 6/8] fix: use local time for raw debug logs --- src/runner.rs | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/runner.rs b/src/runner.rs index 41684d7..1519334 100644 --- a/src/runner.rs +++ b/src/runner.rs @@ -1,7 +1,7 @@ use crate::config::ProtocolKind; use crate::protocols; use anyhow::{Context, Result}; -use chrono::Utc; +use chrono::Local; use reqwest::Client; use reqwest::header::HeaderMap; use serde::Serialize; @@ -102,7 +102,7 @@ impl RawDebugConfig { } async fn write_debug_file(&self, response_kind: &str, contents: &str) -> Result { - let now = Utc::now(); + let now = Local::now(); let dated_output_dir = self.output_dir.join(now.format("%Y%m%d").to_string()); tokio::fs::create_dir_all(&dated_output_dir) .await @@ -113,7 +113,7 @@ impl RawDebugConfig { ) })?; let sequence = self.counter.fetch_add(1, Ordering::Relaxed) + 1; - let timestamp = now.format("%Y%m%dT%H%M%S%.3fZ"); + let timestamp = now.format("%Y%m%dT%H%M%S%.3f%z"); let filename = format!( "{}-{}-{sequence:06}-{}.txt", self.prefix, @@ -324,4 +324,25 @@ mod tests { assert!(components[0].chars().all(|ch| ch.is_ascii_digit())); assert!(components[1].ends_with("-response-kind.txt")); } + + #[tokio::test] + async fn raw_debug_uses_local_time_for_directory_and_filename() { + let temp_dir = tempfile::tempdir().expect("create temp dir"); + let raw_debug = RawDebugConfig::new(temp_dir.path().join("debug"), "model/test".into()); + + let path = raw_debug + .write_response("response-kind", "body") + .await + .expect("write debug response"); + + let local_now = chrono::Local::now(); + let local_date = local_now.format("%Y%m%d").to_string(); + let local_hour_prefix = local_now.format("%Y%m%dT%H").to_string(); + let filename = path.file_name().expect("debug filename").to_string_lossy(); + + assert!(path.starts_with(temp_dir.path().join("debug").join(local_date))); + assert!(filename.contains(&local_hour_prefix)); + assert!(filename.contains(&local_now.format("%z").to_string())); + assert!(!filename.contains('Z')); + } } From 1f5bdb26f6bd9c8a19bc841b6e830c723129844a Mon Sep 17 00:00:00 2001 From: orangels Date: Sun, 10 May 2026 12:00:05 +0800 Subject: [PATCH 7/8] feat: group reports by local date --- src/report.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/report.rs b/src/report.rs index 33a2eba..c9c2348 100644 --- a/src/report.rs +++ b/src/report.rs @@ -1,6 +1,6 @@ use crate::metrics::ErrorCount; use anyhow::{Context, Result}; -use chrono::{DateTime, Utc}; +use chrono::{DateTime, Local, Utc}; use serde::Serialize; use serde_json::Value; use std::path::{Path, PathBuf}; @@ -234,14 +234,17 @@ fn write_report( started_at: DateTime, report: &T, ) -> Result { - let reports_dir = root.join("reports"); + let local_started_at = started_at.with_timezone(&Local); + let reports_dir = root + .join("reports") + .join(local_started_at.format("%Y%m%d").to_string()); std::fs::create_dir_all(&reports_dir).with_context(|| { format!( "failed to create reports directory {}", reports_dir.display() ) })?; - let timestamp = started_at.format("%Y%m%dT%H%M%SZ"); + let timestamp = local_started_at.format("%Y%m%dT%H%M%S%z"); let filename = format!( "{}-{}-{}-{timestamp}.json", sanitize_filename_component(benchmark), @@ -296,6 +299,7 @@ mod tests { #[test] fn writes_benchmark_report_under_reports_dir() { let temp_dir = tempfile::tempdir().expect("create temp dir"); + let 
started_at = Utc.with_ymd_and_hms(2026, 5, 6, 1, 2, 3).unwrap(); let report = BenchmarkReport { benchmark: "aime2026".to_string(), provider: "openai".to_string(), @@ -318,7 +322,7 @@ mod tests { local_path: "data/benchmarks/aime2026/aime2026.jsonl".to_string(), }, run: RunReport { - started_at: Utc.with_ymd_and_hms(2026, 5, 6, 1, 2, 3).unwrap(), + started_at, duration_ms: 123, concurrency: 2, limit: Some(1), @@ -351,7 +355,12 @@ mod tests { let path = write_benchmark_report(temp_dir.path(), &report).expect("write report"); - assert!(path.ends_with("reports/aime2026-openai-gpt-test-20260506T010203Z.json")); + let local_started_at = started_at.with_timezone(&chrono::Local); + let expected_date = local_started_at.format("%Y%m%d").to_string(); + let expected_timestamp = local_started_at.format("%Y%m%dT%H%M%S%z").to_string(); + assert!(path.ends_with(format!( + "reports/{expected_date}/aime2026-openai-gpt-test-{expected_timestamp}.json" + ))); assert!(path.exists()); } From c34623307218359d475c174225746a00dec05fea Mon Sep 17 00:00:00 2001 From: orangels Date: Sun, 10 May 2026 13:25:46 +0800 Subject: [PATCH 8/8] feat: record stream chunk timings in reports --- src/cli.rs | 7 +++++++ src/protocols/anthropic.rs | 5 +++++ src/protocols/google.rs | 5 +++++ src/protocols/openai.rs | 5 +++++ src/report.rs | 6 ++++++ src/runner.rs | 1 + 6 files changed, 29 insertions(+) diff --git a/src/cli.rs b/src/cli.rs index 2493931..b919e6b 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1377,6 +1377,8 @@ fn response_debug_report( latency_ms: response.elapsed_ms, ttft_ms: response.first_token_ms, ttft_latency_delta_ms, + chunk_count: response.chunk_elapsed_ms.len(), + chunk_elapsed_ms: response.chunk_elapsed_ms.clone(), has_think_tags, }) } @@ -2013,6 +2015,7 @@ mod tests { status: 200, elapsed_ms: 1_500, first_token_ms: Some(750), + chunk_elapsed_ms: vec![100, 750], }; let debug = response_debug_report( @@ -2031,6 +2034,8 @@ mod tests { assert_eq!(debug.latency_ms, 1_500); assert_eq!(debug.ttft_ms, Some(750)); assert_eq!(debug.ttft_latency_delta_ms, Some(750)); + assert_eq!(debug.chunk_count, 2); + assert_eq!(debug.chunk_elapsed_ms, vec![100, 750]); assert!(!debug.has_think_tags); } @@ -2041,6 +2046,7 @@ mod tests { status: 200, elapsed_ms: 5_000, first_token_ms: Some(100), + chunk_elapsed_ms: vec![100], }; let debug = response_debug_report(None, None, None, "prompt".to_string(), &response) @@ -2058,6 +2064,7 @@ mod tests { status: 200, elapsed_ms: 2_000, first_token_ms: None, + chunk_elapsed_ms: Vec::new(), }; let debug = response_debug_report( diff --git a/src/protocols/anthropic.rs b/src/protocols/anthropic.rs index fbff5fa..9c42bc8 100644 --- a/src/protocols/anthropic.rs +++ b/src/protocols/anthropic.rs @@ -114,6 +114,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result Result = None; + let mut chunk_elapsed_ms = Vec::new(); let mut current_event = String::new(); let mut done = false; @@ -207,6 +209,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result Result Result Result = None; + let mut chunk_elapsed_ms = Vec::new(); while let Some(chunk) = stream.next().await { let chunk = match chunk { @@ -200,6 +202,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result Result Result Result = None; + let mut chunk_elapsed_ms = Vec::new(); let mut done = false; while let Some(chunk) = stream.next().await { @@ -197,6 +199,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result Result, pub ttft_latency_delta_ms: Option, + pub 
chunk_count: usize, + pub chunk_elapsed_ms: Vec<u128>, pub has_think_tags: bool, } @@ -416,6 +418,8 @@ mod tests { latency_ms: 1000, ttft_ms: Some(1000), ttft_latency_delta_ms: Some(0), + chunk_count: 2, + chunk_elapsed_ms: vec![100, 1000], has_think_tags: true, }], }; @@ -427,6 +431,8 @@ mod tests { assert!(json.contains("\"prompt\":\"Hi\"")); assert!(json.contains("\"stream\":false")); assert!(json.contains("\"duration\":\"60s\"")); assert!(json.contains("\"debug_requests\"")); assert!(json.contains("\"reason\":\"near_ttft_latency\"")); + assert!(json.contains("\"chunk_count\":2")); + assert!(json.contains("\"chunk_elapsed_ms\":[100,1000]")); assert!(json.contains("\"has_think_tags\":true")); } diff --git a/src/runner.rs index 1519334..bfb11ec 100644 --- a/src/runner.rs +++ b/src/runner.rs @@ -243,6 +243,7 @@ pub struct ModelResponse { pub status: u16, pub elapsed_ms: u128, pub first_token_ms: Option<u128>, + pub chunk_elapsed_ms: Vec<u128>, } pub async fn run_model_request(