@@ -1,3 +1,4 @@
/target
/data/benchmarks
/reports
/outputs
@@ -226,6 +226,8 @@ Benchmark and RPM commands print a terminal summary with success counts, failure
Benchmark reports include `wrong_cases`, with each wrong case containing the case id, question, expected answer, extracted actual answer, and raw model output. RPM reports include request counts, mode, target RPM, observed RPM, latency, error counts, and mode-specific details such as burst summaries, probe summaries, window-boundary summaries, and optional limiter inference.
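As a rough orientation, one `wrong_cases` entry can be modelled with a struct like the sketch below; the field names are assumptions drawn from the description above, not necessarily the exact keys emitted in the report JSON.
```rust
// Illustrative sketch only; field names are assumptions, not the report's exact keys.
#[derive(serde::Serialize)]
struct WrongCase {
    case_id: String,    // benchmark case identifier
    question: String,   // question text sent to the model
    expected: String,   // reference answer from the dataset
    actual: String,     // answer extracted from the model output
    raw_output: String, // unprocessed model output
}
```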
Use `--debug-raw` with `check`, `bench`, or `rpm` to write upstream raw responses under `outputs/debug/`. Non-streaming requests save the raw JSON body, and streaming requests save the raw SSE lines. The directory is ignored by git and can help diagnose relay-side response rewriting.
## Comparing Scores
Use these results as relay benchmark signals, not absolute proof by themselves. To compare against official scores or another run, align the same dataset and source, prompt text, temperature, `max_tokens`, sample limit, and scoring logic. Differences in any of those inputs can make the reported accuracy diverge from official numbers or other benchmark harnesses.
@@ -356,6 +356,24 @@ ls -lt reports | head
jq . reports/<report-file>.json
```
### Raw Response Debug
If you need to check whether a relay is rewriting model responses, enable `--debug-raw`:
```bash
cargo run -- check --provider anthropic --stream --debug-raw --prompt "hello"
cargo run -- bench aime2026 --provider anthropic --stream --debug-raw --limit 3
cargo run -- rpm --provider anthropic --rpm 60 --duration 30s --stream --debug-raw --prompt "hello"
```
When enabled, raw responses are written to:
```text
outputs/debug/
```
Non-streaming requests save the full JSON body; streaming requests save the raw SSE lines, including `event:` and `data:`. The files do not contain the API token, but they may contain model output, so `outputs/` is not committed to git.
The benchmark report contains:
- benchmark
@@ -195,6 +195,25 @@ cargo run -- bench aime2026 --provider anthropic --stream
If the provider config already sets `stream: true`, streaming is used by default and no extra flag is needed. The CLI `--stream` flag takes precedence over the config.
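Internally the effective flag is resolved with a single `unwrap_or` over the optional CLI value; the line below is taken from the benchmark runners in this change.
```rust
// An explicit CLI --stream (or --stream false) wins; otherwise the provider
// config's `stream` default applies.
base_request.stream = stream.unwrap_or(provider_config.stream);
```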
## Raw Response Debug Mode
To check whether a relay rewrites responses, add `--debug-raw`:
```bash
cargo run -- check --provider anthropic --stream --debug-raw --prompt "Reply with pong."
cargo run -- bench aime2026 --provider anthropic --stream --debug-raw --limit 3
cargo run -- rpm --provider anthropic --rpm 60 --duration 30s --stream --debug-raw --prompt "Hi"
```
When enabled, the program writes the upstream raw responses to `outputs/debug/`:
- Non-streaming requests save the full JSON body
- Streaming requests save the raw SSE lines, including `event:` and `data:`
- The files do not contain the API token
- The files may contain model output, so `outputs/` is not committed to git
If `<think>` appears in `raw_output` in `report.json`, inspect the debug file to tell whether it came from a `text_delta` or whether the upstream returned a separate thinking event.
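Each upstream response captured with the flag becomes one file under `outputs/debug/`, named `<provider>-<model>-<UTC timestamp>-<sequence>-<kind>.txt`, where the kind is e.g. `anthropic-sse` or `openai-json`. A hypothetical listing (model name and timestamps are placeholders):
```text
outputs/debug/
  anthropic-<model>-20250101T120000.000Z-000001-anthropic-sse.txt
  anthropic-<model>-20250101T120001.250Z-000002-anthropic-sse.txt
```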
---
## Troubleshooting
@@ -13,7 +13,7 @@ use crate::rpm_modes::{
ProbePhase, RpmMode, ScheduledProbe, burst_schedule, sliding_window_schedule,
sustained_schedule, token_bucket_schedule, window_boundary_plan,
};
use crate::runner::{ModelRequest, run_model_request};
use crate::runner::{ModelRequest, RawDebugConfig, run_model_request};
use anyhow::{Context, Result, bail};
use chrono::Utc;
use clap::{Parser, Subcommand};
@@ -49,6 +49,8 @@ pub enum Command {
prompt: String,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
stream: Option<bool>,
#[arg(long)]
debug_raw: bool,
},
Dataset {
#[command(subcommand)]
@@ -83,6 +85,8 @@ pub enum Command {
prompt: String,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
stream: Option<bool>,
#[arg(long)]
debug_raw: bool,
},
}
@@ -108,6 +112,8 @@ pub enum BenchCommand {
stream: Option<bool>,
#[arg(long, default_value_t = 32768)]
max_tokens: u32,
#[arg(long)]
debug_raw: bool,
},
GpqaDiamond {
#[arg(long, default_value = "config.yaml")]
@@ -124,6 +130,8 @@ pub enum BenchCommand {
stream: Option<bool>,
#[arg(long, default_value_t = 32768)]
max_tokens: u32,
#[arg(long)]
debug_raw: bool,
},
}
@@ -135,17 +143,21 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
model,
prompt,
stream,
debug_raw,
} => {
let config = AppConfig::load(&config)?;
let provider = config.resolved_provider(provider.as_deref())?;
let provider_name = provider_name(&config, provider.as_deref())?;
let provider = config.resolved_provider(Some(&provider_name))?;
let model = model.unwrap_or_else(|| provider.default_model.clone());
let request = ModelRequest {
base_url: provider.base_url.clone(),
api_token: provider.api_token.clone(),
model: model.unwrap_or_else(|| provider.default_model.clone()),
model: model.clone(),
prompt,
temperature: 0.0,
max_tokens: 1024,
stream: stream.unwrap_or(provider.stream),
raw_debug: raw_debug_config(debug_raw, &provider_name, &model),
};
let response = run_model_request(provider.protocol, request).await?;
@@ -180,6 +192,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
concurrency,
prompt,
stream,
debug_raw,
} => {
run_rpm(
config,
@@ -195,6 +208,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
concurrency,
prompt,
stream,
debug_raw,
},
)
.await
@@ -212,6 +226,7 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
limit,
stream,
max_tokens,
debug_raw,
} => {
run_aime_benchmark(
config,
@@ -221,6 +236,7 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
limit,
stream,
max_tokens,
debug_raw,
)
.await
}
@@ -232,6 +248,7 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
limit,
stream,
max_tokens,
debug_raw,
} => {
run_gpqa_benchmark(
config,
@@ -241,6 +258,7 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
limit,
stream,
max_tokens,
debug_raw,
)
.await
}
@@ -255,6 +273,7 @@ async fn run_aime_benchmark(
limit: Option<usize>,
stream: Option<bool>,
max_tokens: u32,
debug_raw: bool,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -275,6 +294,7 @@ async fn run_aime_benchmark(
let started = Instant::now();
let mut base_request = request_template(&provider_config, &model, 0.0, max_tokens);
base_request.stream = stream.unwrap_or(provider_config.stream);
base_request.raw_debug = raw_debug_config(debug_raw, &provider_name, &model);
let protocol = provider_config.protocol;
let pb = ProgressBar::new(total);
@@ -364,6 +384,7 @@ async fn run_gpqa_benchmark(
limit: Option<usize>,
stream: Option<bool>,
max_tokens: u32,
debug_raw: bool,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -384,6 +405,7 @@ async fn run_gpqa_benchmark(
let started = Instant::now();
let mut base_request = request_template(&provider_config, &model, 0.0, max_tokens);
base_request.stream = stream.unwrap_or(provider_config.stream);
base_request.raw_debug = raw_debug_config(debug_raw, &provider_name, &model);
let protocol = provider_config.protocol;
let pb = ProgressBar::new(total);
@@ -479,6 +501,7 @@ struct RpmCommandOptions {
concurrency: Option<usize>,
prompt: String,
stream: Option<bool>,
debug_raw: bool,
}
async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
@@ -501,6 +524,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
let request = ModelRequest {
prompt: options.prompt.clone(),
stream: stream_enabled,
raw_debug: raw_debug_config(options.debug_raw, &provider_name, &model),
..request_template(&provider_config, &model, 0.0, 1024)
};
let started_at = Utc::now();
@@ -921,6 +945,15 @@ fn provider_name(config: &AppConfig, provider: Option<&str>) -> Result<String> {
}
}
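// Only construct a RawDebugConfig when --debug-raw was passed; the prefix ties
// each dump file to the provider and model that produced it.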
fn raw_debug_config(enabled: bool, provider: &str, model: &str) -> Option<RawDebugConfig> {
enabled.then(|| {
RawDebugConfig::new(
PathBuf::from("outputs/debug"),
format!("{provider}-{model}"),
)
})
}
fn request_template(
provider: &crate::config::ProviderConfig,
model: &str,
@@ -935,6 +968,7 @@ fn request_template(
temperature,
max_tokens,
stream: provider.stream,
raw_debug: None,
}
}
@@ -1297,6 +1331,26 @@ mod tests {
assert_eq!(stream, Some(false));
}
#[test]
fn check_command_parses_debug_raw() {
let cli = Cli::try_parse_from([
"lq_token_test",
"check",
"--provider",
"anthropic",
"--prompt",
"hello",
"--debug-raw",
])
.expect("parse check debug raw");
let Command::Check { debug_raw, .. } = cli.command else {
panic!("expected check command");
};
assert!(debug_raw);
}
#[test]
fn rpm_command_parses_window_boundary_offset() {
let cli = Cli::try_parse_from([
@@ -28,6 +28,12 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
.text()
.await
.context("failed to read Anthropic response body")?;
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("anthropic-json", &body)
.await
.context("failed to write Anthropic raw debug response")?;
}
if !status.is_success() {
bail!(
@@ -81,6 +87,12 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
.text()
.await
.context("failed to read Anthropic error response body")?;
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("anthropic-error", &body)
.await
.context("failed to write Anthropic raw debug error response")?;
}
bail!(
"{}",
super::upstream_error_message("Anthropic", status_code, &body)
@@ -90,6 +102,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let mut stream = response.bytes_stream();
let mut buffer = super::SseLineBuffer::new();
let mut text = String::new();
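// Accumulate every SSE line verbatim so --debug-raw can dump the unmodified stream.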
let mut raw_stream = String::new();
let mut first_token_ms: Option<u128> = None;
let mut current_event = String::new();
let mut done = false;
@@ -97,6 +110,8 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
while let Some(chunk) = stream.next().await {
let chunk = chunk.context("Anthropic stream interrupted")?;
for line in buffer.feed(&chunk) {
raw_stream.push_str(&line);
raw_stream.push('\n');
if let Some(event_type) = line.strip_prefix("event: ") {
current_event = event_type.to_string();
continue;
@@ -127,6 +142,13 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
}
}
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("anthropic-sse", &raw_stream)
.await
.context("failed to write Anthropic raw debug stream")?;
}
if text.is_empty() {
bail!("Anthropic stream completed without producing any content");
}
@@ -165,7 +187,7 @@ struct AnthropicDeltaContent {
#[cfg(test)]
mod tests {
use crate::runner::ModelRequest;
use crate::runner::{ModelRequest, RawDebugConfig};
use reqwest::Client;
use wiremock::matchers::{body_json, header, method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
@@ -199,6 +221,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let response = super::send(&Client::new(), &request)
@@ -230,6 +253,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let error = super::send(&Client::new(), &request)
@@ -261,6 +285,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let message = super::send(&Client::new(), &request)
@@ -296,6 +321,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let response = super::send(&Client::new(), &request)
@@ -308,6 +334,7 @@ mod tests {
#[tokio::test]
async fn extracts_streaming_messages_text_and_ttft() {
let server = MockServer::start().await;
let temp_dir = tempfile::tempdir().expect("create temp dir");
Mock::given(method("POST"))
.and(path("/v1/messages"))
.and(header("x-api-key", "test-token"))
@@ -342,6 +369,10 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: true,
raw_debug: Some(RawDebugConfig::new(
temp_dir.path().to_path_buf(),
"anthropic-claude-test".to_string(),
)),
};
let response = super::send_stream(&Client::new(), &request)
@@ -351,5 +382,14 @@ mod tests {
assert_eq!(response.status, 200);
assert_eq!(response.text, "hi there");
assert!(response.first_token_ms.is_some());
let debug_files = std::fs::read_dir(temp_dir.path())
.expect("read debug dir")
.collect::<Result<Vec<_>, _>>()
.expect("debug entries");
assert_eq!(debug_files.len(), 1);
let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file");
assert!(raw.contains("event: content_block_delta"));
assert!(raw.contains("\"text\":\"hi \""));
}
}
@@ -27,6 +27,12 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
.text()
.await
.context("failed to read OpenAI response body")?;
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("openai-json", &body)
.await
.context("failed to write OpenAI raw debug response")?;
}
if !status.is_success() {
bail!(
@@ -78,6 +84,12 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
.text()
.await
.context("failed to read OpenAI error response body")?;
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("openai-error", &body)
.await
.context("failed to write OpenAI raw debug error response")?;
}
bail!(
"{}",
super::upstream_error_message("OpenAI", status_code, &body)
@@ -87,12 +99,15 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let mut stream = response.bytes_stream();
let mut buffer = super::SseLineBuffer::new();
let mut text = String::new();
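// Same as the Anthropic path: collect the raw SSE lines for --debug-raw dumps.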
let mut raw_stream = String::new();
let mut first_token_ms: Option<u128> = None;
let mut done = false;
while let Some(chunk) = stream.next().await {
let chunk = chunk.context("OpenAI stream interrupted")?;
for line in buffer.feed(&chunk) {
raw_stream.push_str(&line);
raw_stream.push('\n');
let Some(data) = line.strip_prefix("data: ") else {
continue;
};
@@ -117,6 +132,13 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
}
}
if let Some(raw_debug) = &request.raw_debug {
raw_debug
.write_response("openai-sse", &raw_stream)
.await
.context("failed to write OpenAI raw debug stream")?;
}
if text.is_empty() {
bail!("OpenAI stream completed without producing any content");
}
@@ -161,7 +183,7 @@ struct OpenAiStreamDelta {
#[cfg(test)]
mod tests {
use crate::runner::ModelRequest;
use crate::runner::{ModelRequest, RawDebugConfig};
use reqwest::Client;
use wiremock::matchers::{body_json, header, method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
@@ -196,6 +218,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let response = super::send(&Client::new(), &request)
@@ -227,6 +250,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let error = super::send(&Client::new(), &request)
@@ -257,6 +281,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let message = super::send(&Client::new(), &request)
@@ -294,6 +319,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let response = super::send(&Client::new(), &request)
@@ -306,6 +332,7 @@ mod tests {
#[tokio::test]
async fn extracts_streaming_chat_completion_text_and_ttft() {
let server = MockServer::start().await;
let temp_dir = tempfile::tempdir().expect("create temp dir");
Mock::given(method("POST"))
.and(path("/chat/completions"))
.and(header("authorization", "Bearer test-token"))
@@ -336,6 +363,10 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: true,
raw_debug: Some(RawDebugConfig::new(
temp_dir.path().to_path_buf(),
"openai-gpt-test".to_string(),
)),
};
let response = super::send_stream(&Client::new(), &request)
@@ -345,5 +376,14 @@ mod tests {
assert_eq!(response.status, 200);
assert_eq!(response.text, "hi there");
assert!(response.first_token_ms.is_some());
let debug_files = std::fs::read_dir(temp_dir.path())
.expect("read debug dir")
.collect::<Result<Vec<_>, _>>()
.expect("debug entries");
assert_eq!(debug_files.len(), 1);
let raw = std::fs::read_to_string(debug_files[0].path()).expect("read raw debug file");
assert!(raw.contains("data: {\"choices\""));
assert!(raw.contains("data: [DONE]"));
}
}
@@ -1,8 +1,12 @@
use crate::config::ProtocolKind;
use crate::protocols;
use anyhow::Result;
use anyhow::{Context, Result};
use chrono::Utc;
use reqwest::Client;
use std::fmt;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
#[derive(Clone)]
@@ -14,6 +18,48 @@ pub struct ModelRequest {
pub temperature: f32,
pub max_tokens: u32,
pub stream: bool,
pub raw_debug: Option<RawDebugConfig>,
}
#[derive(Clone)]
pub struct RawDebugConfig {
output_dir: PathBuf,
prefix: String,
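// Shared across clones so concurrent requests in one run still get unique
// sequence numbers in their file names.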
counter: Arc<AtomicU64>,
}
impl RawDebugConfig {
pub fn new(output_dir: PathBuf, prefix: String) -> Self {
Self {
output_dir,
prefix: sanitize_filename_component(&prefix),
counter: Arc::new(AtomicU64::new(0)),
}
}
pub async fn write_response(&self, response_kind: &str, contents: &str) -> Result<PathBuf> {
tokio::fs::create_dir_all(&self.output_dir)
.await
.with_context(|| {
format!(
"failed to create raw debug dir {}",
self.output_dir.display()
)
})?;
let sequence = self.counter.fetch_add(1, Ordering::Relaxed) + 1;
let timestamp = Utc::now().format("%Y%m%dT%H%M%S%.3fZ");
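// File name layout: <prefix>-<UTC timestamp>-<zero-padded sequence>-<kind>.txt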
let filename = format!(
"{}-{}-{sequence:06}-{}.txt",
self.prefix,
timestamp,
sanitize_filename_component(response_kind)
);
let path = self.output_dir.join(filename);
tokio::fs::write(&path, contents)
.await
.with_context(|| format!("failed to write raw debug response {}", path.display()))?;
Ok(path)
}
}
impl fmt::Debug for ModelRequest {
@@ -27,10 +73,34 @@ impl fmt::Debug for ModelRequest {
.field("temperature", &self.temperature)
.field("max_tokens", &self.max_tokens)
.field("stream", &self.stream)
.field("raw_debug", &self.raw_debug.is_some())
.finish()
}
}
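// Maps anything outside [A-Za-z0-9._-] to '-', then collapses runs of '-' and
// drops empty segments, e.g. "claude 3.5--sonnet!" becomes "claude-3.5-sonnet";
// an all-symbol input falls back to "unknown".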
fn sanitize_filename_component(value: &str) -> String {
let sanitized = value
.chars()
.map(|character| {
if character.is_ascii_alphanumeric() || matches!(character, '.' | '-' | '_') {
character
} else {
'-'
}
})
.collect::<String>();
let collapsed = sanitized
.split('-')
.filter(|part| !part.is_empty())
.collect::<Vec<_>>()
.join("-");
if collapsed.is_empty() {
"unknown".to_string()
} else {
collapsed
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ModelResponse {
pub text: String,
@@ -82,6 +152,7 @@ mod tests {
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
};
let debug = format!("{request:?}");