- use crate::metrics::ErrorCount;
- use anyhow::{Context, Result};
- use chrono::{DateTime, Utc};
- use serde::Serialize;
- use std::path::{Path, PathBuf};
-
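- /// Provenance of the benchmark dataset: source, split, optional revision, and the local path it was loaded from.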
- #[derive(Debug, Clone, Serialize)]
- pub struct DatasetReport {
-     pub source: String,
-     pub split: String,
-     pub revision: Option<String>,
-     pub local_path: String,
- }
-
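- /// Timing and request settings recorded for a single benchmark run.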
- #[derive(Debug, Clone, Serialize)]
- pub struct RunReport {
-     pub started_at: DateTime<Utc>,
-     pub duration_ms: u128,
-     pub concurrency: usize,
-     pub limit: Option<usize>,
-     pub temperature: f32,
-     pub max_tokens: u32,
- }
-
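- /// p50/p95/p99 percentiles in milliseconds; `None` when a percentile could not be computed.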
- #[derive(Debug, Clone, Serialize)]
- pub struct LatencyReport {
-     pub p50: Option<u64>,
-     pub p95: Option<u64>,
-     pub p99: Option<u64>,
- }
-
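- /// Aggregate outcome counts (success/correct/wrong/failed) and latency percentiles for a benchmark run.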
- #[derive(Debug, Clone, Serialize)]
- pub struct BenchmarkSummaryReport {
-     pub accuracy: Option<f64>,
-     pub success: u64,
-     pub total: u64,
-     pub correct: u64,
-     pub wrong: u64,
-     pub failed: u64,
-     pub latency_ms: LatencyReport,
-     pub ttft_ms: LatencyReport,
- }
-
- #[derive(Debug, Clone, Serialize)]
- pub struct WrongCaseReport {
-     pub id: String,
-     pub question: String,
-     pub expected: String,
-     pub actual: String,
-     pub raw_output: String,
- }
-
- #[derive(Debug, Clone, Serialize)]
- pub struct CorrectCaseReport {
-     pub id: String,
-     pub question: String,
-     pub expected: String,
-     pub raw_output: String,
- }
-
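- /// Top-level report serialized to JSON for an accuracy benchmark run.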
- #[derive(Debug, Clone, Serialize)]
- pub struct BenchmarkReport {
-     pub benchmark: String,
-     pub provider: String,
-     pub model: String,
-     pub params: BenchmarkParamsReport,
-     pub dataset: DatasetReport,
-     pub run: RunReport,
-     pub summary: BenchmarkSummaryReport,
-     pub errors: Vec<ErrorCount>,
-     pub correct_samples: Vec<CorrectCaseReport>,
-     pub wrong_cases: Vec<WrongCaseReport>,
- }
-
- #[derive(Debug, Clone, Serialize)]
- pub struct BenchmarkParamsReport {
-     pub stream: bool,
-     pub thinking: Option<ThinkingParamsReport>,
- }
-
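- /// Request and probe parameters used for an RPM (requests-per-minute) run.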
- #[derive(Debug, Clone, Serialize)]
- pub struct RpmParamsReport {
-     pub prompt: String,
-     pub stream: bool,
-     pub thinking: Option<ThinkingParamsReport>,
-     pub duration: String,
-     pub burst: Option<u32>,
-     pub concurrency: usize,
-     pub probe_seconds: Option<u64>,
-     pub window_offset_ms: u64,
- }
-
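- /// Provider-specific thinking/reasoning settings echoed into the report; fields that do not apply stay `None`.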
- #[derive(Debug, Clone, PartialEq, Eq, Serialize)]
- pub struct ThinkingParamsReport {
-     pub enabled: bool,
-     #[serde(rename = "type")]
-     pub kind: Option<String>,
-     pub budget_tokens: Option<u32>,
-     pub effort: Option<String>,
-     pub display: Option<String>,
-     pub reasoning_effort: Option<String>,
-     pub reasoning_summary: Option<String>,
- }
-
- #[derive(Debug, Clone, Serialize)]
- pub struct RpmRunReport {
-     pub started_at: DateTime<Utc>,
-     pub duration_ms: u128,
-     pub target_rpm: u32,
-     pub actual_rpm: Option<f64>,
-     pub temperature: f32,
-     pub max_tokens: u32,
- }
-
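- /// Request totals and latency percentiles observed during an RPM run.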
- #[derive(Debug, Clone, Serialize)]
- pub struct RpmSummaryReport {
-     pub actual_requests: u64,
-     pub success: u64,
-     pub failure: u64,
-     pub latency_ms: LatencyReport,
-     pub ttft_ms: LatencyReport,
- }
-
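- /// Top-level report serialized to JSON for an RPM run.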
- #[derive(Debug, Clone, Serialize)]
- pub struct RpmReport {
-     pub benchmark: String,
-     pub mode: String,
-     pub provider: String,
-     pub model: String,
-     pub params: RpmParamsReport,
-     pub run: RpmRunReport,
-     pub summary: RpmSummaryReport,
-     pub mode_detail: Option<RpmModeDetailReport>,
-     pub errors: Vec<ErrorCount>,
- }
-
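- /// Mode-specific measurements (burst phase, refill and sliding probes, window-boundary counts) and the limiter inference derived from them.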
- #[derive(Debug, Clone, Serialize)]
- pub struct RpmModeDetailReport {
-     pub burst: Option<PhaseSummaryReport>,
-     pub refill_probe: Vec<ProbeSecondReport>,
-     pub sliding_probe: Vec<ProbeSecondReport>,
-     pub window_boundary: Option<WindowBoundaryReport>,
-     pub inference: Option<LimiterInferenceReport>,
- }
-
- #[derive(Debug, Clone, Serialize)]
- pub struct PhaseSummaryReport {
-     pub sent: u64,
-     pub success: u64,
-     pub failure: u64,
- }
-
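- /// Request counts recorded for a single second of a probe.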
- #[derive(Debug, Clone, Serialize)]
- pub struct ProbeSecondReport {
-     pub second: u64,
-     pub sent: u64,
-     pub success: u64,
-     pub failure: u64,
- }
-
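- /// Request counts just before and just after a rate-limit window boundary.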
- #[derive(Debug, Clone, Serialize)]
- pub struct WindowBoundaryReport {
-     pub before: PhaseSummaryReport,
-     pub after: PhaseSummaryReport,
- }
-
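- /// The inferred rate-limiter type, a confidence label, and the signals supporting the inference.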
- #[derive(Debug, Clone, Serialize)]
- pub struct LimiterInferenceReport {
-     pub likely_limiter: LimiterInferenceKind,
-     pub confidence: String,
-     pub signals: Vec<String>,
- }
-
- #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
- #[serde(rename_all = "snake_case")]
- pub enum LimiterInferenceKind {
-     TokenBucket,
-     FixedWindow,
-     SlidingWindow,
-     Unknown,
- }
-
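- /// Writes a benchmark report as pretty-printed JSON under `<root>/reports/` and returns the file path.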
- pub fn write_benchmark_report(root: &Path, report: &BenchmarkReport) -> Result<PathBuf> {
-     write_report(
-         root,
-         &report.benchmark,
-         &report.provider,
-         &report.model,
-         report.run.started_at,
-         report,
-     )
- }
-
- pub fn write_rpm_report(root: &Path, report: &RpmReport) -> Result<PathBuf> {
-     write_report(
-         root,
-         &report.benchmark,
-         &report.provider,
-         &report.model,
-         report.run.started_at,
-         report,
-     )
- }
-
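- /// Shared writer: ensures `<root>/reports/` exists, then saves the serialized report as
- /// `<benchmark>-<provider>-<model>-<timestamp>.json`, with sanitized name components and a UTC
- /// timestamp in `%Y%m%dT%H%M%SZ` form.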
- fn write_report<T: Serialize>(
-     root: &Path,
-     benchmark: &str,
-     provider: &str,
-     model: &str,
-     started_at: DateTime<Utc>,
-     report: &T,
- ) -> Result<PathBuf> {
-     let reports_dir = root.join("reports");
-     std::fs::create_dir_all(&reports_dir).with_context(|| {
-         format!(
-             "failed to create reports directory {}",
-             reports_dir.display()
-         )
-     })?;
-     let timestamp = started_at.format("%Y%m%dT%H%M%SZ");
-     let filename = format!(
-         "{}-{}-{}-{timestamp}.json",
-         sanitize_filename_component(benchmark),
-         sanitize_filename_component(provider),
-         sanitize_filename_component(model),
-     );
-     let path = reports_dir.join(filename);
-     let contents = serde_json::to_string_pretty(report).context("failed to serialize report")?;
-     std::fs::write(&path, contents)
-         .with_context(|| format!("failed to write report {}", path.display()))?;
-     Ok(path)
- }
-
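- /// Maps any character other than ASCII alphanumerics, `.`, `_`, and `-` to `-`, collapses and trims
- /// runs of `-`, and falls back to `"unknown"` when nothing survives;
- /// e.g. `"openai/gpt-4.1:mini"` becomes `"openai-gpt-4.1-mini"`.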
- pub fn sanitize_filename_component(value: &str) -> String {
-     let sanitized = value
-         .trim()
-         .chars()
-         .map(|ch| {
-             if ch.is_ascii_alphanumeric() || ch == '.' || ch == '_' || ch == '-' {
-                 ch
-             } else {
-                 '-'
-             }
-         })
-         .collect::<String>();
-     let collapsed = sanitized
-         .split('-')
-         .filter(|part| !part.is_empty())
-         .collect::<Vec<_>>()
-         .join("-");
-     if collapsed.is_empty() {
-         "unknown".to_string()
-     } else {
-         collapsed
-     }
- }
-
- #[cfg(test)]
- mod tests {
-     use super::*;
-     use chrono::TimeZone;
-
-     #[test]
-     fn sanitizes_model_names_for_filenames() {
-         assert_eq!(
-             sanitize_filename_component("openai/gpt-4.1:mini"),
-             "openai-gpt-4.1-mini"
-         );
-         assert_eq!(sanitize_filename_component(" weird name "), "weird-name");
-     }
-
-     #[test]
-     fn writes_benchmark_report_under_reports_dir() {
-         let temp_dir = tempfile::tempdir().expect("create temp dir");
-         let report = BenchmarkReport {
-             benchmark: "aime2026".to_string(),
-             provider: "openai".to_string(),
-             model: "gpt/test".to_string(),
-             params: BenchmarkParamsReport {
-                 stream: false,
-                 thinking: None,
-             },
-             dataset: DatasetReport {
-                 source: "local".to_string(),
-                 split: "train".to_string(),
-                 revision: None,
-                 local_path: "data/benchmarks/aime2026/aime2026.jsonl".to_string(),
-             },
-             run: RunReport {
-                 started_at: Utc.with_ymd_and_hms(2026, 5, 6, 1, 2, 3).unwrap(),
-                 duration_ms: 123,
-                 concurrency: 2,
-                 limit: Some(1),
-                 temperature: 0.0,
-                 max_tokens: 1024,
-             },
-             summary: BenchmarkSummaryReport {
-                 accuracy: Some(1.0),
-                 success: 1,
-                 total: 1,
-                 correct: 1,
-                 wrong: 0,
-                 failed: 0,
-                 latency_ms: LatencyReport {
-                     p50: Some(10),
-                     p95: Some(10),
-                     p99: Some(10),
-                 },
-                 ttft_ms: LatencyReport {
-                     p50: None,
-                     p95: None,
-                     p99: None,
-                 },
-             },
-             errors: vec![],
-             correct_samples: vec![],
-             wrong_cases: vec![],
-         };
-
-         let path = write_benchmark_report(temp_dir.path(), &report).expect("write report");
-
-         assert!(path.ends_with("reports/aime2026-openai-gpt-test-20260506T010203Z.json"));
-         assert!(path.exists());
-     }
-
-     #[test]
-     fn rpm_report_serializes_params() {
-         let report = RpmReport {
-             benchmark: "rpm".to_string(),
-             provider: "openai".to_string(),
-             model: "gpt/test".to_string(),
-             params: RpmParamsReport {
-                 prompt: "Hi".to_string(),
-                 stream: false,
-                 thinking: None,
-                 duration: "60s".to_string(),
-                 burst: None,
-                 concurrency: 10,
-                 probe_seconds: None,
-                 window_offset_ms: 0,
-             },
-             run: RpmRunReport {
-                 started_at: Utc.with_ymd_and_hms(2026, 5, 6, 1, 2, 3).unwrap(),
-                 duration_ms: 1000,
-                 target_rpm: 60,
-                 actual_rpm: Some(60.0),
-                 temperature: 0.0,
-                 max_tokens: 1024,
-             },
-             summary: RpmSummaryReport {
-                 actual_requests: 1,
-                 success: 1,
-                 failure: 0,
-                 latency_ms: LatencyReport {
-                     p50: Some(10),
-                     p95: Some(10),
-                     p99: Some(10),
-                 },
-                 ttft_ms: LatencyReport {
-                     p50: None,
-                     p95: None,
-                     p99: None,
-                 },
-             },
-             mode: "sustained".to_string(),
-             mode_detail: None,
-             errors: vec![],
-         };
-
-         let json = serde_json::to_string(&report).expect("serialize report");
-
-         assert!(json.contains("\"prompt\":\"Hi\""));
-         assert!(json.contains("\"stream\":false"));
-         assert!(json.contains("\"duration\":\"60s\""));
-     }
-
-     #[test]
-     fn rpm_report_serializes_mode_details_and_inference() {
-         let report = RpmReport {
-             benchmark: "rpm".to_string(),
-             mode: "token-bucket".to_string(),
-             provider: "anthropic".to_string(),
-             model: "claude/test".to_string(),
-             params: RpmParamsReport {
-                 prompt: "Hi".to_string(),
-                 stream: true,
-                 thinking: Some(ThinkingParamsReport {
-                     enabled: true,
-                     kind: Some("enabled".to_string()),
-                     budget_tokens: Some(10000),
-                     effort: None,
-                     display: Some("omitted".to_string()),
-                     reasoning_effort: None,
-                     reasoning_summary: None,
-                 }),
-                 duration: "90s".to_string(),
-                 burst: Some(50),
-                 concurrency: 10,
-                 probe_seconds: Some(90),
-                 window_offset_ms: 200,
-             },
-             run: RpmRunReport {
-                 started_at: Utc.with_ymd_and_hms(2026, 5, 6, 1, 2, 3).unwrap(),
-                 duration_ms: 2000,
-                 target_rpm: 120,
-                 actual_rpm: Some(120.0),
-                 temperature: 0.0,
-                 max_tokens: 1024,
-             },
-             summary: RpmSummaryReport {
-                 actual_requests: 4,
-                 success: 4,
-                 failure: 0,
-                 latency_ms: LatencyReport {
-                     p50: Some(10),
-                     p95: Some(20),
-                     p99: Some(30),
-                 },
-                 ttft_ms: LatencyReport {
-                     p50: None,
-                     p95: None,
-                     p99: None,
-                 },
-             },
-             mode_detail: Some(RpmModeDetailReport {
-                 burst: Some(PhaseSummaryReport {
-                     sent: 2,
-                     success: 2,
-                     failure: 0,
-                 }),
-                 refill_probe: vec![ProbeSecondReport {
-                     second: 1,
-                     sent: 2,
-                     success: 2,
-                     failure: 0,
-                 }],
-                 sliding_probe: vec![],
-                 window_boundary: None,
-                 inference: Some(LimiterInferenceReport {
-                     likely_limiter: LimiterInferenceKind::TokenBucket,
-                     confidence: "medium".to_string(),
-                     signals: vec!["probe success approximated refill".to_string()],
-                 }),
-             }),
-             errors: vec![],
-         };
-
-         let json = serde_json::to_string(&report).expect("serialize report");
-
-         assert!(json.contains("\"mode\":\"token-bucket\""));
-         assert!(json.contains("\"refill_probe\""));
-         assert!(json.contains("\"likely_limiter\":\"token_bucket\""));
-     }
- }