diff --git a/src/benchmarks/mod.rs b/src/benchmarks/mod.rs
index e6d2745..6e57f1c 100644
--- a/src/benchmarks/mod.rs
+++ b/src/benchmarks/mod.rs
@@ -31,10 +31,10 @@ pub async fn fetch_dataset(dataset: &str, data_dir: &Path) -> Result<PathBuf> {
     let client = reqwest::Client::new();
     let mut request = client.get(spec.source_url);
 
-    if spec.use_hf_token {
-        if let Ok(token) = std::env::var("HF_TOKEN") {
-            request = request.bearer_auth(token);
-        }
+    if spec.use_hf_token
+        && let Ok(token) = std::env::var("HF_TOKEN")
+    {
+        request = request.bearer_auth(token);
     }
 
     let response = request
@@ -85,12 +85,14 @@ pub async fn fetch_dataset(dataset: &str, data_dir: &Path) -> Result<PathBuf> {
     Ok(spec.file_path)
 }
 
+type DatasetNormalizer = fn(&[u8]) -> Result<Vec<u8>>;
+
 struct DatasetSpec<'a> {
     source_url: &'a str,
     dir: PathBuf,
     file_path: PathBuf,
     use_hf_token: bool,
-    normalize: Option<fn(&[u8]) -> Result<Vec<u8>>>,
+    normalize: Option<DatasetNormalizer>,
 }
 
 fn dataset_spec<'a>(dataset: &str, data_dir: &'a Path) -> Result<DatasetSpec<'a>> {
diff --git a/src/cli.rs b/src/cli.rs
index 66e8436..fb119e6 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -187,7 +187,7 @@ async fn run_aime_benchmark(
     let base_request = request_template(provider_config, &model, 0.0, 1024);
     let protocol = provider_config.protocol;
 
-    let results = stream::iter(cases.into_iter())
+    let results = stream::iter(cases)
         .map(|case| {
             let mut request = base_request.clone();
             request.prompt = case.prompt();
@@ -208,7 +208,7 @@
                 metrics.record_success(response.status, response.elapsed_ms as u64);
                 let actual = judge::extract_final_integer(&response.text)
                     .unwrap_or_else(|| "no_answer".to_string());
-                let correct = actual == case.answer.trim();
+                let correct = judge::judge_integer(&response.text, &case.answer);
                 metrics.record_judgement(correct);
                 if !correct {
                     wrong_cases.push(WrongCaseReport {
@@ -225,18 +225,18 @@
     }
 
     let summary = metrics.summary();
-    let report = benchmark_report(
-        "aime2026",
-        provider_name,
+    let report = benchmark_report(BenchmarkReportInput {
+        benchmark: "aime2026",
+        provider: provider_name,
         model,
         dataset,
         started_at,
-        started.elapsed().as_millis(),
+        duration_ms: started.elapsed().as_millis(),
         concurrency,
         limit,
         summary,
         wrong_cases,
-    );
+    });
     let report_path = write_benchmark_report(Path::new("."), &report)?;
     print_benchmark_report(&report, &report_path);
     Ok(())
@@ -268,7 +268,7 @@ async fn run_gpqa_benchmark(
     let base_request = request_template(provider_config, &model, 0.0, 1024);
     let protocol = provider_config.protocol;
 
-    let results = stream::iter(cases.into_iter())
+    let results = stream::iter(cases)
         .map(|case| {
             let mut request = base_request.clone();
             request.prompt = case.prompt();
@@ -291,7 +291,7 @@
                     .map(|choice| choice.to_string())
                     .unwrap_or_else(|| "no_answer".to_string());
                 let expected = case.answer.to_string();
-                let correct = actual == expected;
+                let correct = judge::judge_choice(&response.text, case.answer);
                 metrics.record_judgement(correct);
                 if !correct {
                     wrong_cases.push(WrongCaseReport {
@@ -308,18 +308,18 @@
     }
 
     let summary = metrics.summary();
-    let report = benchmark_report(
-        "gpqa-diamond",
-        provider_name,
+    let report = benchmark_report(BenchmarkReportInput {
+        benchmark: "gpqa-diamond",
+        provider: provider_name,
         model,
         dataset,
         started_at,
-        started.elapsed().as_millis(),
+        duration_ms: started.elapsed().as_millis(),
         concurrency,
         limit,
         summary,
         wrong_cases,
-    );
+    });
     let report_path = write_benchmark_report(Path::new("."), &report)?;
     print_benchmark_report(&report, &report_path);
     Ok(())
@@ -438,8 +438,8 @@ fn dataset_report(config: Option<(&str, &str)>, local_path: &Path) -> DatasetRep
     }
 }
 
-fn benchmark_report(
-    benchmark: &str,
+struct BenchmarkReportInput {
+    benchmark: &'static str,
     provider: String,
     model: String,
     dataset: DatasetReport,
@@ -449,31 +449,33 @@ fn benchmark_report(
     limit: Option<usize>,
     summary: MetricsSummary,
     wrong_cases: Vec<WrongCaseReport>,
-) -> BenchmarkReport {
+}
+
+fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport {
     BenchmarkReport {
-        benchmark: benchmark.to_string(),
-        provider,
-        model,
-        dataset,
+        benchmark: input.benchmark.to_string(),
+        provider: input.provider,
+        model: input.model,
+        dataset: input.dataset,
         run: RunReport {
-            started_at,
-            duration_ms,
-            concurrency,
-            limit,
+            started_at: input.started_at,
+            duration_ms: input.duration_ms,
+            concurrency: input.concurrency,
+            limit: input.limit,
             temperature: 0.0,
             max_tokens: 1024,
         },
         summary: BenchmarkSummaryReport {
-            accuracy: summary.accuracy,
-            success: summary.success,
-            total: summary.total,
-            correct: summary.correct,
-            wrong: summary.wrong,
-            failed: summary.failed,
-            latency_ms: latency_report(&summary.latency_ms),
+            accuracy: input.summary.accuracy,
+            success: input.summary.success,
+            total: input.summary.total,
+            correct: input.summary.correct,
+            wrong: input.summary.wrong,
+            failed: input.summary.failed,
+            latency_ms: latency_report(&input.summary.latency_ms),
         },
-        errors: summary.errors,
-        wrong_cases,
+        errors: input.summary.errors,
+        wrong_cases: input.wrong_cases,
     }
 }
 
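Note: the patch only shows the call sites for the new judge::judge_integer and judge::judge_choice helpers. The sketch below is a rough, illustrative reading of the behaviour those call sites imply, assuming the helpers live in src/judge.rs, that case.answer is a char on the GPQA side, and that integer grading reuses the same final-answer extraction the CLI previously did inline; the actual implementations may extract and compare answers differently.

// Illustrative stand-ins only: signatures are inferred from the call sites in the
// diff above, and the extraction logic is a naive placeholder, not the project's parser.

/// Pull the last run of ASCII digits out of the response text,
/// e.g. "... so the answer is 42." -> Some("42").
pub fn extract_final_integer(text: &str) -> Option<String> {
    let mut last = None;
    let mut current = String::new();
    for ch in text.chars() {
        if ch.is_ascii_digit() {
            current.push(ch);
        } else if !current.is_empty() {
            last = Some(std::mem::take(&mut current));
        }
    }
    if !current.is_empty() {
        last = Some(current);
    }
    last
}

/// AIME-style grading: extract the final integer and compare it against the expected
/// answer string (mirrors the inline `actual == case.answer.trim()` check it replaces).
pub fn judge_integer(response_text: &str, expected: &str) -> bool {
    extract_final_integer(response_text)
        .map(|actual| actual == expected.trim())
        .unwrap_or(false)
}

/// GPQA-style grading: take the last occurrence of a letter A-D in the response
/// and compare it to the expected choice.
pub fn judge_choice(response_text: &str, expected: char) -> bool {
    response_text
        .chars()
        .rev()
        .find(|&c| matches!(c, 'A' | 'B' | 'C' | 'D'))
        == Some(expected.to_ascii_uppercase())
}

Moving the comparison behind judge_* keeps the two benchmark runners from duplicating inline string checks and lets the grading rules evolve in one place.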