@@ -187,7 +187,7 @@ async fn run_aime_benchmark(
     let base_request = request_template(provider_config, &model, 0.0, 1024);
     let protocol = provider_config.protocol;
 
-    let results = stream::iter(cases.into_iter())
+    let results = stream::iter(cases)
         .map(|case| {
             let mut request = base_request.clone();
             request.prompt = case.prompt();
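
Note: `stream::iter` accepts any `IntoIterator`, so the explicit `.into_iter()` was redundant; the adaptor converts internally. For context, a minimal sketch of the fan-out shape this call presumably feeds, assuming the `futures` crate's `StreamExt` and a `buffer_unordered` concurrency cap (the capping line is not part of this hunk):

```rust
use futures::stream::{self, StreamExt};

// Hypothetical stand-in for the per-case request future.
async fn double(n: u32) -> u32 {
    n * 2
}

// Minimal sketch: iterate, map each item to a future, cap in-flight work.
async fn fan_out(items: Vec<u32>, concurrency: usize) -> Vec<u32> {
    stream::iter(items) // takes any IntoIterator; no `.into_iter()` needed
        .map(double) // each item becomes a pending future
        .buffer_unordered(concurrency) // at most `concurrency` run at once
        .collect() // results arrive in completion order, not input order
        .await
}
```
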
@@ -208,7 +208,7 @@ async fn run_aime_benchmark(
                 metrics.record_success(response.status, response.elapsed_ms as u64);
                 let actual = judge::extract_final_integer(&response.text)
                     .unwrap_or_else(|| "no_answer".to_string());
-                let correct = actual == case.answer.trim();
+                let correct = judge::judge_integer(&response.text, &case.answer);
                 metrics.record_judgement(correct);
                 if !correct {
                     wrong_cases.push(WrongCaseReport {
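
Note: only the comparison changes here; the call site keeps `actual` for the wrong-case report, while the trim-and-compare logic moves behind `judge::judge_integer`. The `judge` module itself is not part of this diff; a plausible shape for the helper, assuming it reuses the existing `extract_final_integer`:

```rust
// Hypothetical sketch; the real implementation lives in the judge module.
pub fn judge_integer(response_text: &str, expected: &str) -> bool {
    extract_final_integer(response_text)
        .map(|actual| actual == expected.trim()) // same trim the call site used
        .unwrap_or(false) // no extractable answer judges as incorrect
}
```
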
@@ -225,18 +225,18 @@ async fn run_aime_benchmark(
     }
 
     let summary = metrics.summary();
-    let report = benchmark_report(
-        "aime2026",
-        provider_name,
+    let report = benchmark_report(BenchmarkReportInput {
+        benchmark: "aime2026",
+        provider: provider_name,
         model,
         dataset,
         started_at,
-        started.elapsed().as_millis(),
+        duration_ms: started.elapsed().as_millis(),
         concurrency,
         limit,
         summary,
         wrong_cases,
-    );
+    });
     let report_path = write_benchmark_report(Path::new("."), &report)?;
     print_benchmark_report(&report, &report_path);
     Ok(())
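
Note: the call-site diff stays this small because Rust's field init shorthand lets the existing positional arguments such as `model,` and `concurrency,` double as `field: value` pairs once the parentheses become a struct literal; only values whose expression differs from the field name need the explicit form. A minimal illustration:

```rust
struct Point {
    x: i32,
    y: i32,
}

fn make_point() -> Point {
    let x = 1;
    // `x` uses field init shorthand; `y` has no matching local, so it is explicit.
    Point { x, y: 2 }
}
```
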
@@ -268,7 +268,7 @@ async fn run_gpqa_benchmark(
     let base_request = request_template(provider_config, &model, 0.0, 1024);
     let protocol = provider_config.protocol;
 
-    let results = stream::iter(cases.into_iter())
+    let results = stream::iter(cases)
         .map(|case| {
             let mut request = base_request.clone();
             request.prompt = case.prompt();
@@ -291,7 +291,7 @@ async fn run_gpqa_benchmark(
                     .map(|choice| choice.to_string())
                     .unwrap_or_else(|| "no_answer".to_string());
                 let expected = case.answer.to_string();
-                let correct = actual == expected;
+                let correct = judge::judge_choice(&response.text, case.answer);
                 metrics.record_judgement(correct);
                 if !correct {
                     wrong_cases.push(WrongCaseReport {
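
Note: this mirrors the AIME change; `expected` stays bound for the wrong-case report, and `case.answer` is passed by value rather than by reference, which suggests the answer key is a `Copy` type such as `char`. A hypothetical counterpart to `judge_integer` (both the extractor name and the `char` type below are assumptions, not shown in this diff):

```rust
// Hypothetical sketch, assuming a char answer key and an Option-returning extractor.
pub fn judge_choice(response_text: &str, expected: char) -> bool {
    extract_final_choice(response_text)
        .map(|choice| choice == expected)
        .unwrap_or(false)
}
```
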
@@ -308,18 +308,18 @@ async fn run_gpqa_benchmark(
     }
 
     let summary = metrics.summary();
-    let report = benchmark_report(
-        "gpqa-diamond",
-        provider_name,
+    let report = benchmark_report(BenchmarkReportInput {
+        benchmark: "gpqa-diamond",
+        provider: provider_name,
         model,
         dataset,
         started_at,
-        started.elapsed().as_millis(),
+        duration_ms: started.elapsed().as_millis(),
         concurrency,
         limit,
         summary,
         wrong_cases,
-    );
+    });
     let report_path = write_benchmark_report(Path::new("."), &report)?;
     print_benchmark_report(&report, &report_path);
     Ok(())
@@ -438,8 +438,8 @@ fn dataset_report(config: Option<(&str, &str)>, local_path: &Path) -> DatasetReport
     }
 }
 
-fn benchmark_report(
-    benchmark: &str,
+struct BenchmarkReportInput {
+    benchmark: &'static str,
     provider: String,
     model: String,
     dataset: DatasetReport,
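
Note: the parameter's `&str` could lean on lifetime elision, but a struct field cannot, so the field is spelled `&'static str`; that is sound here because both call sites pass string literals ("aime2026", "gpqa-diamond"), and literals live for the whole program. A minimal illustration:

```rust
struct Input {
    // Fields need an explicit lifetime; 'static fits because callers pass literals.
    name: &'static str,
}

fn demo() -> Input {
    Input { name: "aime2026" } // string literals are &'static str
}
```
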
@@ -449,31 +449,33 @@ fn benchmark_report(
     limit: Option<usize>,
     summary: MetricsSummary,
     wrong_cases: Vec<WrongCaseReport>,
-) -> BenchmarkReport {
+}
+
+fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport {
     BenchmarkReport {
-        benchmark: benchmark.to_string(),
-        provider,
-        model,
-        dataset,
+        benchmark: input.benchmark.to_string(),
+        provider: input.provider,
+        model: input.model,
+        dataset: input.dataset,
         run: RunReport {
-            started_at,
-            duration_ms,
-            concurrency,
-            limit,
+            started_at: input.started_at,
+            duration_ms: input.duration_ms,
+            concurrency: input.concurrency,
+            limit: input.limit,
             temperature: 0.0,
             max_tokens: 1024,
         },
         summary: BenchmarkSummaryReport {
-            accuracy: summary.accuracy,
-            success: summary.success,
-            total: summary.total,
-            correct: summary.correct,
-            wrong: summary.wrong,
-            failed: summary.failed,
-            latency_ms: latency_report(&summary.latency_ms),
+            accuracy: input.summary.accuracy,
+            success: input.summary.success,
+            total: input.summary.total,
+            correct: input.summary.correct,
+            wrong: input.summary.wrong,
+            failed: input.summary.failed,
+            latency_ms: latency_report(&input.summary.latency_ms),
         },
-        errors: summary.errors,
-        wrong_cases,
+        errors: input.summary.errors,
+        wrong_cases: input.wrong_cases,
     }
 }
 
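
Note: replacing ten positional arguments with `BenchmarkReportInput` makes both call sites self-documenting and lets the compiler catch transposed same-typed arguments (`provider` and `model` are both `String`, so swapping them previously compiled without complaint). A self-contained illustration of the pattern, under hypothetical names:

```rust
// Positional arguments of the same type can be swapped without a compile error...
fn label_positional(city: String, country: String) -> String {
    format!("{city}, {country}")
}

// ...while a parameter struct forces every value to be named at the call site.
struct LabelInput {
    city: String,
    country: String,
}

fn label(input: LabelInput) -> String {
    format!("{}, {}", input.city, input.country)
}

fn main() {
    // The named fields make an accidental swap visible at a glance.
    let s = label(LabelInput {
        city: "Paris".to_string(),
        country: "France".to_string(),
    });
    assert_eq!(s, "Paris, France");
    assert_eq!(label_positional("Paris".into(), "France".into()), s);
}
```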