
feat: add provider thinking parameters

main
orangels, 1 week ago
commit 18cc7ff716
10 files changed, 786 additions and 35 deletions
  1. README.md (+11, -0)
  2. config.example.yaml (+9, -0)
  3. docs/USAGE.zh-CN.md (+26, -0)
  4. docs/testing-guide.md (+26, -0)
  5. src/cli.rs (+342, -4)
  6. src/config.rs (+96, -0)
  7. src/protocols/anthropic.rs (+140, -15)
  8. src/protocols/openai.rs (+94, -15)
  9. src/report.rs (+28, -1)
  10. src/runner.rs (+14, -0)

README.md (+11, -0)

@@ -30,12 +30,21 @@ providers:
    base_url: "https://relay.example.com/v1"
    api_token: "${OPENAI_RELAY_TOKEN}"
    default_model: "gpt-4o-mini"
    thinking:
      enabled: false
      reasoning_effort: "medium"
      reasoning_summary: "auto"

  anthropic:
    protocol: anthropic
    base_url: "https://relay.example.com"
    api_token: "${ANTHROPIC_RELAY_TOKEN}"
    default_model: "claude-3-5-sonnet-latest"
    thinking:
      enabled: false
      type: "enabled"
      budget_tokens: 10000
      display: "omitted"

benchmarks:
  data_dir: "data/benchmarks"
@@ -54,6 +63,8 @@ export OPENAI_RELAY_TOKEN="..."
export ANTHROPIC_RELAY_TOKEN="..."
```

Thinking can also be enabled per run with CLI overrides such as `--thinking true`, `--thinking-type enabled`, `--thinking-budget-tokens 10000`, `--thinking-display omitted`, `--reasoning-effort high`, and `--reasoning-summary auto`. For Anthropic, enabling thinking omits `temperature` from the upstream request.
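
For example, a one-off check with thinking enabled might look like the following (a minimal sketch using the example providers above; adjust the provider, model, and prompt to your own config):

```bash
# Anthropic-style extended thinking with an explicit token budget
cargo run -- check --provider anthropic --prompt "hello" \
  --thinking --thinking-type enabled --thinking-budget-tokens 10000 --thinking-display omitted

# OpenAI-compatible reasoning parameters
cargo run -- check --provider openai --prompt "hello" \
  --thinking --reasoning-effort high --reasoning-summary auto
```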

## Dataset Fetching

Fetch AIME 2026 into the configured benchmark data directory:


config.example.yaml (+9, -0)

@@ -7,6 +7,10 @@ providers:
    api_token: "${OPENAI_RELAY_TOKEN}"
    default_model: "gpt-4o-mini"
    stream: false
    thinking:
      enabled: false
      reasoning_effort: "medium"
      reasoning_summary: "auto"

  anthropic:
    protocol: anthropic
@@ -14,6 +18,11 @@ providers:
    api_token: "${ANTHROPIC_RELAY_TOKEN}"
    default_model: "claude-3-5-sonnet-latest"
    stream: true
    thinking:
      enabled: false
      type: "enabled"
      budget_tokens: 10000
      display: "omitted"

benchmarks:
  data_dir: "data/benchmarks"


docs/USAGE.zh-CN.md (+26, -0)

@@ -53,12 +53,21 @@ providers:
    base_url: "https://relay.example.com/v1"
    api_token: "${OPENAI_RELAY_TOKEN}"
    default_model: "gpt-4o-mini"
    thinking:
      enabled: false
      reasoning_effort: "medium"
      reasoning_summary: "auto"

  anthropic:
    protocol: anthropic
    base_url: "https://relay.example.com"
    api_token: "${ANTHROPIC_RELAY_TOKEN}"
    default_model: "claude-3-5-sonnet-latest"
    thinking:
      enabled: false
      type: "enabled"
      budget_tokens: 10000
      display: "omitted"

benchmarks:
  data_dir: "data/benchmarks"
@@ -79,6 +88,23 @@ export ANTHROPIC_RELAY_TOKEN="..."

Note: the tool only resolves the token for the provider actually in use. For example, when running with `--provider anthropic` only, `OPENAI_RELAY_TOKEN` does not need to be set.

Thinking parameters can be set in the provider config or overridden via the CLI:

```bash
cargo run -- check --provider anthropic --prompt "hello" \
  --thinking true \
  --thinking-type enabled \
  --thinking-budget-tokens 10000 \
  --thinking-display omitted

cargo run -- check --provider openai --prompt "hello" \
  --thinking true \
  --reasoning-effort high \
  --reasoning-summary auto
```

When thinking is enabled for Anthropic, the request body no longer sends `temperature`.
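
For reference, with the Anthropic example config above and thinking enabled, the upstream request is roughly equivalent to the following (a sketch only; the real body is built by `request_body` in `src/protocols/anthropic.rs`):

```bash
curl -s "https://relay.example.com/v1/messages" \
  -H "x-api-key: $ANTHROPIC_RELAY_TOKEN" \
  -H "anthropic-version: 2023-06-01" \
  -H "content-type: application/json" \
  -d '{
    "model": "claude-3-5-sonnet-latest",
    "messages": [{"role": "user", "content": "hello"}],
    "max_tokens": 1024,
    "thinking": {"type": "enabled", "budget_tokens": 10000, "display": "omitted"}
  }'
```

Note that `temperature` is absent from the body, matching the behaviour described above.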

## 3. Protocol connectivity tests

OpenAI-compatible:


docs/testing-guide.md (+26, -0)

@@ -195,6 +195,32 @@ cargo run -- bench aime2026 --provider anthropic --stream

If `stream: true` is already set in the provider config, streaming is used by default and no extra flag is needed. The CLI `--stream` flag takes precedence over the config.

## Thinking parameters

To test reasoning / thinking behaviour, enable it in the config or override it via the CLI:

```bash
# Anthropic thinking
cargo run -- bench aime2026 \
  --provider anthropic \
  --model anthropic/claude-opus-4.7 \
  --limit 5 \
  --thinking true \
  --thinking-type enabled \
  --thinking-budget-tokens 10000 \
  --thinking-display omitted

# OpenAI-compatible reasoning
cargo run -- bench aime2026 \
  --provider openai \
  --limit 5 \
  --thinking true \
  --reasoning-effort high \
  --reasoning-summary auto
```

With thinking enabled for Anthropic, the request body does not send `temperature`. The benchmark and RPM JSON reports record the thinking parameters used for the run, making experiments easier to reproduce.
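
As an illustration, the `thinking` block recorded under the report's `params` mirrors the fields of `ThinkingParamsReport` (the values shown here are only an example):

```json
"thinking": {
  "enabled": true,
  "type": "enabled",
  "budget_tokens": 10000,
  "effort": null,
  "display": "omitted",
  "reasoning_effort": null,
  "reasoning_summary": null
}
```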

## Raw response debug mode

To check whether a relay is rewriting responses, add `--debug-raw`:


src/cli.rs (+342, -4)

@@ -1,19 +1,19 @@
use crate::benchmarks;
use crate::benchmarks::judge;
use crate::config::AppConfig;
use crate::config::{AppConfig, ProviderThinkingConfig};
use crate::metrics::{LatencySummary, Metrics, MetricsSummary};
use crate::report::{
BenchmarkParamsReport, BenchmarkReport, BenchmarkSummaryReport, CorrectCaseReport,
DatasetReport, LatencyReport, LimiterInferenceKind, LimiterInferenceReport, PhaseSummaryReport,
ProbeSecondReport, RpmModeDetailReport, RpmParamsReport, RpmReport, RpmRunReport,
RpmSummaryReport, RunReport, WindowBoundaryReport, WrongCaseReport, write_benchmark_report,
write_rpm_report,
RpmSummaryReport, RunReport, ThinkingParamsReport, WindowBoundaryReport, WrongCaseReport,
write_benchmark_report, write_rpm_report,
};
use crate::rpm_modes::{
ProbePhase, RpmMode, ScheduledProbe, burst_schedule, sliding_window_schedule,
sustained_schedule, token_bucket_schedule, window_boundary_plan,
};
use crate::runner::{ModelRequest, RawDebugConfig, run_model_request};
use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig, run_model_request};
use anyhow::{Context, Result, bail};
use chrono::Utc;
use clap::{Parser, Subcommand};
@@ -51,6 +51,20 @@ pub enum Command {
stream: Option<bool>,
#[arg(long)]
debug_raw: bool,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
thinking: Option<bool>,
#[arg(long)]
thinking_type: Option<String>,
#[arg(long)]
thinking_budget_tokens: Option<u32>,
#[arg(long)]
thinking_effort: Option<String>,
#[arg(long)]
thinking_display: Option<String>,
#[arg(long)]
reasoning_effort: Option<String>,
#[arg(long)]
reasoning_summary: Option<String>,
},
Dataset {
#[command(subcommand)]
@@ -87,6 +101,20 @@ pub enum Command {
stream: Option<bool>,
#[arg(long)]
debug_raw: bool,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
thinking: Option<bool>,
#[arg(long)]
thinking_type: Option<String>,
#[arg(long)]
thinking_budget_tokens: Option<u32>,
#[arg(long)]
thinking_effort: Option<String>,
#[arg(long)]
thinking_display: Option<String>,
#[arg(long)]
reasoning_effort: Option<String>,
#[arg(long)]
reasoning_summary: Option<String>,
},
}

@@ -114,6 +142,20 @@ pub enum BenchCommand {
max_tokens: u32,
#[arg(long)]
debug_raw: bool,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
thinking: Option<bool>,
#[arg(long)]
thinking_type: Option<String>,
#[arg(long)]
thinking_budget_tokens: Option<u32>,
#[arg(long)]
thinking_effort: Option<String>,
#[arg(long)]
thinking_display: Option<String>,
#[arg(long)]
reasoning_effort: Option<String>,
#[arg(long)]
reasoning_summary: Option<String>,
},
GpqaDiamond {
#[arg(long, default_value = "config.yaml")]
@@ -132,6 +174,20 @@ pub enum BenchCommand {
max_tokens: u32,
#[arg(long)]
debug_raw: bool,
#[arg(long, num_args = 0..=1, default_missing_value = "true")]
thinking: Option<bool>,
#[arg(long)]
thinking_type: Option<String>,
#[arg(long)]
thinking_budget_tokens: Option<u32>,
#[arg(long)]
thinking_effort: Option<String>,
#[arg(long)]
thinking_display: Option<String>,
#[arg(long)]
reasoning_effort: Option<String>,
#[arg(long)]
reasoning_summary: Option<String>,
},
}

@@ -144,6 +200,13 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
prompt,
stream,
debug_raw,
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
} => {
let config = AppConfig::load(&config)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -158,6 +221,18 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
max_tokens: 1024,
stream: stream.unwrap_or(provider.stream),
raw_debug: raw_debug_config(debug_raw, &provider_name, &model),
thinking: merged_thinking_config(
provider.thinking.as_ref(),
ThinkingOverrides {
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
},
),
};
let response = run_model_request(provider.protocol, request).await?;

@@ -193,6 +268,13 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
prompt,
stream,
debug_raw,
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
} => {
run_rpm(
config,
@@ -209,6 +291,13 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
prompt,
stream,
debug_raw,
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
},
)
.await
@@ -227,6 +316,13 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
stream,
max_tokens,
debug_raw,
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
} => {
run_aime_benchmark(
config,
@@ -237,6 +333,15 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
stream,
max_tokens,
debug_raw,
ThinkingOverrides {
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
},
)
.await
}
@@ -249,6 +354,13 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
stream,
max_tokens,
debug_raw,
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
} => {
run_gpqa_benchmark(
config,
@@ -259,6 +371,15 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
stream,
max_tokens,
debug_raw,
ThinkingOverrides {
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
},
)
.await
}
@@ -274,6 +395,7 @@ async fn run_aime_benchmark(
stream: Option<bool>,
max_tokens: u32,
debug_raw: bool,
thinking_overrides: ThinkingOverrides,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -295,6 +417,8 @@ async fn run_aime_benchmark(
let mut base_request = request_template(&provider_config, &model, 0.0, max_tokens);
base_request.stream = stream.unwrap_or(provider_config.stream);
base_request.raw_debug = raw_debug_config(debug_raw, &provider_name, &model);
base_request.thinking =
merged_thinking_config(provider_config.thinking.as_ref(), thinking_overrides);
let protocol = provider_config.protocol;

let pb = ProgressBar::new(total);
@@ -361,6 +485,7 @@ async fn run_aime_benchmark(
provider: provider_name,
model,
stream: base_request.stream,
thinking: thinking_report(base_request.thinking.as_ref()),
dataset,
started_at,
duration_ms: started.elapsed().as_millis(),
@@ -385,6 +510,7 @@ async fn run_gpqa_benchmark(
stream: Option<bool>,
max_tokens: u32,
debug_raw: bool,
thinking_overrides: ThinkingOverrides,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -406,6 +532,8 @@ async fn run_gpqa_benchmark(
let mut base_request = request_template(&provider_config, &model, 0.0, max_tokens);
base_request.stream = stream.unwrap_or(provider_config.stream);
base_request.raw_debug = raw_debug_config(debug_raw, &provider_name, &model);
base_request.thinking =
merged_thinking_config(provider_config.thinking.as_ref(), thinking_overrides);
let protocol = provider_config.protocol;

let pb = ProgressBar::new(total);
@@ -474,6 +602,7 @@ async fn run_gpqa_benchmark(
provider: provider_name,
model,
stream: base_request.stream,
thinking: thinking_report(base_request.thinking.as_ref()),
dataset,
started_at,
duration_ms: started.elapsed().as_millis(),
@@ -502,6 +631,13 @@ struct RpmCommandOptions {
prompt: String,
stream: Option<bool>,
debug_raw: bool,
thinking: Option<bool>,
thinking_type: Option<String>,
thinking_budget_tokens: Option<u32>,
thinking_effort: Option<String>,
thinking_display: Option<String>,
reasoning_effort: Option<String>,
reasoning_summary: Option<String>,
}

async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
@@ -525,8 +661,21 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
prompt: options.prompt.clone(),
stream: stream_enabled,
raw_debug: raw_debug_config(options.debug_raw, &provider_name, &model),
thinking: merged_thinking_config(
provider_config.thinking.as_ref(),
ThinkingOverrides {
thinking: options.thinking,
thinking_type: options.thinking_type,
thinking_budget_tokens: options.thinking_budget_tokens,
thinking_effort: options.thinking_effort,
thinking_display: options.thinking_display,
reasoning_effort: options.reasoning_effort,
reasoning_summary: options.reasoning_summary,
},
),
..request_template(&provider_config, &model, 0.0, 1024)
};
let thinking = thinking_report(request.thinking.as_ref());
let started_at = Utc::now();
let started = Instant::now();
let mut metrics = Metrics::new();
@@ -561,6 +710,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
params: RpmParamsReport {
prompt: options.prompt,
stream: stream_enabled,
thinking,
duration: options.duration,
burst: options.burst,
concurrency,
@@ -954,6 +1104,84 @@ fn raw_debug_config(enabled: bool, provider: &str, model: &str) -> Option<RawDeb
})
}

#[derive(Debug, Default)]
struct ThinkingOverrides {
thinking: Option<bool>,
thinking_type: Option<String>,
thinking_budget_tokens: Option<u32>,
thinking_effort: Option<String>,
thinking_display: Option<String>,
reasoning_effort: Option<String>,
reasoning_summary: Option<String>,
}

fn merged_thinking_config(
provider: Option<&ProviderThinkingConfig>,
overrides: ThinkingOverrides,
) -> Option<ThinkingConfig> {
let has_overrides = overrides.thinking.is_some()
|| overrides.thinking_type.is_some()
|| overrides.thinking_budget_tokens.is_some()
|| overrides.thinking_effort.is_some()
|| overrides.thinking_display.is_some()
|| overrides.reasoning_effort.is_some()
|| overrides.reasoning_summary.is_some();
if provider.is_none() && !has_overrides {
return None;
}

let enabled = overrides
.thinking
.or_else(|| provider.and_then(|thinking| thinking.enabled))
.unwrap_or(true);
if !enabled {
return Some(ThinkingConfig {
enabled: false,
kind: overrides
.thinking_type
.or_else(|| provider.and_then(|thinking| thinking.kind.clone())),
budget_tokens: overrides
.thinking_budget_tokens
.or_else(|| provider.and_then(|thinking| thinking.budget_tokens)),
effort: overrides
.thinking_effort
.or_else(|| provider.and_then(|thinking| thinking.effort.clone())),
display: overrides
.thinking_display
.or_else(|| provider.and_then(|thinking| thinking.display.clone())),
reasoning_effort: overrides
.reasoning_effort
.or_else(|| provider.and_then(|thinking| thinking.reasoning_effort.clone())),
reasoning_summary: overrides
.reasoning_summary
.or_else(|| provider.and_then(|thinking| thinking.reasoning_summary.clone())),
});
}

Some(ThinkingConfig {
enabled,
kind: overrides
.thinking_type
.or_else(|| provider.and_then(|thinking| thinking.kind.clone()))
.or_else(|| Some("enabled".to_string())),
budget_tokens: overrides
.thinking_budget_tokens
.or_else(|| provider.and_then(|thinking| thinking.budget_tokens)),
effort: overrides
.thinking_effort
.or_else(|| provider.and_then(|thinking| thinking.effort.clone())),
display: overrides
.thinking_display
.or_else(|| provider.and_then(|thinking| thinking.display.clone())),
reasoning_effort: overrides
.reasoning_effort
.or_else(|| provider.and_then(|thinking| thinking.reasoning_effort.clone())),
reasoning_summary: overrides
.reasoning_summary
.or_else(|| provider.and_then(|thinking| thinking.reasoning_summary.clone())),
})
}

fn request_template(
provider: &crate::config::ProviderConfig,
model: &str,
@@ -969,6 +1197,7 @@ fn request_template(
max_tokens,
stream: provider.stream,
raw_debug: None,
thinking: merged_thinking_config(provider.thinking.as_ref(), ThinkingOverrides::default()),
}
}

@@ -989,11 +1218,24 @@ fn dataset_report(config: Option<(&str, &str)>, local_path: &Path) -> DatasetRep
}
}

fn thinking_report(thinking: Option<&ThinkingConfig>) -> Option<ThinkingParamsReport> {
thinking.map(|thinking| ThinkingParamsReport {
enabled: thinking.enabled,
kind: thinking.kind.clone(),
budget_tokens: thinking.budget_tokens,
effort: thinking.effort.clone(),
display: thinking.display.clone(),
reasoning_effort: thinking.reasoning_effort.clone(),
reasoning_summary: thinking.reasoning_summary.clone(),
})
}

struct BenchmarkReportInput {
benchmark: &'static str,
provider: String,
model: String,
stream: bool,
thinking: Option<ThinkingParamsReport>,
dataset: DatasetReport,
started_at: chrono::DateTime<Utc>,
duration_ms: u128,
@@ -1012,6 +1254,7 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport {
model: input.model,
params: BenchmarkParamsReport {
stream: input.stream,
thinking: input.thinking,
},
dataset: input.dataset,
run: RunReport {
@@ -1351,6 +1594,101 @@ mod tests {
assert!(debug_raw);
}

#[test]
fn check_command_parses_thinking_overrides() {
let cli = Cli::try_parse_from([
"lq_token_test",
"check",
"--provider",
"anthropic",
"--prompt",
"hello",
"--thinking",
"true",
"--thinking-type",
"adaptive",
"--thinking-budget-tokens",
"10000",
"--thinking-effort",
"high",
"--thinking-display",
"omitted",
"--reasoning-effort",
"high",
"--reasoning-summary",
"auto",
])
.expect("parse thinking options");

let Command::Check {
thinking,
thinking_type,
thinking_budget_tokens,
thinking_effort,
thinking_display,
reasoning_effort,
reasoning_summary,
..
} = cli.command
else {
panic!("expected check command");
};

assert_eq!(thinking, Some(true));
assert_eq!(thinking_type.as_deref(), Some("adaptive"));
assert_eq!(thinking_budget_tokens, Some(10000));
assert_eq!(thinking_effort.as_deref(), Some("high"));
assert_eq!(thinking_display.as_deref(), Some("omitted"));
assert_eq!(reasoning_effort.as_deref(), Some("high"));
assert_eq!(reasoning_summary.as_deref(), Some("auto"));
}

#[test]
fn check_command_parses_bare_thinking_as_true() {
let cli = Cli::try_parse_from([
"lq_token_test",
"check",
"--provider",
"anthropic",
"--prompt",
"hello",
"--thinking",
])
.expect("parse bare thinking option");

let Command::Check { thinking, .. } = cli.command else {
panic!("expected check command");
};

assert_eq!(thinking, Some(true));
}

#[test]
fn disabled_thinking_report_keeps_cli_budget_override() {
let provider = ProviderThinkingConfig {
enabled: Some(true),
kind: Some("enabled".to_string()),
budget_tokens: Some(10000),
effort: None,
display: None,
reasoning_effort: None,
reasoning_summary: None,
};

let merged = merged_thinking_config(
Some(&provider),
ThinkingOverrides {
thinking: Some(false),
thinking_budget_tokens: Some(20000),
..ThinkingOverrides::default()
},
)
.expect("disabled thinking config");

assert!(!merged.enabled);
assert_eq!(merged.budget_tokens, Some(20000));
}

#[test]
fn rpm_command_parses_window_boundary_offset() {
let cli = Cli::try_parse_from([


src/config.rs (+96, -0)

@@ -42,6 +42,26 @@ pub struct ProviderConfig {
pub default_model: String,
#[serde(default)]
pub stream: bool,
#[serde(default)]
pub thinking: Option<ProviderThinkingConfig>,
}

#[derive(Debug, Clone, Deserialize, PartialEq, Eq)]
pub struct ProviderThinkingConfig {
#[serde(default)]
pub enabled: Option<bool>,
#[serde(default, rename = "type")]
pub kind: Option<String>,
#[serde(default)]
pub budget_tokens: Option<u32>,
#[serde(default)]
pub effort: Option<String>,
#[serde(default)]
pub display: Option<String>,
#[serde(default)]
pub reasoning_effort: Option<String>,
#[serde(default)]
pub reasoning_summary: Option<String>,
}

impl fmt::Debug for ProviderConfig {
@@ -52,6 +72,8 @@ impl fmt::Debug for ProviderConfig {
.field("base_url", &self.base_url)
.field("api_token", &"<redacted>")
.field("default_model", &self.default_model)
.field("stream", &self.stream)
.field("thinking", &self.thinking)
.finish()
}
}
@@ -67,6 +89,7 @@ impl ProviderConfig {
api_token: expand_env_refs_with(&self.api_token, &env)?,
default_model: expand_env_refs_with(&self.default_model, &env)?,
stream: self.stream,
thinking: self.thinking.clone(),
})
}
}
@@ -371,6 +394,79 @@ providers:
assert!(anthropic.stream);
}

#[test]
fn provider_loads_thinking_config() {
let config = AppConfig::load_from_str_with_env(
r#"
default_provider: anthropic
providers:
openai:
protocol: openai
base_url: https://api.openai.test/v1
api_token: literal-token
default_model: gpt-test
thinking:
enabled: true
reasoning_effort: high
reasoning_summary: auto
anthropic:
protocol: anthropic
base_url: https://api.anthropic.test
api_token: literal-token
default_model: claude-test
thinking:
enabled: true
type: enabled
budget_tokens: 10000
display: omitted
"#,
|_| -> Result<String, ()> { unreachable!("config has no env refs") },
)
.expect("load config");

let openai = config
.resolved_provider_with_env(Some("openai"), |_| -> Result<String, ()> {
unreachable!("config has no env refs")
})
.expect("openai provider");
let anthropic = config
.resolved_provider_with_env(Some("anthropic"), |_| -> Result<String, ()> {
unreachable!("config has no env refs")
})
.expect("anthropic provider");

assert_eq!(
openai.thinking.as_ref().expect("openai thinking").enabled,
Some(true)
);
assert_eq!(
openai
.thinking
.as_ref()
.expect("openai thinking")
.reasoning_effort
.as_deref(),
Some("high")
);
assert_eq!(
anthropic
.thinking
.as_ref()
.expect("anthropic thinking")
.budget_tokens,
Some(10000)
);
assert_eq!(
anthropic
.thinking
.as_ref()
.expect("anthropic thinking")
.display
.as_deref(),
Some("omitted")
);
}

#[test]
fn load_reads_yaml_from_path_without_env_refs() {
let temp_dir = tempfile::tempdir().expect("create temp dir");


src/protocols/anthropic.rs (+140, -15)

@@ -3,7 +3,7 @@ use anyhow::{Context, Result, bail};
use futures::StreamExt;
use reqwest::Client;
use serde::Deserialize;
use serde_json::json;
use serde_json::{Value, json};
use std::time::Instant;

pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
@@ -12,12 +12,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
.post(url)
.header("x-api-key", &request.api_token)
.header("anthropic-version", "2023-06-01")
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens
}))
.json(&request_body(request, false))
.send()
.await
.context("failed to send Anthropic messages request")?;
@@ -68,13 +63,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
.post(url)
.header("x-api-key", &request.api_token)
.header("anthropic-version", "2023-06-01")
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"stream": true
}))
.json(&request_body(request, true))
.send()
.await
.context("failed to send Anthropic streaming request")?;
@@ -185,9 +174,44 @@ struct AnthropicDeltaContent {
text: Option<String>,
}

fn request_body(request: &ModelRequest, stream: bool) -> Value {
let mut body = json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens
});
let object = body
.as_object_mut()
.expect("Anthropic request body is an object");
if stream {
object.insert("stream".to_string(), json!(true));
}
if let Some(thinking) = &request.thinking
&& thinking.enabled
{
object.remove("temperature");
let mut thinking_body = serde_json::Map::new();
let thinking_type = thinking.kind.as_deref().unwrap_or("enabled");
thinking_body.insert("type".to_string(), json!(thinking_type));
if thinking_type == "adaptive" {
if let Some(effort) = &thinking.effort {
thinking_body.insert("effort".to_string(), json!(effort));
}
} else if let Some(budget_tokens) = thinking.budget_tokens {
thinking_body.insert("budget_tokens".to_string(), json!(budget_tokens));
}
if let Some(display) = &thinking.display {
thinking_body.insert("display".to_string(), json!(display));
}
object.insert("thinking".to_string(), Value::Object(thinking_body));
}
body
}

#[cfg(test)]
mod tests {
use crate::runner::{ModelRequest, RawDebugConfig};
use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig};
use reqwest::Client;
use wiremock::matchers::{body_json, header, method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
@@ -222,6 +246,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let response = super::send(&Client::new(), &request)
@@ -232,6 +257,88 @@ mod tests {
assert_eq!(response.text, "hi from claude");
}

#[tokio::test]
async fn sends_thinking_and_omits_temperature_when_enabled() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/messages"))
.and(body_json(serde_json::json!({
"model": "claude-test",
"messages": [{"role": "user", "content": "hello"}],
"max_tokens": 1024,
"thinking": {
"type": "enabled",
"budget_tokens": 10000,
"display": "omitted"
}
})))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"content": [
{"type": "text", "text": "hi from claude"}
]
})))
.mount(&server)
.await;

let mut request = anthropic_request(server.uri());
request.thinking = Some(ThinkingConfig {
enabled: true,
kind: Some("enabled".to_string()),
budget_tokens: Some(10000),
effort: None,
display: Some("omitted".to_string()),
reasoning_effort: None,
reasoning_summary: None,
});

let response = super::send(&Client::new(), &request)
.await
.expect("response");

assert_eq!(response.text, "hi from claude");
}

#[tokio::test]
async fn sends_adaptive_thinking_effort_without_budget_tokens() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/messages"))
.and(body_json(serde_json::json!({
"model": "claude-test",
"messages": [{"role": "user", "content": "hello"}],
"max_tokens": 1024,
"thinking": {
"type": "adaptive",
"effort": "high",
"display": "omitted"
}
})))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"content": [
{"type": "text", "text": "hi from claude"}
]
})))
.mount(&server)
.await;

let mut request = anthropic_request(server.uri());
request.thinking = Some(ThinkingConfig {
enabled: true,
kind: Some("adaptive".to_string()),
budget_tokens: Some(10000),
effort: Some("high".to_string()),
display: Some("omitted".to_string()),
reasoning_effort: None,
reasoning_summary: None,
});

let response = super::send(&Client::new(), &request)
.await
.expect("response");

assert_eq!(response.text, "hi from claude");
}

#[tokio::test]
async fn errors_on_non_success_status() {
let server = MockServer::start().await;
@@ -254,6 +361,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let error = super::send(&Client::new(), &request)
@@ -286,6 +394,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let message = super::send(&Client::new(), &request)
@@ -322,6 +431,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let response = super::send(&Client::new(), &request)
@@ -373,6 +483,7 @@ mod tests {
temp_dir.path().to_path_buf(),
"anthropic-claude-test".to_string(),
)),
thinking: None,
};

let response = super::send_stream(&Client::new(), &request)
@@ -392,4 +503,18 @@ mod tests {
assert!(raw.contains("event: content_block_delta"));
assert!(raw.contains("\"text\":\"hi \""));
}

fn anthropic_request(base_url: String) -> ModelRequest {
ModelRequest {
base_url,
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
}
}
}

src/protocols/openai.rs (+94, -15)

@@ -3,7 +3,7 @@ use anyhow::{Context, Result, bail};
use futures::StreamExt;
use reqwest::Client;
use serde::Deserialize;
use serde_json::json;
use serde_json::{Value, json};
use std::time::Instant;

pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
@@ -11,12 +11,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
let response = client
.post(url)
.bearer_auth(&request.api_token)
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens
}))
.json(&request_body(request, false))
.send()
.await
.context("failed to send OpenAI chat completion request")?;
@@ -65,13 +60,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let response = client
.post(url)
.bearer_auth(&request.api_token)
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"stream": true
}))
.json(&request_body(request, true))
.send()
.await
.context("failed to send OpenAI streaming request")?;
@@ -181,9 +170,39 @@ struct OpenAiStreamDelta {
content: Option<String>,
}

fn request_body(request: &ModelRequest, stream: bool) -> Value {
let mut body = json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens
});
let object = body
.as_object_mut()
.expect("OpenAI request body is an object");
if stream {
object.insert("stream".to_string(), json!(true));
}
if let Some(thinking) = &request.thinking
&& thinking.enabled
{
if let Some(effort) = thinking
.reasoning_effort
.as_ref()
.or(thinking.effort.as_ref())
{
object.insert("reasoning_effort".to_string(), json!(effort));
}
if let Some(summary) = &thinking.reasoning_summary {
object.insert("reasoning_summary".to_string(), json!(summary));
}
}
body
}

#[cfg(test)]
mod tests {
use crate::runner::{ModelRequest, RawDebugConfig};
use crate::runner::{ModelRequest, RawDebugConfig, ThinkingConfig};
use reqwest::Client;
use wiremock::matchers::{body_json, header, method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
@@ -219,6 +238,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let response = super::send(&Client::new(), &request)
@@ -229,6 +249,47 @@ mod tests {
assert_eq!(response.text, "hi back");
}

#[tokio::test]
async fn sends_reasoning_effort_when_thinking_is_enabled() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/chat/completions"))
.and(body_json(serde_json::json!({
"model": "gpt-test",
"messages": [{"role": "user", "content": "hello"}],
"temperature": 0.0,
"max_tokens": 1024,
"reasoning_effort": "high",
"reasoning_summary": "auto"
})))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"choices": [{
"message": {
"content": "hi back"
}
}]
})))
.mount(&server)
.await;

let mut request = openai_request(server.uri());
request.thinking = Some(ThinkingConfig {
enabled: true,
kind: None,
budget_tokens: None,
effort: Some("high".to_string()),
display: None,
reasoning_effort: Some("high".to_string()),
reasoning_summary: Some("auto".to_string()),
});

let response = super::send(&Client::new(), &request)
.await
.expect("response");

assert_eq!(response.text, "hi back");
}

#[tokio::test]
async fn errors_when_chat_completion_content_is_missing() {
let server = MockServer::start().await;
@@ -251,6 +312,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let error = super::send(&Client::new(), &request)
@@ -282,6 +344,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let message = super::send(&Client::new(), &request)
@@ -320,6 +383,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let response = super::send(&Client::new(), &request)
@@ -367,6 +431,7 @@ mod tests {
temp_dir.path().to_path_buf(),
"openai-gpt-test".to_string(),
)),
thinking: None,
};

let response = super::send_stream(&Client::new(), &request)
@@ -386,4 +451,18 @@ mod tests {
assert!(raw.contains("data: {\"choices\""));
assert!(raw.contains("data: [DONE]"));
}

fn openai_request(base_url: String) -> ModelRequest {
ModelRequest {
base_url,
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
}
}
}

src/report.rs (+28, -1)

@@ -75,12 +75,14 @@ pub struct BenchmarkReport {
#[derive(Debug, Clone, Serialize)]
pub struct BenchmarkParamsReport {
pub stream: bool,
pub thinking: Option<ThinkingParamsReport>,
}

#[derive(Debug, Clone, Serialize)]
pub struct RpmParamsReport {
pub prompt: String,
pub stream: bool,
pub thinking: Option<ThinkingParamsReport>,
pub duration: String,
pub burst: Option<u32>,
pub concurrency: usize,
@@ -88,6 +90,18 @@ pub struct RpmParamsReport {
pub window_offset_ms: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub struct ThinkingParamsReport {
pub enabled: bool,
#[serde(rename = "type")]
pub kind: Option<String>,
pub budget_tokens: Option<u32>,
pub effort: Option<String>,
pub display: Option<String>,
pub reasoning_effort: Option<String>,
pub reasoning_summary: Option<String>,
}

#[derive(Debug, Clone, Serialize)]
pub struct RpmRunReport {
pub started_at: DateTime<Utc>,
@@ -262,7 +276,10 @@ mod tests {
benchmark: "aime2026".to_string(),
provider: "openai".to_string(),
model: "gpt/test".to_string(),
params: BenchmarkParamsReport { stream: false },
params: BenchmarkParamsReport {
stream: false,
thinking: None,
},
dataset: DatasetReport {
source: "local".to_string(),
split: "train".to_string(),
@@ -315,6 +332,7 @@ mod tests {
params: RpmParamsReport {
prompt: "Hi".to_string(),
stream: false,
thinking: None,
duration: "60s".to_string(),
burst: None,
concurrency: 10,
@@ -366,6 +384,15 @@ mod tests {
params: RpmParamsReport {
prompt: "Hi".to_string(),
stream: true,
thinking: Some(ThinkingParamsReport {
enabled: true,
kind: Some("enabled".to_string()),
budget_tokens: Some(10000),
effort: None,
display: Some("omitted".to_string()),
reasoning_effort: None,
reasoning_summary: None,
}),
duration: "90s".to_string(),
burst: Some(50),
concurrency: 10,


src/runner.rs (+14, -0)

@@ -19,6 +19,18 @@ pub struct ModelRequest {
pub max_tokens: u32,
pub stream: bool,
pub raw_debug: Option<RawDebugConfig>,
pub thinking: Option<ThinkingConfig>,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ThinkingConfig {
pub enabled: bool,
pub kind: Option<String>,
pub budget_tokens: Option<u32>,
pub effort: Option<String>,
pub display: Option<String>,
pub reasoning_effort: Option<String>,
pub reasoning_summary: Option<String>,
}

#[derive(Clone)]
@@ -74,6 +86,7 @@ impl fmt::Debug for ModelRequest {
.field("max_tokens", &self.max_tokens)
.field("stream", &self.stream)
.field("raw_debug", &self.raw_debug.is_some())
.field("thinking", &self.thinking)
.finish()
}
}
@@ -153,6 +166,7 @@ mod tests {
max_tokens: 1024,
stream: false,
raw_debug: None,
thinking: None,
};

let debug = format!("{request:?}");

