
feat: add streaming (SSE) support with TTFT metrics

Support streaming responses for both the OpenAI and Anthropic protocols.
Adds a --stream CLI flag, a per-provider stream config field, a shared SSE
line buffer, and TTFT (time to first token) percentile tracking alongside
the existing TTLT (time to last token) latency metrics.
main · orangels, 1 week ago · commit 20217b2250
11 changed files with 461 additions and 70 deletions
  1. Cargo.lock  +16 -0
  2. Cargo.toml  +1 -1
  3. config.example.yaml  +2 -0
  4. src/cli.rs  +49 -7
  5. src/config.rs  +40 -0
  6. src/metrics.rs  +53 -3
  7. src/protocols/anthropic.rs  +124 -28
  8. src/protocols/mod.rs  +24 -0
  9. src/protocols/openai.rs  +121 -28
 10. src/report.rs  +17 -0
 11. src/runner.rs  +14 -3
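
For orientation, a minimal sketch (not code from this commit) of how the new pieces fit together: the --stream flag, when passed, overrides the provider's stream field, and the resolved flag selects the streaming or non-streaming send path in the runner.

// Illustrative sketch only; mirrors the resolution added in src/cli.rs and the
// dispatch added in src/runner.rs below.
fn effective_stream(cli_flag: Option<bool>, provider_default: bool) -> bool {
    // `--stream true|false` wins when given; otherwise the provider's `stream` field applies.
    cli_flag.unwrap_or(provider_default)
}

fn main() {
    // Provider config has `stream: false`, CLI passes `--stream true`.
    assert!(effective_stream(Some(true), false));
    // No CLI flag: the provider default applies.
    assert!(!effective_stream(None, false));
}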

Cargo.lock  +16 -0

@@ -1252,6 +1252,7 @@ dependencies = [
"base64 0.22.1",
"bytes",
"futures-core",
"futures-util",
"http",
"http-body",
"http-body-util",
@@ -1271,12 +1272,14 @@ dependencies = [
"sync_wrapper",
"tokio",
"tokio-rustls",
"tokio-util",
"tower",
"tower-http",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"wasm-streams",
"web-sys",
"webpki-roots",
]
@@ -1948,6 +1951,19 @@ dependencies = [
"wasmparser",
]

[[package]]
name = "wasm-streams"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
dependencies = [
"futures-util",
"js-sys",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
]

[[package]]
name = "wasmparser"
version = "0.244.0"


Cargo.toml  +1 -1

@@ -11,7 +11,7 @@ futures = "0.3"
hdrhistogram = "7"
indicatif = "0.17"
regex = "1"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_yaml = "0.9"


config.example.yaml  +2 -0

@@ -6,12 +6,14 @@ providers:
base_url: "https://relay.example.com/v1"
api_token: "${OPENAI_RELAY_TOKEN}"
default_model: "gpt-4o-mini"
stream: false

anthropic:
protocol: anthropic
base_url: "https://relay.example.com"
api_token: "${ANTHROPIC_RELAY_TOKEN}"
default_model: "claude-3-5-sonnet-latest"
stream: true

benchmarks:
data_dir: "data/benchmarks"


src/cli.rs  +49 -7

@@ -45,6 +45,8 @@ pub enum Command {
model: Option<String>,
#[arg(long)]
prompt: String,
#[arg(long)]
stream: Option<bool>,
},
Dataset {
#[command(subcommand)]
@@ -77,6 +79,8 @@ pub enum Command {
concurrency: Option<usize>,
#[arg(long)]
prompt: String,
#[arg(long)]
stream: Option<bool>,
},
}

@@ -98,6 +102,8 @@ pub enum BenchCommand {
concurrency: usize,
#[arg(long)]
limit: Option<usize>,
#[arg(long)]
stream: Option<bool>,
},
GpqaDiamond {
#[arg(long, default_value = "config.yaml")]
@@ -110,6 +116,8 @@ pub enum BenchCommand {
concurrency: usize,
#[arg(long)]
limit: Option<usize>,
#[arg(long)]
stream: Option<bool>,
},
}

@@ -120,6 +128,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
provider,
model,
prompt,
stream,
} => {
let config = AppConfig::load(&config)?;
let provider = config.resolved_provider(provider.as_deref())?;
@@ -130,11 +139,15 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
prompt,
temperature: 0.0,
max_tokens: 1024,
stream: stream.unwrap_or(provider.stream),
};
let response = run_model_request(provider.protocol, request).await?;

println!("status: {}", response.status);
println!("elapsed_ms: {}", response.elapsed_ms);
if let Some(ttft) = response.first_token_ms {
println!("first_token_ms: {}", ttft);
}
println!("{}", response.text);

Ok(())
@@ -160,6 +173,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
window_offset_ms,
concurrency,
prompt,
stream,
} => {
run_rpm(
config,
@@ -174,6 +188,7 @@ pub async fn dispatch(cli: Cli) -> Result<()> {
window_offset_ms,
concurrency,
prompt,
stream,
},
)
.await
@@ -189,14 +204,16 @@ async fn dispatch_bench(command: BenchCommand) -> Result<()> {
model,
concurrency,
limit,
} => run_aime_benchmark(config, provider, model, concurrency, limit).await,
stream,
} => run_aime_benchmark(config, provider, model, concurrency, limit, stream).await,
BenchCommand::GpqaDiamond {
config,
provider,
model,
concurrency,
limit,
} => run_gpqa_benchmark(config, provider, model, concurrency, limit).await,
stream,
} => run_gpqa_benchmark(config, provider, model, concurrency, limit, stream).await,
}
}

@@ -206,6 +223,7 @@ async fn run_aime_benchmark(
model: Option<String>,
concurrency: usize,
limit: Option<usize>,
stream: Option<bool>,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -223,7 +241,8 @@ async fn run_aime_benchmark(
let cases = apply_limit(loaded.cases, limit);
let started_at = Utc::now();
let started = Instant::now();
let base_request = request_template(&provider_config, &model, 0.0, 1024);
let mut base_request = request_template(&provider_config, &model, 0.0, 1024);
base_request.stream = stream.unwrap_or(provider_config.stream);
let protocol = provider_config.protocol;

let results = stream::iter(cases)
@@ -244,7 +263,7 @@ async fn run_aime_benchmark(
for (case, result) in results {
match result {
Ok(response) => {
metrics.record_success(response.status, response.elapsed_ms as u64);
metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64));
let actual = judge::extract_final_integer(&response.text)
.unwrap_or_else(|| "no_answer".to_string());
let correct = judge::judge_integer(&response.text, &case.answer);
@@ -287,6 +306,7 @@ async fn run_gpqa_benchmark(
model: Option<String>,
concurrency: usize,
limit: Option<usize>,
stream: Option<bool>,
) -> Result<()> {
let config = AppConfig::load(&config_path)?;
let provider_name = provider_name(&config, provider.as_deref())?;
@@ -304,7 +324,8 @@ async fn run_gpqa_benchmark(
let cases = apply_limit(loaded.cases, limit);
let started_at = Utc::now();
let started = Instant::now();
let base_request = request_template(&provider_config, &model, 0.0, 1024);
let mut base_request = request_template(&provider_config, &model, 0.0, 1024);
base_request.stream = stream.unwrap_or(provider_config.stream);
let protocol = provider_config.protocol;

let results = stream::iter(cases)
@@ -325,7 +346,7 @@ async fn run_gpqa_benchmark(
for (case, result) in results {
match result {
Ok(response) => {
metrics.record_success(response.status, response.elapsed_ms as u64);
metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64));
let actual = judge::extract_choice(&response.text)
.map(|choice| choice.to_string())
.unwrap_or_else(|| "no_answer".to_string());
@@ -375,6 +396,7 @@ struct RpmCommandOptions {
window_offset_ms: u64,
concurrency: Option<usize>,
prompt: String,
stream: Option<bool>,
}

async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()> {
@@ -394,6 +416,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
.unwrap_or_else(|| provider_config.default_model.clone());
let request = ModelRequest {
prompt: options.prompt,
stream: options.stream.unwrap_or(provider_config.stream),
..request_template(&provider_config, &model, 0.0, 1024)
};
let started_at = Utc::now();
@@ -413,7 +436,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
let success = result.result.is_ok();
mode_summary.record(result.phase, result.second, success);
match result.result {
Ok(response) => metrics.record_success(response.status, response.elapsed_ms as u64),
Ok(response) => metrics.record_success(response.status, response.elapsed_ms as u64, response.first_token_ms.map(|ms| ms as u64)),
Err(error) => metrics.record_failure(error_code(&error)),
}
}
@@ -436,6 +459,7 @@ async fn run_rpm(config_path: PathBuf, options: RpmCommandOptions) -> Result<()>
success: summary.success,
failure: summary.failed,
latency_ms: latency_report(&summary.latency_ms),
ttft_ms: latency_report(&summary.ttft_ms),
},
mode: mode_plan.mode_name.to_string(),
mode_detail: mode_summary.into_report(options.mode),
@@ -798,6 +822,7 @@ fn request_template(
prompt: String::new(),
temperature,
max_tokens,
stream: provider.stream,
}
}

@@ -853,6 +878,7 @@ fn benchmark_report(input: BenchmarkReportInput) -> BenchmarkReport {
wrong: input.summary.wrong,
failed: input.summary.failed,
latency_ms: latency_report(&input.summary.latency_ms),
ttft_ms: latency_report(&input.summary.ttft_ms),
},
errors: input.summary.errors,
wrong_cases: input.wrong_cases,
@@ -887,6 +913,14 @@ fn print_benchmark_report(report: &BenchmarkReport, report_path: &Path) {
format_optional_latency(report.summary.latency_ms.p95),
format_optional_latency(report.summary.latency_ms.p99)
);
if report.summary.ttft_ms.p50.is_some() {
println!(
"ttft_ms: p50={} p95={} p99={}",
format_optional_latency(report.summary.ttft_ms.p50),
format_optional_latency(report.summary.ttft_ms.p95),
format_optional_latency(report.summary.ttft_ms.p99)
);
}
println!("errors:");
if report.errors.is_empty() {
println!(" none");
@@ -931,6 +965,14 @@ fn print_rpm_report(report: &RpmReport, report_path: &Path) {
format_optional_latency(report.summary.latency_ms.p95),
format_optional_latency(report.summary.latency_ms.p99)
);
if report.summary.ttft_ms.p50.is_some() {
println!(
"ttft_ms: p50={} p95={} p99={}",
format_optional_latency(report.summary.ttft_ms.p50),
format_optional_latency(report.summary.ttft_ms.p95),
format_optional_latency(report.summary.ttft_ms.p99)
);
}
println!("errors:");
if report.errors.is_empty() {
println!(" none");


src/config.rs  +40 -0

@@ -40,6 +40,8 @@ pub struct ProviderConfig {
pub base_url: String,
pub api_token: String,
pub default_model: String,
#[serde(default)]
pub stream: bool,
}

impl fmt::Debug for ProviderConfig {
@@ -64,6 +66,7 @@ impl ProviderConfig {
base_url: expand_env_refs_with(&self.base_url, &env)?,
api_token: expand_env_refs_with(&self.api_token, &env)?,
default_model: expand_env_refs_with(&self.default_model, &env)?,
stream: self.stream,
})
}
}
@@ -331,6 +334,43 @@ providers:
assert_eq!(provider.api_token, "anthropic-secret");
}

#[test]
fn provider_stream_defaults_false_and_can_be_enabled() {
let config = AppConfig::load_from_str_with_env(
r#"
default_provider: openai
providers:
openai:
protocol: openai
base_url: https://api.openai.test/v1
api_token: literal-token
default_model: gpt-test
anthropic:
protocol: anthropic
base_url: https://api.anthropic.test
api_token: literal-token
default_model: claude-test
stream: true
"#,
|_| -> Result<String, ()> { unreachable!("config has no env refs") },
)
.expect("load config");

let openai = config
.resolved_provider_with_env(Some("openai"), |_| -> Result<String, ()> {
unreachable!("config has no env refs")
})
.expect("openai provider");
let anthropic = config
.resolved_provider_with_env(Some("anthropic"), |_| -> Result<String, ()> {
unreachable!("config has no env refs")
})
.expect("anthropic provider");

assert!(!openai.stream);
assert!(anthropic.stream);
}

#[test]
fn load_reads_yaml_from_path_without_env_refs() {
let temp_dir = tempfile::tempdir().expect("create temp dir");


src/metrics.rs  +53 -3

@@ -10,6 +10,7 @@ pub struct Metrics {
total_judged: u64,
errors: BTreeMap<String, u64>,
latency: Histogram<u64>,
ttft: Histogram<u64>,
}

#[derive(Debug, Clone, PartialEq, Serialize)]
@@ -22,6 +23,7 @@ pub struct MetricsSummary {
pub total_judged: u64,
pub accuracy: Option<f64>,
pub latency_ms: LatencySummary,
pub ttft_ms: LatencySummary,
pub errors: Vec<ErrorCount>,
}

@@ -56,12 +58,16 @@ impl Metrics {
total_judged: 0,
errors: BTreeMap::new(),
latency: Histogram::new(3).expect("valid histogram precision"),
ttft: Histogram::new(3).expect("valid histogram precision"),
}
}

pub fn record_success(&mut self, _status: u16, latency_ms: u64) {
pub fn record_success(&mut self, _status: u16, latency_ms: u64, first_token_ms: Option<u64>) {
self.success += 1;
let _ = self.latency.record(latency_ms);
if let Some(ttft) = first_token_ms {
let _ = self.ttft.record(ttft);
}
}

pub fn record_failure(&mut self, code: impl Into<String>) {
@@ -97,6 +103,20 @@ impl Metrics {
}
};

let ttft_ms = if self.ttft.is_empty() {
LatencySummary {
p50: None,
p95: None,
p99: None,
}
} else {
LatencySummary {
p50: Some(self.ttft.value_at_quantile(0.50)),
p95: Some(self.ttft.value_at_quantile(0.95)),
p99: Some(self.ttft.value_at_quantile(0.99)),
}
};

MetricsSummary {
success: self.success,
failed: self.failed,
@@ -106,6 +126,7 @@ impl Metrics {
total_judged: self.total_judged,
accuracy,
latency_ms,
ttft_ms,
errors: self
.errors
.iter()
@@ -129,7 +150,7 @@ mod tests {
fn tracks_success_failure_and_error_counts() {
let mut metrics = Metrics::new();

metrics.record_success(200, 120);
metrics.record_success(200, 120, None);
metrics.record_failure("request_error");
metrics.record_failure("429");
metrics.record_failure("429");
@@ -169,7 +190,7 @@ mod tests {
let mut metrics = Metrics::new();

for latency in 1..=100 {
metrics.record_success(200, latency);
metrics.record_success(200, latency, None);
}

let summary = metrics.summary();
@@ -178,4 +199,33 @@ mod tests {
assert_eq!(summary.latency_ms.p95, Some(95));
assert_eq!(summary.latency_ms.p99, Some(99));
}

#[test]
fn computes_ttft_percentiles_when_present() {
let mut metrics = Metrics::new();

for i in 1..=100 {
metrics.record_success(200, i * 2, Some(i));
}

let summary = metrics.summary();

assert_eq!(summary.ttft_ms.p50, Some(50));
assert_eq!(summary.ttft_ms.p95, Some(95));
assert_eq!(summary.ttft_ms.p99, Some(99));
}

#[test]
fn ttft_is_none_when_no_streaming_requests() {
let mut metrics = Metrics::new();

metrics.record_success(200, 100, None);
metrics.record_success(200, 200, None);

let summary = metrics.summary();

assert_eq!(summary.ttft_ms.p50, None);
assert_eq!(summary.ttft_ms.p95, None);
assert_eq!(summary.ttft_ms.p99, None);
}
}

src/protocols/anthropic.rs  +124 -28

@@ -1,8 +1,10 @@
use crate::runner::{ModelRequest, ModelResponse};
use anyhow::{Context, Result, bail};
use futures::StreamExt;
use reqwest::Client;
use serde::Deserialize;
use serde_json::json;
use std::time::Instant;

pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
let url = super::endpoint_url(&request.base_url, "/v1/messages")?;
@@ -49,6 +51,86 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
text,
status: status_code,
elapsed_ms: 0,
first_token_ms: None,
})
}

pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
let url = super::endpoint_url(&request.base_url, "/v1/messages")?;
let response = client
.post(url)
.header("x-api-key", &request.api_token)
.header("anthropic-version", "2023-06-01")
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"stream": true
}))
.send()
.await
.context("failed to send Anthropic streaming request")?;

let status = response.status();
let status_code = status.as_u16();

if !status.is_success() {
let body = response
.text()
.await
.context("failed to read Anthropic error response body")?;
bail!(
"{}",
super::upstream_error_message("Anthropic", status_code, &body)
);
}

let started = Instant::now();
let mut stream = response.bytes_stream();
let mut buffer = super::SseLineBuffer::new();
let mut text = String::new();
let mut first_token_ms: Option<u128> = None;
let mut current_event = String::new();

while let Some(chunk) = stream.next().await {
let chunk = chunk.context("Anthropic stream interrupted")?;
for line in buffer.feed(&chunk) {
if let Some(event_type) = line.strip_prefix("event: ") {
current_event = event_type.to_string();
continue;
}
if let Some(data) = line.strip_prefix("data: ") {
if current_event == "content_block_delta" {
if let Some(content) = serde_json::from_str::<AnthropicStreamDelta>(data)
.ok()
.and_then(|p| p.delta.text)
.filter(|c| !c.is_empty())
{
if first_token_ms.is_none() {
first_token_ms = Some(started.elapsed().as_millis());
}
text.push_str(&content);
}
} else if current_event == "message_stop" {
break;
}
}
if line.is_empty() {
current_event.clear();
}
}
}

if text.is_empty() {
bail!("Anthropic stream completed without producing any content");
}

Ok(ModelResponse {
text,
status: status_code,
elapsed_ms: 0,
first_token_ms,
})
}

@@ -66,6 +148,16 @@ enum AnthropicContentBlock {
Other,
}

#[derive(Debug, Deserialize)]
struct AnthropicStreamDelta {
delta: AnthropicDeltaContent,
}

#[derive(Debug, Deserialize)]
struct AnthropicDeltaContent {
text: Option<String>,
}

#[cfg(test)]
mod tests {
use crate::runner::ModelRequest;
@@ -95,13 +187,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let response = super::send(&Client::new(), &request)
.await
@@ -125,13 +218,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let error = super::send(&Client::new(), &request)
.await
@@ -155,13 +249,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let message = super::send(&Client::new(), &request)
.await
@@ -189,13 +284,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let response = super::send(&Client::new(), &request)
.await


src/protocols/mod.rs  +24 -0

@@ -96,3 +96,27 @@ struct ErrorEnvelope {
struct ErrorBody {
message: Option<String>,
}

pub(crate) struct SseLineBuffer {
buffer: String,
}

impl SseLineBuffer {
pub fn new() -> Self {
Self {
buffer: String::new(),
}
}

pub fn feed(&mut self, chunk: &[u8]) -> Vec<String> {
let text = String::from_utf8_lossy(chunk);
self.buffer.push_str(&text);
let mut lines = Vec::new();
while let Some(pos) = self.buffer.find('\n') {
let line = self.buffer[..pos].trim_end_matches('\r').to_string();
self.buffer.drain(..=pos);
lines.push(line);
}
lines
}
}
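
Not part of the commit, but as a usage note: a minimal, self-contained sketch of how SseLineBuffer is intended to behave when an SSE line arrives split across network chunks (the struct is duplicated here only so the example compiles on its own).

// Sketch only: a copy of the buffer above, used to illustrate the intended behaviour.
struct SseLineBuffer {
    buffer: String,
}

impl SseLineBuffer {
    fn new() -> Self {
        Self { buffer: String::new() }
    }

    // Appends a chunk and returns every complete line seen so far;
    // a partial trailing line stays buffered until the next chunk arrives.
    fn feed(&mut self, chunk: &[u8]) -> Vec<String> {
        self.buffer.push_str(&String::from_utf8_lossy(chunk));
        let mut lines = Vec::new();
        while let Some(pos) = self.buffer.find('\n') {
            lines.push(self.buffer[..pos].trim_end_matches('\r').to_string());
            self.buffer.drain(..=pos);
        }
        lines
    }
}

fn main() {
    let mut buf = SseLineBuffer::new();
    // A "data:" line split across two chunks: nothing is emitted until the newline arrives.
    assert!(buf.feed(b"data: {\"delta\":").is_empty());
    assert_eq!(
        buf.feed(b" \"hi\"}\r\n\r\n"),
        vec!["data: {\"delta\": \"hi\"}".to_string(), String::new()]
    );
}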

src/protocols/openai.rs  +121 -28

@@ -1,8 +1,10 @@
use crate::runner::{ModelRequest, ModelResponse};
use anyhow::{Context, Result, bail};
use futures::StreamExt;
use reqwest::Client;
use serde::Deserialize;
use serde_json::json;
use std::time::Instant;

pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
let url = super::endpoint_url(&request.base_url, "/chat/completions")?;
@@ -47,6 +49,78 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
text,
status: status_code,
elapsed_ms: 0,
first_token_ms: None,
})
}

pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
let url = super::endpoint_url(&request.base_url, "/chat/completions")?;
let response = client
.post(url)
.bearer_auth(&request.api_token)
.json(&json!({
"model": request.model,
"messages": [{"role": "user", "content": request.prompt}],
"temperature": request.temperature,
"max_tokens": request.max_tokens,
"stream": true
}))
.send()
.await
.context("failed to send OpenAI streaming request")?;

let status = response.status();
let status_code = status.as_u16();

if !status.is_success() {
let body = response
.text()
.await
.context("failed to read OpenAI error response body")?;
bail!(
"{}",
super::upstream_error_message("OpenAI", status_code, &body)
);
}

let started = Instant::now();
let mut stream = response.bytes_stream();
let mut buffer = super::SseLineBuffer::new();
let mut text = String::new();
let mut first_token_ms: Option<u128> = None;

while let Some(chunk) = stream.next().await {
let chunk = chunk.context("OpenAI stream interrupted")?;
for line in buffer.feed(&chunk) {
let Some(data) = line.strip_prefix("data: ") else {
continue;
};
if data == "[DONE]" {
break;
}
if let Some(content) = serde_json::from_str::<OpenAiStreamChunk>(data)
.ok()
.and_then(|p| p.choices.into_iter().next())
.and_then(|d| d.delta.content)
.filter(|c| !c.is_empty())
{
if first_token_ms.is_none() {
first_token_ms = Some(started.elapsed().as_millis());
}
text.push_str(&content);
}
}
}

if text.is_empty() {
bail!("OpenAI stream completed without producing any content");
}

Ok(ModelResponse {
text,
status: status_code,
elapsed_ms: 0,
first_token_ms,
})
}

@@ -65,6 +139,21 @@ struct OpenAiMessage {
content: Option<String>,
}

#[derive(Debug, Deserialize)]
struct OpenAiStreamChunk {
choices: Vec<OpenAiStreamChoice>,
}

#[derive(Debug, Deserialize)]
struct OpenAiStreamChoice {
delta: OpenAiStreamDelta,
}

#[derive(Debug, Deserialize)]
struct OpenAiStreamDelta {
content: Option<String>,
}

#[cfg(test)]
mod tests {
use crate::runner::ModelRequest;
@@ -95,13 +184,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let response = super::send(&Client::new(), &request)
.await
@@ -125,13 +215,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: format!("{}/", server.uri()),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: format!("{}/", server.uri()),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let error = super::send(&Client::new(), &request)
.await
@@ -154,13 +245,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let message = super::send(&Client::new(), &request)
.await
@@ -190,13 +282,14 @@ mod tests {
.await;

let request = ModelRequest {
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false
};

let response = super::send(&Client::new(), &request)
.await


src/report.rs  +17 -0

@@ -38,6 +38,7 @@ pub struct BenchmarkSummaryReport {
pub wrong: u64,
pub failed: u64,
pub latency_ms: LatencyReport,
pub ttft_ms: LatencyReport,
}

#[derive(Debug, Clone, Serialize)]
@@ -77,6 +78,7 @@ pub struct RpmSummaryReport {
pub success: u64,
pub failure: u64,
pub latency_ms: LatencyReport,
pub ttft_ms: LatencyReport,
}

#[derive(Debug, Clone, Serialize)]
@@ -259,6 +261,11 @@ mod tests {
p95: Some(10),
p99: Some(10),
},
ttft_ms: LatencyReport {
p50: None,
p95: None,
p99: None,
},
},
errors: vec![],
wrong_cases: vec![],
@@ -293,6 +300,11 @@ mod tests {
p95: Some(10),
p99: Some(10),
},
ttft_ms: LatencyReport {
p50: None,
p95: None,
p99: None,
},
},
mode: "sustained".to_string(),
mode_detail: None,
@@ -329,6 +341,11 @@ mod tests {
p95: Some(20),
p99: Some(30),
},
ttft_ms: LatencyReport {
p50: None,
p95: None,
p99: None,
},
},
mode_detail: Some(RpmModeDetailReport {
burst: Some(PhaseSummaryReport {


src/runner.rs  +14 -3

@@ -13,6 +13,7 @@ pub struct ModelRequest {
pub prompt: String,
pub temperature: f32,
pub max_tokens: u32,
pub stream: bool,
}

impl fmt::Debug for ModelRequest {
@@ -25,6 +26,7 @@ impl fmt::Debug for ModelRequest {
.field("prompt", &self.prompt)
.field("temperature", &self.temperature)
.field("max_tokens", &self.max_tokens)
.field("stream", &self.stream)
.finish()
}
}
@@ -34,6 +36,7 @@ pub struct ModelResponse {
pub text: String,
pub status: u16,
pub elapsed_ms: u128,
pub first_token_ms: Option<u128>,
}

pub async fn run_model_request(
@@ -50,9 +53,16 @@ pub async fn run_model_request_with_client(
request: &ModelRequest,
) -> Result<ModelResponse> {
let started = Instant::now();
let mut response = match protocol {
ProtocolKind::Openai => protocols::openai::send(client, request).await?,
ProtocolKind::Anthropic => protocols::anthropic::send(client, request).await?,
let mut response = if request.stream {
match protocol {
ProtocolKind::Openai => protocols::openai::send_stream(client, request).await?,
ProtocolKind::Anthropic => protocols::anthropic::send_stream(client, request).await?,
}
} else {
match protocol {
ProtocolKind::Openai => protocols::openai::send(client, request).await?,
ProtocolKind::Anthropic => protocols::anthropic::send(client, request).await?,
}
};
response.elapsed_ms = started.elapsed().as_millis();
Ok(response)
@@ -71,6 +81,7 @@ mod tests {
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
stream: false,
};

let debug = format!("{request:?}");

