
feat: record stream chunk timings in reports

codex/lq-token-test-init
orangels, 3 days ago
commit c346233072
6 changed files with 29 additions and 0 deletions

1. src/cli.rs (+7, -0)
2. src/protocols/anthropic.rs (+5, -0)
3. src/protocols/google.rs (+5, -0)
4. src/protocols/openai.rs (+5, -0)
5. src/report.rs (+6, -0)
6. src/runner.rs (+1, -0)

src/cli.rs (+7, -0)

@@ -1377,6 +1377,8 @@ fn response_debug_report(
latency_ms: response.elapsed_ms,
ttft_ms: response.first_token_ms,
ttft_latency_delta_ms,
+chunk_count: response.chunk_elapsed_ms.len(),
+chunk_elapsed_ms: response.chunk_elapsed_ms.clone(),
has_think_tags,
})
}
@@ -2013,6 +2015,7 @@ mod tests {
status: 200,
elapsed_ms: 1_500,
first_token_ms: Some(750),
+chunk_elapsed_ms: vec![100, 750],
};

let debug = response_debug_report(
@@ -2031,6 +2034,8 @@ mod tests {
assert_eq!(debug.latency_ms, 1_500);
assert_eq!(debug.ttft_ms, Some(750));
assert_eq!(debug.ttft_latency_delta_ms, Some(750));
+assert_eq!(debug.chunk_count, 2);
+assert_eq!(debug.chunk_elapsed_ms, vec![100, 750]);
assert!(!debug.has_think_tags);
}

@@ -2041,6 +2046,7 @@ mod tests {
status: 200,
elapsed_ms: 5_000,
first_token_ms: Some(100),
+chunk_elapsed_ms: vec![100],
};

let debug = response_debug_report(None, None, None, "prompt".to_string(), &response)
@@ -2058,6 +2064,7 @@ mod tests {
status: 200,
elapsed_ms: 2_000,
first_token_ms: None,
+chunk_elapsed_ms: Vec::new(),
};

let debug = response_debug_report(
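
The cli.rs change mirrors the response's timing vector straight into the debug report, deriving chunk_count from the same vector so the two fields can never disagree. A minimal sketch of that mapping, with both structs trimmed to just the new fields (the real types carry many more, and the real function is response_debug_report):

// Sketch only: struct shapes reduced to the fields this commit touches.
struct ModelResponse {
    chunk_elapsed_ms: Vec<u128>,
}

struct RequestDebugReport {
    chunk_count: usize,
    chunk_elapsed_ms: Vec<u128>,
}

fn to_debug_report(response: &ModelResponse) -> RequestDebugReport {
    RequestDebugReport {
        // Derived, not stored independently, so it always matches the vector.
        chunk_count: response.chunk_elapsed_ms.len(),
        chunk_elapsed_ms: response.chunk_elapsed_ms.clone(),
    }
}

fn main() {
    let response = ModelResponse { chunk_elapsed_ms: vec![100, 750] };
    let debug = to_debug_report(&response);
    assert_eq!(debug.chunk_count, 2);
    assert_eq!(debug.chunk_elapsed_ms, vec![100, 750]);
}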


src/protocols/anthropic.rs (+5, -0)

@@ -114,6 +114,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
status: status_code,
elapsed_ms: 0,
first_token_ms: None,
+chunk_elapsed_ms: Vec::new(),
})
}

@@ -187,6 +188,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let mut text = String::new();
let mut raw_stream = String::new();
let mut first_token_ms: Option<u128> = None;
+let mut chunk_elapsed_ms = Vec::new();
let mut current_event = String::new();
let mut done = false;

@@ -207,6 +209,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
return Err(error).context("Anthropic stream interrupted");
}
};
+chunk_elapsed_ms.push(started.elapsed().as_millis());
for line in buffer.feed(&chunk) {
raw_stream.push_str(&line);
raw_stream.push('\n');
@@ -268,6 +271,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
status: status_code,
elapsed_ms: 0,
first_token_ms,
+chunk_elapsed_ms,
})
}

@@ -828,6 +832,7 @@ mod tests {
assert_eq!(response.status, 200);
assert_eq!(response.text, "hi there");
assert!(response.first_token_ms.is_some());
+assert_eq!(response.chunk_elapsed_ms.len(), 1);

let raw = read_single_debug_file(temp_dir.path());
assert!(raw.contains("event: content_block_delta"));
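
All three protocol clients get the identical instrumentation: one Instant captured before the stream starts, with the cumulative elapsed milliseconds pushed for every chunk the transport yields, before the chunk is fed to the line buffer. A self-contained sketch of that pattern, with a sleep standing in for awaiting the next network chunk:

use std::thread::sleep;
use std::time::{Duration, Instant};

fn main() {
    // One clock for the whole stream; each entry is a cumulative offset, not a gap.
    let started = Instant::now();
    let mut chunk_elapsed_ms: Vec<u128> = Vec::new();

    for _ in 0..3 {
        sleep(Duration::from_millis(50)); // stand-in for `stream.next().await`
        chunk_elapsed_ms.push(started.elapsed().as_millis());
    }

    println!("{chunk_elapsed_ms:?}"); // e.g. [50, 101, 152], monotonically non-decreasing
}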


src/protocols/google.rs (+5, -0)

@@ -110,6 +110,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
status: status_code,
elapsed_ms: 0,
first_token_ms: None,
+chunk_elapsed_ms: Vec::new(),
})
}

@@ -182,6 +183,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let mut text = String::new();
let mut raw_stream = String::new();
let mut first_token_ms: Option<u128> = None;
+let mut chunk_elapsed_ms = Vec::new();

while let Some(chunk) = stream.next().await {
let chunk = match chunk {
@@ -200,6 +202,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
return Err(error).context("Google stream interrupted");
}
};
+chunk_elapsed_ms.push(started.elapsed().as_millis());
for line in buffer.feed(&chunk) {
raw_stream.push_str(&line);
raw_stream.push('\n');
@@ -248,6 +251,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
status: status_code,
elapsed_ms: 0,
first_token_ms,
+chunk_elapsed_ms,
})
}

@@ -544,6 +548,7 @@ mod tests {
assert_eq!(response.status, 200);
assert_eq!(response.text, "hi there");
assert!(response.first_token_ms.is_some());
+assert_eq!(response.chunk_elapsed_ms.len(), 1);
}

#[tokio::test]


src/protocols/openai.rs (+5, -0)

@@ -116,6 +116,7 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
status: status_code,
elapsed_ms: 0,
first_token_ms: None,
+chunk_elapsed_ms: Vec::new(),
})
}

@@ -178,6 +179,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
let mut text = String::new();
let mut raw_stream = String::new();
let mut first_token_ms: Option<u128> = None;
+let mut chunk_elapsed_ms = Vec::new();
let mut done = false;

while let Some(chunk) = stream.next().await {
@@ -197,6 +199,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
return Err(error).context("OpenAI stream interrupted");
}
};
+chunk_elapsed_ms.push(started.elapsed().as_millis());
for line in buffer.feed(&chunk) {
raw_stream.push_str(&line);
raw_stream.push('\n');
@@ -252,6 +255,7 @@ pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<Mode
status: status_code,
elapsed_ms: 0,
first_token_ms,
+chunk_elapsed_ms,
})
}

@@ -780,6 +784,7 @@ mod tests {
assert_eq!(response.status, 200);
assert_eq!(response.text, "hi there");
assert!(response.first_token_ms.is_some());
+assert_eq!(response.chunk_elapsed_ms.len(), 1);

let raw = read_single_debug_file(temp_dir.path());
assert!(raw.contains("data: {\"choices\""));


src/report.rs (+6, -0)

@@ -155,6 +155,8 @@ pub struct RequestDebugReport {
pub latency_ms: u128,
pub ttft_ms: Option<u128>,
pub ttft_latency_delta_ms: Option<u128>,
+pub chunk_count: usize,
+pub chunk_elapsed_ms: Vec<u128>,
pub has_think_tags: bool,
}

@@ -416,6 +418,8 @@ mod tests {
latency_ms: 1000,
ttft_ms: Some(1000),
ttft_latency_delta_ms: Some(0),
+chunk_count: 2,
+chunk_elapsed_ms: vec![100, 1000],
has_think_tags: true,
}],
};
@@ -427,6 +431,8 @@ mod tests {
assert!(json.contains("\"duration\":\"60s\""));
assert!(json.contains("\"debug_requests\""));
assert!(json.contains("\"reason\":\"near_ttft_latency\""));
+assert!(json.contains("\"chunk_count\":2"));
+assert!(json.contains("\"chunk_elapsed_ms\":[100,1000]"));
assert!(json.contains("\"has_think_tags\":true"));
}
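
The report tests assert on raw JSON substrings, which implies RequestDebugReport serializes via serde with default field naming; the derive itself isn't visible in this diff. A sketch reproducing just the new fields' wire shape (serde and serde_json assumed as dependencies):

use serde::Serialize;

#[derive(Serialize)]
struct TimingSlice {
    chunk_count: usize,
    chunk_elapsed_ms: Vec<u128>,
}

fn main() {
    let slice = TimingSlice { chunk_count: 2, chunk_elapsed_ms: vec![100, 1000] };
    // Prints {"chunk_count":2,"chunk_elapsed_ms":[100,1000]}, the substrings the test checks.
    println!("{}", serde_json::to_string(&slice).unwrap());
}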



src/runner.rs (+1, -0)

@@ -243,6 +243,7 @@ pub struct ModelResponse {
pub status: u16,
pub elapsed_ms: u128,
pub first_token_ms: Option<u128>,
+pub chunk_elapsed_ms: Vec<u128>,
}

pub async fn run_model_request(
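
Since ModelResponse stores cumulative offsets rather than per-chunk gaps, a consumer of the report can recover inter-chunk latency with a windowed difference. This derivation is not part of the commit, only a sketch of how the new field might be read downstream:

fn main() {
    let chunk_elapsed_ms: Vec<u128> = vec![100, 750, 900];

    // Gaps between consecutive chunks: [650, 150]. The first entry of the
    // source vector (100) is the offset of the first chunk itself.
    let gaps: Vec<u128> = chunk_elapsed_ms
        .windows(2)
        .map(|w| w[1] - w[0])
        .collect();

    println!("first chunk at {}ms, gaps {:?}", chunk_elapsed_ms[0], gaps);
}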

