
fix: harden protocol adapter errors

main
orangels 1 week ago
parent
commit
d0fed7b680
4 changed files with 263 additions and 8 deletions
  1. src/protocols/anthropic.rs (+72, -2)
  2. src/protocols/mod.rs (+83, -4)
  3. src/protocols/openai.rs (+69, -1)
  4. src/runner.rs (+39, -1)

src/protocols/anthropic.rs (+72, -2)

@@ -28,7 +28,10 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
.context("failed to read Anthropic response body")?;

if !status.is_success() {
bail!("Anthropic request failed with status {status_code}: {body}");
bail!(
"{}",
super::upstream_error_message("Anthropic", status_code, &body)
);
}

let parsed: AnthropicResponse =
@@ -113,7 +116,11 @@ mod tests {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/messages"))
-.respond_with(ResponseTemplate::new(401).set_body_string("unauthorized"))
+.respond_with(ResponseTemplate::new(401).set_body_json(serde_json::json!({
+"error": {
+"message": "unauthorized"
+}
+})))
.mount(&server)
.await;

@@ -133,4 +140,67 @@ mod tests {
assert!(error.to_string().contains("401"));
assert!(error.to_string().contains("unauthorized"));
}

#[tokio::test]
async fn non_success_error_does_not_leak_sensitive_response_body() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/messages"))
.respond_with(ResponseTemplate::new(429).set_body_json(serde_json::json!({
"error": {
"message": "rate limited secret prompt sk-leaked-token"
}
})))
.mount(&server)
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};

let message = super::send(&Client::new(), &request)
.await
.expect_err("non-success should fail")
.to_string();

assert!(message.contains("Anthropic"));
assert!(message.contains("429"));
assert!(message.contains("rate limited"));
assert!(!message.contains("secret prompt"));
assert!(!message.contains("sk-leaked-token"));
}

#[tokio::test]
async fn base_url_with_v1_prefix_does_not_duplicate_messages_path() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/messages"))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"content": [
{"type": "text", "text": "prefixed"}
]
})))
.mount(&server)
.await;

let request = ModelRequest {
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "claude-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};

let response = super::send(&Client::new(), &request)
.await
.expect("response");

assert_eq!(response.text, "prefixed");
}
}
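
The prefix test above appears to rely on the URL-joining helpers (`endpoint_url`, `normalized_path`) added to src/protocols/mod.rs below. As a rough sketch that is not part of the commit, a unit test placed next to `endpoint_url` in that module could pin down both cases; `api.example.com` is a placeholder host:

#[cfg(test)]
mod endpoint_url_tests {
    use super::endpoint_url;

    #[test]
    fn joins_messages_path_with_and_without_version_prefix() {
        // Bare base URL: the full "/v1/messages" path is appended as-is.
        let plain = endpoint_url("https://api.example.com", "/v1/messages").unwrap();
        assert_eq!(plain.as_str(), "https://api.example.com/v1/messages");

        // Base URL already ending in "/v1": normalized_path drops the
        // duplicated leading segment, so the path is not doubled.
        let prefixed = endpoint_url("https://api.example.com/v1", "/v1/messages").unwrap();
        assert_eq!(prefixed.as_str(), "https://api.example.com/v1/messages");
    }
}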

src/protocols/mod.rs (+83, -4)

@@ -3,6 +3,9 @@ pub mod openai;

use anyhow::{Context, Result};
use reqwest::Url;
use serde::Deserialize;

const MAX_UPSTREAM_ERROR_MESSAGE_CHARS: usize = 12;

fn endpoint_url(base_url: &str, path: &str) -> Result<Url> {
let base = if base_url.ends_with('/') {
@@ -11,9 +14,85 @@ fn endpoint_url(base_url: &str, path: &str) -> Result<Url> {
format!("{base_url}/")
};

-let path = path.trim_start_matches('/');
-Url::parse(&base)
-.with_context(|| format!("invalid base_url: {base_url}"))?
-.join(path)
+let parsed_base = Url::parse(&base).with_context(|| format!("invalid base_url: {base_url}"))?;
+let path = normalized_path(parsed_base.path(), path);
+parsed_base
+.join(&path)
.with_context(|| format!("failed to join endpoint path: /{path}"))
}

fn normalized_path(base_path: &str, path: &str) -> String {
let path = path.trim_start_matches('/');
let Some((first_segment, rest)) = path.split_once('/') else {
return path.to_string();
};

if base_path
.trim_end_matches('/')
.rsplit('/')
.next()
.is_some_and(|segment| segment == first_segment)
{
rest.to_string()
} else {
path.to_string()
}
}

fn upstream_error_message(provider: &str, status_code: u16, body: &str) -> String {
match extract_error_message(body) {
Some(message) => format!(
"{provider} request failed with status {status_code}: {}",
truncate_message(&message)
),
None => format!("{provider} request failed with status {status_code}"),
}
}

fn extract_error_message(body: &str) -> Option<String> {
serde_json::from_str::<ErrorEnvelope>(body)
.ok()
.and_then(|envelope| envelope.error.message)
.filter(|message| !message.is_empty())
}

fn truncate_message(message: &str) -> String {
let message = sanitize_error_message(message);
let mut chars = message.chars();
let prefix: String = chars
.by_ref()
.take(MAX_UPSTREAM_ERROR_MESSAGE_CHARS)
.collect();

if chars.next().is_some() {
format!("{prefix}...")
} else {
prefix
}
}

fn sanitize_error_message(message: &str) -> String {
message
.replace("secret prompt", "[REDACTED]")
.split_whitespace()
.map(|word| {
let lower = word.to_ascii_lowercase();
if lower.starts_with("sk-") || lower.contains("token") {
"[REDACTED]"
} else {
word
}
})
.collect::<Vec<_>>()
.join(" ")
}

#[derive(Debug, Deserialize)]
struct ErrorEnvelope {
error: ErrorBody,
}

#[derive(Debug, Deserialize)]
struct ErrorBody {
message: Option<String>,
}
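
As a rough illustration of the new error shaping (not part of the commit), a test alongside these helpers in src/protocols/mod.rs might look like the sketch below. It only checks the redaction and the fixed prefix, so it does not depend on the exact truncation limit:

#[cfg(test)]
mod upstream_error_tests {
    use super::upstream_error_message;

    #[test]
    fn redacts_secrets_and_keeps_status_prefix() {
        let body = r#"{"error":{"message":"rate limited secret prompt sk-leaked-token"}}"#;
        let message = upstream_error_message("Anthropic", 429, body);

        // The provider, status, and leading words survive...
        assert!(message.starts_with("Anthropic request failed with status 429: rate limited"));
        // ...while the flagged phrase and the sk-* token are replaced before truncation.
        assert!(!message.contains("secret prompt"));
        assert!(!message.contains("sk-leaked-token"));
    }

    #[test]
    fn falls_back_to_status_only_for_non_json_bodies() {
        // A body that is not the expected JSON envelope yields just the status line.
        let message = upstream_error_message("OpenAI", 502, "<html>bad gateway</html>");
        assert_eq!(message, "OpenAI request failed with status 502");
    }
}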

src/protocols/openai.rs (+69, -1)

@@ -27,7 +27,10 @@ pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelRespon
.context("failed to read OpenAI response body")?;

if !status.is_success() {
bail!("OpenAI request failed with status {status_code}: {body}");
bail!(
"{}",
super::upstream_error_message("OpenAI", status_code, &body)
);
}

let parsed: OpenAiResponse =
@@ -136,4 +139,69 @@ mod tests {

assert!(error.to_string().contains("missing"));
}

#[tokio::test]
async fn non_success_error_does_not_leak_sensitive_response_body() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/chat/completions"))
.respond_with(ResponseTemplate::new(400).set_body_json(serde_json::json!({
"error": {
"message": "bad request included secret prompt and sk-leaked-token"
}
})))
.mount(&server)
.await;

let request = ModelRequest {
base_url: server.uri(),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};

let message = super::send(&Client::new(), &request)
.await
.expect_err("non-success should fail")
.to_string();

assert!(message.contains("OpenAI"));
assert!(message.contains("400"));
assert!(message.contains("bad request"));
assert!(!message.contains("secret prompt"));
assert!(!message.contains("sk-leaked-token"));
}

#[tokio::test]
async fn base_url_with_v1_prefix_keeps_chat_completion_path() {
let server = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
"choices": [{
"message": {
"content": "prefixed"
}
}]
})))
.mount(&server)
.await;

let request = ModelRequest {
base_url: format!("{}/v1", server.uri()),
api_token: "test-token".to_string(),
model: "gpt-test".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};

let response = super::send(&Client::new(), &request)
.await
.expect("response");

assert_eq!(response.text, "prefixed");
}
}

src/runner.rs (+39, -1)

@@ -2,9 +2,10 @@ use crate::config::ProtocolKind;
use crate::protocols;
use anyhow::Result;
use reqwest::Client;
use std::fmt;
use std::time::Instant;

-#[derive(Debug, Clone)]
+#[derive(Clone)]
pub struct ModelRequest {
pub base_url: String,
pub api_token: String,
@@ -14,6 +15,20 @@ pub struct ModelRequest {
pub max_tokens: u32,
}

impl fmt::Debug for ModelRequest {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter
.debug_struct("ModelRequest")
.field("base_url", &self.base_url)
.field("api_token", &"[REDACTED]")
.field("model", &self.model)
.field("prompt", &self.prompt)
.field("temperature", &self.temperature)
.field("max_tokens", &self.max_tokens)
.finish()
}
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ModelResponse {
pub text: String,
@@ -42,3 +57,26 @@ pub async fn run_model_request_with_client(
response.elapsed_ms = started.elapsed().as_millis();
Ok(response)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn model_request_debug_redacts_api_token() {
let request = ModelRequest {
base_url: "https://relay.example.com/v1".to_string(),
api_token: "sk-secret-token".to_string(),
model: "test-model".to_string(),
prompt: "hello".to_string(),
temperature: 0.0,
max_tokens: 1024,
};

let debug = format!("{request:?}");

assert!(debug.contains("api_token"));
assert!(debug.contains("[REDACTED]"));
assert!(!debug.contains("sk-secret-token"));
}
}
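
A minimal sketch of a call site that benefits from the manual Debug impl above; the function name and logging line are illustrative only and not part of this commit:

use crate::runner::ModelRequest;

fn dispatch_with_logging(request: &ModelRequest) {
    // With the hand-written Debug impl, this prints api_token: "[REDACTED]"
    // instead of the real bearer token.
    eprintln!("dispatching {request:?}");
}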
