diff --git a/README.md b/README.md
index ae25177..6878da0 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# lq_token_test
-`lq_token_test` is a Rust CLI for checking LLM relay compatibility, running small benchmark probes, and testing request pacing against RPM targets. It supports OpenAI-compatible and Anthropic-compatible relay protocols, fetches local benchmark datasets, prints terminal summaries, and writes JSON reports for later comparison.
+`lq_token_test` is a Rust CLI for checking LLM relay compatibility, running small benchmark probes, and testing request pacing against RPM targets. It supports OpenAI-compatible, Anthropic-compatible, and Google Gemini relay protocols, fetches local benchmark datasets, prints terminal summaries, and writes JSON reports for later comparison.
## Build And Test
@@ -46,6 +46,18 @@ providers:
      budget_tokens: 10000
      display: "omitted"
+  google:
+    protocol: google
+    base_url: "https://generativelanguage.googleapis.com/v1beta"
+    api_token: "${GOOGLE_API_KEY}"
+    default_model: "gemini-3-pro-preview"
+    stream: true
+    thinking:
+      enabled: true
+      budget_tokens: 5000
+      effort: "high"
+      display: "summarized"
+
benchmarks:
  data_dir: "data/benchmarks"
  aime2026:
@@ -61,9 +73,10 @@ Values written as `${ENV_NAME}` are expanded when the config is loaded. For exam
```bash
export OPENAI_RELAY_TOKEN="..."
export ANTHROPIC_RELAY_TOKEN="..."
+export GOOGLE_API_KEY="..."
```
-Thinking can also be enabled per run with CLI overrides such as `--thinking true`, `--thinking-type enabled`, `--thinking-budget-tokens 10000`, `--thinking-display omitted`, `--reasoning-effort high`, and `--reasoning-summary auto`. For Anthropic, enabling thinking omits `temperature` from the upstream request.
+Thinking can also be enabled per run with CLI overrides such as `--thinking true`, `--thinking-type enabled`, `--thinking-budget-tokens 10000`, `--thinking-display omitted`, `--reasoning-effort high`, and `--reasoning-summary auto`. For Anthropic, enabling thinking omits `temperature` from the upstream request. For Google Gemini, `budget_tokens` maps to `generationConfig.thinkingConfig.thinkingBudget`, `effort` maps to `thinkingLevel`, and `display: summarized` enables `includeThoughts`.
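+
+As an illustration, with `budget_tokens: 5000`, `effort: high`, and `display: summarized` (the values exercised by this change's tests), the upstream request's `generationConfig` looks like this, where `temperature` and `maxOutputTokens` come from the run's own request settings:
+
+```json
+{
+  "generationConfig": {
+    "temperature": 0.0,
+    "maxOutputTokens": 1024,
+    "thinkingConfig": {
+      "includeThoughts": true,
+      "thinkingBudget": 5000,
+      "thinkingLevel": "high"
+    }
+  }
+}
+```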
## Dataset Fetching
@@ -111,6 +124,16 @@ cargo run -- check \
--prompt "Reply with the word ready."
```
+Run a simple Google Gemini relay check:
+
+```bash
+cargo run -- check \
+  --config config.yaml \
+  --provider google \
+  --model gemini-3-pro-preview \
+  --prompt "Reply with the word ready."
+```
+
The check command prints the HTTP status, elapsed milliseconds, and model text.
## Benchmarks
diff --git a/config.example.yaml b/config.example.yaml
index 3424afe..110cdee 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -25,6 +25,18 @@ providers:
      effort: high # available in Anthropic's adaptive mode
      display: summarized # summarized | omitted
+  google:
+    protocol: google
+    base_url: "https://generativelanguage.googleapis.com/v1beta"
+    api_token: "${GOOGLE_API_KEY}"
+    default_model: "gemini-3-pro-preview"
+    stream: true
+    thinking:
+      enabled: true
+      budget_tokens: 5000 # Gemini 2.5: generationConfig.thinkingConfig.thinkingBudget
+      effort: high # Gemini 3: generationConfig.thinkingConfig.thinkingLevel
+      display: summarized # summarized -> includeThoughts: true, omitted -> false
+
benchmarks:
  data_dir: "data/benchmarks"
  aime2026:
diff --git a/docs/USAGE.zh-CN.md b/docs/USAGE.zh-CN.md
index 63c9dbe..f441528 100644
--- a/docs/USAGE.zh-CN.md
+++ b/docs/USAGE.zh-CN.md
@@ -6,6 +6,7 @@
- OpenAI-compatible protocol
- Anthropic-compatible protocol
+- Google Gemini protocol
- YAML configuration file
- AIME 2026 dataset
- GPQA-Diamond dataset
@@ -69,6 +70,18 @@ providers:
      budget_tokens: 10000
      display: "omitted"
+  google:
+    protocol: google
+    base_url: "https://generativelanguage.googleapis.com/v1beta"
+    api_token: "${GOOGLE_API_KEY}"
+    default_model: "gemini-3-pro-preview"
+    stream: true
+    thinking:
+      enabled: true
+      budget_tokens: 5000
+      effort: "high"
+      display: "summarized"
+
benchmarks:
  data_dir: "data/benchmarks"
  aime2026:
@@ -84,6 +97,7 @@ benchmarks:
```bash
export OPENAI_RELAY_TOKEN="..."
export ANTHROPIC_RELAY_TOKEN="..."
+export GOOGLE_API_KEY="..."
```
Note: the tool only resolves the token for the provider actually in use. For example, when running with `--provider anthropic` alone, `OPENAI_RELAY_TOKEN` does not need to be set.
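+
+For example, this run only needs `ANTHROPIC_RELAY_TOKEN` to be set:
+
+```bash
+cargo run -- check --provider anthropic --prompt "hello"
+```
+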
@@ -105,6 +119,8 @@ cargo run -- check --provider openai --prompt "hello" \
When thinking is enabled for Anthropic, the request body no longer sends `temperature`.
+When thinking is enabled for Google Gemini, the request writes `generationConfig.thinkingConfig`: `budget_tokens` maps to `thinkingBudget`, `effort` maps to Gemini 3's `thinkingLevel`, `display: summarized` sends `includeThoughts: true`, and `display: omitted` sends `includeThoughts: false`.
+
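+Conversely, per the tests in this change, `display: omitted` with `budget_tokens: 0` turns thought output off entirely; the resulting `thinkingConfig` is:
+
+```json
+{
+  "thinkingConfig": {
+    "includeThoughts": false,
+    "thinkingBudget": 0
+  }
+}
+```
+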
## 3. Protocol Connectivity Checks
OpenAI-compatible:
@@ -127,6 +143,16 @@ cargo run -- check \
--prompt "Reply with the word ready."
```
+Google Gemini:
+
+```bash
+cargo run -- check \
+  --config config.yaml \
+  --provider google \
+  --model gemini-3-pro-preview \
+  --prompt "Reply with the word ready."
+```
+
If `--model` is omitted, the `default_model` from the config is used.
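+
+For example, this check falls back to the google provider's `default_model` (`gemini-3-pro-preview` above):
+
+```bash
+cargo run -- check --config config.yaml --provider google --prompt "Reply with the word ready."
+```
+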
## 4. Downloading Datasets
diff --git a/py_demo/gemini_api_test.py b/py_demo/gemini_api_test.py
new file mode 100644
index 0000000..55e4eb9
--- /dev/null
+++ b/py_demo/gemini_api_test.py
@@ -0,0 +1,77 @@
+import os
+
+from google import genai
+from google.genai import types
+
+GEMINI_API_KEY = os.environ["GEMINI_API_KEY"]
+GEMINI_BASE_URL = os.environ.get("GEMINI_BASE_URL", "https://generativelanguage.googleapis.com/v1beta")
+MODEL_ID = os.environ.get("GEMINI_MODEL", "gemini-3.1-pro-preview")
+
+
+def _make_client() -> genai.Client:
+    return genai.Client(
+        api_key=GEMINI_API_KEY,
+        http_options=types.HttpOptions(base_url=GEMINI_BASE_URL),
+    )
+
+
+def list_models():
+    client = _make_client()
+    models = client.models.list()
+    model_ids = sorted([m.name for m in models])
+    print(f"Found {len(model_ids)} models:")
+    for model_id in model_ids:
+        print(f" - {model_id}")
+
+
+def test_model(
+    stream: bool = False,
+    thinking: bool = True,
+    prompt: str = "Hello, how are you?",
+):
+    client = _make_client()
+    # include_thoughts=True asks the API to return thought parts; thinking_budget caps reasoning tokens
+    thinking_config = (
+        types.ThinkingConfig(include_thoughts=True, thinking_budget=5000)
+        if thinking
+        else types.ThinkingConfig(include_thoughts=False)
+    )
+    config = types.GenerateContentConfig(thinking_config=thinking_config)
+
+    if stream:
+        in_thinking = False  # whether the current output position is inside thought text
+        for chunk in client.models.generate_content_stream(
+            model=MODEL_ID, contents=prompt, config=config
+        ):
+            if not chunk.candidates:
+                continue
+            for part in chunk.candidates[0].content.parts or []:
+                if part.thought:
+                    if not in_thinking:
+                        print("", flush=True)
+                        in_thinking = True
+                    print(part.text, end="", flush=True)
+                else:
+                    if in_thinking:
+                        print("\n\n", flush=True)
+                        in_thinking = False
+                    print(part.text or "", end="", flush=True)
+        if in_thinking:
+            print("\n", flush=True)
+        print()
+    else:
+        response = client.models.generate_content(
+            model=MODEL_ID, contents=prompt, config=config
+        )
+        for part in response.candidates[0].content.parts:
+            if part.thought:
+                print(f"\n{part.text}\n\n")
+            else:
+                print(part.text or "", end="")
+        print()
+
+
+if __name__ == "__main__":
+    test_model(
+        stream=True,
+        thinking=False,
+        prompt="Explain what MVCC is and give one example of how PostgreSQL uses it, in under 150 characters.",
+    )
diff --git a/py_demo/open_api_test.py b/py_demo/open_api_test.py
new file mode 100644
index 0000000..3f548df
--- /dev/null
+++ b/py_demo/open_api_test.py
@@ -0,0 +1,54 @@
+import os
+
+import openai
+
+OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
+OPENAI_BASE_URL = os.environ.get("OPENAI_BASE_URL", "https://lancerouter.ai/v1")
+MODEL_ID = os.environ.get("OPENAI_MODEL", "google/gemini-3.1-pro-preview")
+
+def list_models():
+    client = openai.OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
+    models = client.models.list()
+    model_ids = sorted([m.id for m in models.data])
+    print(f"Found {len(model_ids)} models:")
+    for model_id in model_ids:
+        print(f" - {model_id}")
+
+
+def test_model(stream: bool = False, thinking: bool = True, prompt: str = "Hello, how are you?"):
+    client = openai.OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL)
+
+    extra_body = {"thinking": {"type": "enabled", "budget_tokens": 5000}} if thinking else {}
+
+    if stream:
+        response = client.chat.completions.create(
+            model=MODEL_ID,
+            messages=[{"role": "user", "content": prompt}],
+            stream=True,
+            extra_body=extra_body,
+        )
+        for chunk in response:
+            if not chunk.choices:
+                continue
+            delta = chunk.choices[0].delta
+            # Print thinking content (thinking block) when the relay includes it on the delta
+            if hasattr(delta, "thinking") and delta.thinking:
+                print(delta.thinking, end="", flush=True)
+            elif delta.content:
+                print(delta.content, end="", flush=True)
+    else:
+        response = client.chat.completions.create(
+            model=MODEL_ID,
+            messages=[{"role": "user", "content": prompt}],
+            extra_body=extra_body,
+        )
+        message = response.choices[0].message
+        # Print thinking content (thinking block) when the relay includes it on the message
+        if hasattr(message, "thinking") and message.thinking:
+            print(f"\n{message.thinking}\n\n")
+        print(message.content)
+
+
+if __name__ == "__main__":
+    # list_models()
+    test_model(stream=True, thinking=False, prompt="Explain what MVCC is and give one example of how PostgreSQL uses it, in under 150 characters.")
diff --git a/src/config.rs b/src/config.rs
index 3075cc9..db94b5a 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -32,6 +32,7 @@ pub enum ConfigError {
pub enum ProtocolKind {
    Openai,
    Anthropic,
+    Google,
}
#[derive(Clone, Deserialize)]
@@ -357,6 +358,33 @@ providers:
assert_eq!(provider.api_token, "anthropic-secret");
}
+ #[test]
+ fn loads_google_protocol_provider() {
+ let config = AppConfig::load_from_str_with_env(
+ r#"
+default_provider: google
+providers:
+ google:
+ protocol: google
+ base_url: https://generativelanguage.googleapis.com/v1beta
+ api_token: ${GOOGLE_API_KEY}
+ default_model: gemini-3-pro-preview
+"#,
+ |_| Err(()),
+ )
+ .expect("load config should not require provider tokens");
+
+ let provider = config
+ .resolved_provider_with_env(None, |name| match name {
+ "GOOGLE_API_KEY" => Ok("google-secret".to_string()),
+ _ => Err(()),
+ })
+ .expect("google provider");
+
+ assert_eq!(provider.protocol, ProtocolKind::Google);
+ assert_eq!(provider.api_token, "google-secret");
+ }
+
#[test]
fn provider_stream_defaults_false_and_can_be_enabled() {
let config = AppConfig::load_from_str_with_env(
diff --git a/src/protocols/google.rs b/src/protocols/google.rs
new file mode 100644
index 0000000..461fe95
--- /dev/null
+++ b/src/protocols/google.rs
@@ -0,0 +1,333 @@
+use crate::runner::{ModelRequest, ModelResponse};
+use anyhow::{Context, Result, bail};
+use futures::StreamExt;
+use reqwest::Client;
+use serde::Deserialize;
+use serde_json::{Value, json};
+use std::time::Instant;
+
+pub async fn send(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
+    let url = google_endpoint(&request.base_url, &request.model, "generateContent")?;
+    let response = client
+        .post(url)
+        .header("x-goog-api-key", &request.api_token)
+        .json(&request_body(request))
+        .send()
+        .await
+        .context("failed to send Google generateContent request")?;
+
+    let status = response.status();
+    let status_code = status.as_u16();
+    let body = response
+        .text()
+        .await
+        .context("failed to read Google response body")?;
+    if let Some(raw_debug) = &request.raw_debug {
+        raw_debug
+            .write_response("google-json", &body)
+            .await
+            .context("failed to write Google raw debug response")?;
+    }
+
+    if !status.is_success() {
+        bail!(
+            "{}",
+            super::upstream_error_message("Google", status_code, &body)
+        );
+    }
+
+    let parsed: GoogleResponse =
+        serde_json::from_str(&body).context("failed to parse Google response JSON")?;
+    let text = response_text(parsed).context("Google response missing non-thought text part")?;
+
+    Ok(ModelResponse {
+        text,
+        status: status_code,
+        // elapsed_ms is overwritten by the runner once the request completes
+        elapsed_ms: 0,
+        first_token_ms: None,
+    })
+}
+
+pub async fn send_stream(client: &Client, request: &ModelRequest) -> Result<ModelResponse> {
+    let url = google_endpoint(&request.base_url, &request.model, "streamGenerateContent")?;
+    let started = Instant::now();
+    let response = client
+        .post(url)
+        .header("x-goog-api-key", &request.api_token)
+        .json(&request_body(request))
+        .send()
+        .await
+        .context("failed to send Google streamGenerateContent request")?;
+
+    let status = response.status();
+    let status_code = status.as_u16();
+
+    if !status.is_success() {
+        let body = response
+            .text()
+            .await
+            .context("failed to read Google error response body")?;
+        if let Some(raw_debug) = &request.raw_debug {
+            raw_debug
+                .write_response("google-error", &body)
+                .await
+                .context("failed to write Google raw debug error response")?;
+        }
+        bail!(
+            "{}",
+            super::upstream_error_message("Google", status_code, &body)
+        );
+    }
+
+    let mut stream = response.bytes_stream();
+    let mut buffer = super::SseLineBuffer::new();
+    let mut text = String::new();
+    let mut raw_stream = String::new();
+    let mut first_token_ms: Option<u128> = None;
+
+    while let Some(chunk) = stream.next().await {
+        let chunk = chunk.context("Google stream interrupted")?;
+        for line in buffer.feed(&chunk) {
+            raw_stream.push_str(&line);
+            raw_stream.push('\n');
+            // Each SSE event line carries a JSON response chunk after the "data: " prefix
+            let Some(data) = line.strip_prefix("data: ") else {
+                continue;
+            };
+            let Some(content) = serde_json::from_str::<GoogleResponse>(data)
+                .ok()
+                .and_then(response_text)
+                .filter(|content| !content.is_empty())
+            else {
+                continue;
+            };
+            if first_token_ms.is_none() {
+                first_token_ms = Some(started.elapsed().as_millis());
+            }
+            text.push_str(&content);
+        }
+    }
+
+    if let Some(raw_debug) = &request.raw_debug {
+        raw_debug
+            .write_response("google-sse", &raw_stream)
+            .await
+            .context("failed to write Google raw debug stream")?;
+    }
+
+    if text.is_empty() {
+        bail!("Google stream completed without producing any content");
+    }
+
+    Ok(ModelResponse {
+        text,
+        status: status_code,
+        elapsed_ms: 0,
+        first_token_ms,
+    })
+}
+
+// Assumes super::endpoint_url returns Result<String>, as used by the other protocol modules.
+fn google_endpoint(base_url: &str, model: &str, action: &str) -> Result<String> {
+    let model_path = if model.starts_with("models/") {
+        model.to_string()
+    } else {
+        format!("models/{model}")
+    };
+    super::endpoint_url(base_url, &format!("/{model_path}:{action}"))
+}
+
+fn request_body(request: &ModelRequest) -> Value {
+    let mut body = json!({
+        "contents": [{
+            "role": "user",
+            "parts": [{"text": request.prompt}]
+        }],
+        "generationConfig": {
+            "temperature": request.temperature,
+            "maxOutputTokens": request.max_tokens
+        }
+    });
+
+    if let Some(thinking) = &request.thinking
+        && thinking.enabled
+    {
+        let mut thinking_config = serde_json::Map::new();
+        if let Some(display) = &thinking.display {
+            // "omitted" and "false" disable thought output; any other value enables it
+            thinking_config.insert(
+                "includeThoughts".to_string(),
+                json!(display != "omitted" && display != "false"),
+            );
+        }
+        if let Some(budget_tokens) = thinking.budget_tokens {
+            thinking_config.insert("thinkingBudget".to_string(), json!(budget_tokens));
+        }
+        if let Some(effort) = &thinking.effort {
+            thinking_config.insert("thinkingLevel".to_string(), json!(effort));
+        }
+        if !thinking_config.is_empty() {
+            body["generationConfig"]["thinkingConfig"] = Value::Object(thinking_config);
+        }
+    }
+
+    body
+}
+
+fn response_text(response: GoogleResponse) -> Option<String> {
+    let text = response
+        .candidates
+        .into_iter()
+        .flat_map(|candidate| candidate.content.parts)
+        .filter(|part| !part.thought.unwrap_or(false))
+        .filter_map(|part| part.text)
+        .filter(|text| !text.is_empty())
+        .collect::<String>();
+    (!text.is_empty()).then_some(text)
+}
+
+#[derive(Debug, Deserialize)]
+struct GoogleResponse {
+    candidates: Vec<GoogleCandidate>,
+}
+
+#[derive(Debug, Deserialize)]
+struct GoogleCandidate {
+    content: GoogleContent,
+}
+
+#[derive(Debug, Deserialize)]
+struct GoogleContent {
+    parts: Vec<GooglePart>,
+}
+
+#[derive(Debug, Deserialize)]
+struct GooglePart {
+    text: Option<String>,
+    #[serde(default)]
+    thought: Option<bool>,
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::runner::{ModelRequest, ThinkingConfig};
+    use reqwest::Client;
+    use wiremock::matchers::{body_json, header, method, path};
+    use wiremock::{Mock, MockServer, ResponseTemplate};
+
+    #[tokio::test]
+    async fn sends_generate_content_with_thinking_config_and_extracts_text() {
+        let server = MockServer::start().await;
+        Mock::given(method("POST"))
+            .and(path("/models/gemini-test:generateContent"))
+            .and(header("x-goog-api-key", "test-token"))
+            .and(body_json(serde_json::json!({
+                "contents": [{
+                    "role": "user",
+                    "parts": [{"text": "hello"}]
+                }],
+                "generationConfig": {
+                    "temperature": 0.0,
+                    "maxOutputTokens": 1024,
+                    "thinkingConfig": {
+                        "includeThoughts": true,
+                        "thinkingBudget": 5000,
+                        "thinkingLevel": "high"
+                    }
+                }
+            })))
+            .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({
+                "candidates": [{
+                    "content": {
+                        "parts": [
+                            {"text": "hidden thought", "thought": true},
+                            {"text": "hi from gemini"}
+                        ]
+                    }
+                }]
+            })))
+            .mount(&server)
+            .await;
+
+        let mut request = google_request(server.uri());
+        request.thinking = Some(ThinkingConfig {
+            enabled: true,
+            kind: None,
+            budget_tokens: Some(5000),
+            effort: Some("high".to_string()),
+            display: Some("summarized".to_string()),
+            reasoning_effort: None,
+            reasoning_summary: None,
+        });
+
+        let response = super::send(&Client::new(), &request)
+            .await
+            .expect("response");
+
+        assert_eq!(response.status, 200);
+        assert_eq!(response.text, "hi from gemini");
+    }
+
+    #[tokio::test]
+    async fn sends_stream_generate_content_and_extracts_non_thought_parts() {
+        let server = MockServer::start().await;
+        Mock::given(method("POST"))
+            .and(path("/models/gemini-test:streamGenerateContent"))
+            .and(header("x-goog-api-key", "test-token"))
+            .and(body_json(serde_json::json!({
+                "contents": [{
+                    "role": "user",
+                    "parts": [{"text": "hello"}]
+                }],
+                "generationConfig": {
+                    "temperature": 0.0,
+                    "maxOutputTokens": 1024,
+                    "thinkingConfig": {
+                        "includeThoughts": false,
+                        "thinkingBudget": 0
+                    }
+                }
+            })))
+            .respond_with(
+                ResponseTemplate::new(200)
+                    .insert_header("content-type", "text/event-stream")
+                    .set_body_string(
+                        "data: {\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"hi \"}]}}]}\n\n\
+                         data: {\"candidates\":[{\"content\":{\"parts\":[{\"text\":\"thinking\",\"thought\":true},{\"text\":\"there\"}]}}]}\n\n",
+                    ),
+            )
+            .mount(&server)
+            .await;
+
+        let mut request = google_request(server.uri());
+        request.stream = true;
+        request.thinking = Some(ThinkingConfig {
+            enabled: true,
+            kind: None,
+            budget_tokens: Some(0),
+            effort: None,
+            display: Some("omitted".to_string()),
+            reasoning_effort: None,
+            reasoning_summary: None,
+        });
+
+        let response = super::send_stream(&Client::new(), &request)
+            .await
+            .expect("stream response");
+
+        assert_eq!(response.status, 200);
+        assert_eq!(response.text, "hi there");
+        assert!(response.first_token_ms.is_some());
+    }
+
+    fn google_request(base_url: String) -> ModelRequest {
+        ModelRequest {
+            base_url,
+            api_token: "test-token".to_string(),
+            model: "gemini-test".to_string(),
+            prompt: "hello".to_string(),
+            temperature: 0.0,
+            max_tokens: 1024,
+            stream: false,
+            raw_debug: None,
+            thinking: None,
+        }
+    }
+}
diff --git a/src/protocols/mod.rs b/src/protocols/mod.rs
index 652e510..1beeb5d 100644
--- a/src/protocols/mod.rs
+++ b/src/protocols/mod.rs
@@ -1,4 +1,5 @@
pub mod anthropic;
+pub mod google;
pub mod openai;
use anyhow::{Context, Result};
diff --git a/src/runner.rs b/src/runner.rs
index 01dfdf2..90afc18 100644
--- a/src/runner.rs
+++ b/src/runner.rs
@@ -140,11 +140,13 @@ pub async fn run_model_request_with_client(
        match protocol {
            ProtocolKind::Openai => protocols::openai::send_stream(client, request).await?,
            ProtocolKind::Anthropic => protocols::anthropic::send_stream(client, request).await?,
+            ProtocolKind::Google => protocols::google::send_stream(client, request).await?,
        }
    } else {
        match protocol {
            ProtocolKind::Openai => protocols::openai::send(client, request).await?,
            ProtocolKind::Anthropic => protocols::anthropic::send(client, request).await?,
+            ProtocolKind::Google => protocols::google::send(client, request).await?,
        }
    };
    response.elapsed_ms = started.elapsed().as_millis();