@mmmbuto/anthmorph 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,460 @@
1
+ use crate::config::BackendProfile;
2
+ use crate::error::{ProxyError, ProxyResult};
3
+ use crate::models::{anthropic, openai};
4
+ use serde_json::{json, Value};
5
+
6
/// Generates a unique Anthropic-style message id of the form `msg_<hex>`.
///
/// Combines the current Unix timestamp in milliseconds with a
/// process-wide atomic counter so that two ids produced within the same
/// millisecond are still distinct (the previous timestamp-only scheme
/// returned identical ids under rapid successive calls).
pub fn generate_message_id() -> String {
    use std::sync::atomic::{AtomicU64, Ordering};

    // Monotonic per-process sequence; disambiguates same-millisecond calls.
    static COUNTER: AtomicU64 = AtomicU64::new(0);

    let millis = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default() // clock before epoch -> 0 rather than panic
        .as_millis();
    let seq = COUNTER.fetch_add(1, Ordering::Relaxed);
    format!("msg_{millis:x}{seq:x}")
}
15
+
16
+ fn extract_tool_choice(
17
+ extra: &serde_json::Map<String, serde_json::Value>,
18
+ ) -> Option<openai::ToolChoice> {
19
+ let tool_choice = extra.get("tool_choice")?;
20
+
21
+ match tool_choice.get("type").and_then(|t| t.as_str()) {
22
+ Some("auto") => Some(openai::ToolChoice::String("auto".to_string())),
23
+ Some("any") => Some(openai::ToolChoice::String("required".to_string())),
24
+ Some("tool") => tool_choice
25
+ .get("name")
26
+ .and_then(|n| n.as_str())
27
+ .map(|name| openai::ToolChoice::Object {
28
+ tool_type: "function".to_string(),
29
+ function: openai::ToolChoiceFunction {
30
+ name: name.to_string(),
31
+ },
32
+ }),
33
+ _ => None,
34
+ }
35
+ }
36
+
37
/// Converts an Anthropic Messages API request into an OpenAI
/// chat-completions request for the given backend `model` and `profile`.
///
/// # Errors
/// Returns `ProxyError::Transform` when:
/// - `max_tokens` is zero,
/// - the request asks for extended thinking but the backend profile does
///   not support reasoning, or
/// - a message in the history cannot be converted (see `convert_message`).
pub fn anthropic_to_openai(
    req: anthropic::AnthropicRequest,
    model: &str,
    profile: BackendProfile,
) -> ProxyResult<openai::OpenAIRequest> {
    // Reject a zero token budget up front instead of forwarding a
    // request the backend cannot satisfy.
    if req.max_tokens == 0 {
        return Err(ProxyError::Transform(
            "max_tokens must be greater than zero".to_string(),
        ));
    }

    // A typed `thinking` object in the passthrough extras means the client
    // requested extended thinking; fail early if the backend can't honor it.
    if req
        .extra
        .get("thinking")
        .and_then(|v| v.get("type"))
        .is_some()
        && !profile.supports_reasoning()
    {
        return Err(ProxyError::Transform(format!(
            "thinking is not supported by backend profile {}",
            profile.as_str()
        )));
    }

    let mut openai_messages = Vec::new();

    // Anthropic carries the system prompt out-of-band; OpenAI models it as
    // leading messages with role "system".
    if let Some(system) = req.system {
        match system {
            anthropic::SystemPrompt::Single(text) => {
                openai_messages.push(openai::Message {
                    role: "system".to_string(),
                    content: Some(openai::MessageContent::Text(text)),
                    name: None,
                    tool_calls: None,
                    tool_call_id: None,
                });
            }
            anthropic::SystemPrompt::Multiple(messages) => {
                // One system message per block, preserving order.
                for msg in messages {
                    openai_messages.push(openai::Message {
                        role: "system".to_string(),
                        content: Some(openai::MessageContent::Text(msg.text)),
                        name: None,
                        tool_calls: None,
                        tool_call_id: None,
                    });
                }
            }
        }
    }

    // Conversation history; a single Anthropic message may expand into
    // several OpenAI messages (tool results become role "tool").
    for msg in req.messages {
        openai_messages.extend(convert_message(msg, profile)?);
    }

    // Map tool definitions, dropping entries typed "BatchTool"
    // (presumably handled client-side only — confirm against callers).
    // An empty list becomes None so the field is omitted entirely.
    let tools = req.tools.and_then(|tools| {
        let filtered: Vec<_> = tools
            .into_iter()
            .filter(|t| t.tool_type.as_deref() != Some("BatchTool"))
            .collect();

        if filtered.is_empty() {
            None
        } else {
            Some(
                filtered
                    .into_iter()
                    .map(|t| openai::Tool {
                        tool_type: "function".to_string(),
                        function: openai::Function {
                            name: t.name,
                            description: t.description,
                            // Strip schema annotations some backends reject.
                            parameters: clean_schema(t.input_schema),
                        },
                    })
                    .collect(),
            )
        }
    });

    Ok(openai::OpenAIRequest {
        model: model.to_string(),
        messages: openai_messages,
        max_tokens: Some(req.max_tokens),
        temperature: req.temperature,
        top_p: req.top_p,
        // top_k is non-standard for OpenAI-style APIs; only forward it to
        // profiles that declare support, otherwise drop it.
        top_k: if profile.supports_top_k() {
            req.top_k
        } else {
            None
        },
        stop: req.stop_sequences,
        stream: req.stream,
        tools,
        tool_choice: extract_tool_choice(&req.extra),
    })
}
134
+
135
/// Converts one Anthropic message into zero or more OpenAI messages.
///
/// Text/image blocks and tool-use blocks are folded into a single
/// message carrying the original role, while each tool-result block is
/// emitted as its own message with role "tool" (OpenAI's required shape).
/// Thinking blocks in replayed history are rejected for every profile.
///
/// # Errors
/// Returns `ProxyError::Transform` for thinking blocks, and propagates
/// serialization failures from tool-call arguments.
fn convert_message(
    msg: anthropic::Message,
    profile: BackendProfile,
) -> ProxyResult<Vec<openai::Message>> {
    let mut result = Vec::new();

    match msg.content {
        // Plain-string content maps 1:1.
        anthropic::MessageContent::Text(text) => {
            result.push(openai::Message {
                role: msg.role,
                content: Some(openai::MessageContent::Text(text)),
                tool_calls: None,
                tool_call_id: None,
                name: None,
            });
        }
        anthropic::MessageContent::Blocks(blocks) => {
            // Accumulators for the single folded message emitted at the end.
            let mut current_content_parts = Vec::new();
            let mut tool_calls = Vec::new();

            for block in blocks {
                match block {
                    anthropic::ContentBlock::Text { text } => {
                        current_content_parts.push(openai::ContentPart::Text { data: text });
                    }
                    anthropic::ContentBlock::Image { source } => {
                        // OpenAI expects inline images as data: URLs.
                        let data_url = format!("data:{};base64,{}", source.media_type, source.data);
                        current_content_parts.push(openai::ContentPart::ImageUrl {
                            image_url: openai::ImageUrl { url: data_url },
                        });
                    }
                    anthropic::ContentBlock::ToolUse { id, name, input } => {
                        // Tool invocations ride alongside text content as OpenAI
                        // tool_calls; arguments must be a JSON-encoded string.
                        tool_calls.push(openai::ToolCall {
                            id,
                            call_type: "function".to_string(),
                            function: openai::FunctionCall {
                                name,
                                arguments: serde_json::to_string(&input)?,
                            },
                        });
                    }
                    anthropic::ContentBlock::ToolResult {
                        tool_use_id,
                        content,
                        is_error,
                    } => {
                        // Flatten block-style results to text, keeping only the
                        // "text" field of each sub-block.
                        let mut text = match content {
                            anthropic::ToolResultContent::Text(s) => s,
                            anthropic::ToolResultContent::Blocks(blocks) => blocks
                                .iter()
                                .filter_map(|b| b.get("text").and_then(|t| t.as_str()))
                                .collect::<Vec<_>>()
                                .join("\n"),
                        };

                        // OpenAI tool messages have no error flag; encode the
                        // error condition in-band as a prefix.
                        if is_error.unwrap_or(false) {
                            text = format!("ERROR: {text}");
                        }

                        // Tool results must be standalone messages with role
                        // "tool", linked back via tool_call_id.
                        result.push(openai::Message {
                            role: "tool".to_string(),
                            content: Some(openai::MessageContent::Text(text)),
                            tool_calls: None,
                            tool_call_id: Some(tool_use_id),
                            name: None,
                        });
                    }
                    anthropic::ContentBlock::Thinking { thinking } => {
                        // Replayed thinking history is rejected unconditionally
                        // (the error text names the profile for diagnostics;
                        // see the unit tests).
                        return Err(ProxyError::Transform(format!(
                            "assistant thinking blocks are not supported by backend profile {} (received {} chars)",
                            profile.as_str(),
                            thinking.len()
                        )));
                    }
                    // Unknown block types are silently dropped.
                    anthropic::ContentBlock::Other => {}
                }
            }

            // Emit the folded message only if it actually carries something.
            if !current_content_parts.is_empty() || !tool_calls.is_empty() {
                // A lone text part collapses to plain-string content for
                // maximum backend compatibility.
                let content = if current_content_parts.is_empty() {
                    None
                } else if current_content_parts.len() == 1 {
                    match &current_content_parts[0] {
                        openai::ContentPart::Text { data } => {
                            Some(openai::MessageContent::Text(data.clone()))
                        }
                        _ => Some(openai::MessageContent::Parts(current_content_parts)),
                    }
                } else {
                    Some(openai::MessageContent::Parts(current_content_parts))
                };

                result.push(openai::Message {
                    role: msg.role,
                    content,
                    tool_calls: if tool_calls.is_empty() {
                        None
                    } else {
                        Some(tool_calls)
                    },
                    tool_call_id: None,
                    name: None,
                });
            }
        }
    }

    Ok(result)
}
244
+
245
+ fn clean_schema(mut schema: Value) -> Value {
246
+ if let Some(obj) = schema.as_object_mut() {
247
+ obj.remove("format");
248
+
249
+ if let Some(properties) = obj.get_mut("properties").and_then(|v| v.as_object_mut()) {
250
+ for (_, value) in properties.iter_mut() {
251
+ *value = clean_schema(value.clone());
252
+ }
253
+ }
254
+
255
+ if let Some(items) = obj.get_mut("items") {
256
+ *items = clean_schema(items.clone());
257
+ }
258
+
259
+ for key in ["anyOf", "oneOf", "allOf"] {
260
+ if let Some(arr) = obj.get_mut(key).and_then(|v| v.as_array_mut()) {
261
+ for item in arr.iter_mut() {
262
+ *item = clean_schema(item.clone());
263
+ }
264
+ }
265
+ }
266
+ }
267
+
268
+ schema
269
+ }
270
+
271
/// Converts an OpenAI chat-completions response back into an Anthropic
/// Messages API response.
///
/// Content ordering: thinking block (if any), then text, then tool_use
/// blocks. Only the first choice is surfaced; extras are ignored.
///
/// # Errors
/// Returns `ProxyError::Transform` when the response has no choices, or
/// when it carries reasoning content for a profile without reasoning
/// support.
pub fn openai_to_anthropic(
    resp: openai::OpenAIResponse,
    fallback_model: &str,
    profile: BackendProfile,
) -> ProxyResult<anthropic::AnthropicResponse> {
    let choice = resp
        .choices
        .first()
        .ok_or_else(|| ProxyError::Transform("No choices in response".to_string()))?;

    let mut content = Vec::new();

    // Reasoning (chain-of-thought) content maps to an Anthropic thinking
    // block, but only for profiles that opted in; otherwise fail loudly
    // rather than silently dropping it.
    if let Some(reasoning) = choice
        .message
        .reasoning_content
        .as_ref()
        .filter(|s| !s.is_empty())
    {
        if !profile.supports_reasoning() {
            return Err(ProxyError::Transform(format!(
                "backend profile {} returned reasoning content that cannot be represented safely",
                profile.as_str()
            )));
        }
        content.push(anthropic::ResponseContent::Thinking {
            thinking: reasoning.clone(),
        });
    }
    // Empty strings are treated as absent content.
    if let Some(text) = choice.message.content.as_ref().filter(|s| !s.is_empty()) {
        content.push(anthropic::ResponseContent::Text { text: text.clone() });
    }

    if let Some(tool_calls) = &choice.message.tool_calls {
        for tool_call in tool_calls {
            // Malformed argument JSON degrades to an empty object rather
            // than failing the whole response.
            let input: Value =
                serde_json::from_str(&tool_call.function.arguments).unwrap_or_else(|_| json!({}));

            content.push(anthropic::ResponseContent::ToolUse {
                id: tool_call.id.clone(),
                name: tool_call.function.name.clone(),
                input,
            });
        }
    }

    // NOTE(review): this mapping duplicates `map_stop_reason` below;
    // consider delegating to it so the two stay in sync.
    let stop_reason = choice
        .finish_reason
        .as_ref()
        .map(|r| match r.as_str() {
            "tool_calls" => "tool_use",
            "stop" => "end_turn",
            "length" => "max_tokens",
            _ => "end_turn",
        })
        .map(String::from);

    Ok(anthropic::AnthropicResponse {
        // Backends may omit an id; synthesize one in that case.
        id: resp.id.unwrap_or_else(generate_message_id),
        response_type: "message".to_string(),
        role: "assistant".to_string(),
        content,
        model: resp.model.unwrap_or_else(|| fallback_model.to_string()),
        stop_reason,
        stop_sequence: None,
        usage: anthropic::Usage {
            input_tokens: resp.usage.prompt_tokens,
            output_tokens: resp.usage.completion_tokens,
        },
    })
}
341
+
342
/// Maps an OpenAI `finish_reason` onto the Anthropic `stop_reason`
/// vocabulary. `None` stays `None`; unknown reasons fall back to
/// `end_turn`.
pub fn map_stop_reason(finish_reason: Option<&str>) -> Option<String> {
    let reason = finish_reason?;
    let mapped = match reason {
        "tool_calls" => "tool_use",
        "length" => "max_tokens",
        // "stop" and anything unrecognized both mean a normal end of turn.
        _ => "end_turn",
    };
    Some(mapped.to_string())
}
352
+
353
#[cfg(test)]
mod tests {
    use super::*;
    use crate::models::anthropic::{
        AnthropicRequest, ContentBlock, Message, MessageContent, SystemPrompt, Tool,
    };

    /// Representative request covering system prompt, sampling params,
    /// a tool definition (with a `format` annotation to exercise
    /// `clean_schema`), and stop sequences.
    fn sample_request() -> AnthropicRequest {
        AnthropicRequest {
            model: "claude-sonnet-4".to_string(),
            messages: vec![Message {
                role: "user".to_string(),
                content: MessageContent::Text("ping".to_string()),
            }],
            system: Some(SystemPrompt::Single("sys".to_string())),
            stream: Some(true),
            max_tokens: 128,
            temperature: Some(0.1),
            top_p: Some(0.9),
            top_k: Some(40),
            tools: Some(vec![Tool {
                name: "weather".to_string(),
                description: Some("desc".to_string()),
                input_schema: json!({"type":"object","properties":{"city":{"type":"string","format":"city"}}}),
                tool_type: None,
            }]),
            stop_sequences: Some(vec!["STOP".to_string()]),
            extra: serde_json::Map::new(),
        }
    }

    /// Generic OpenAI backends don't take top_k: it must be stripped.
    #[test]
    fn strips_top_k_for_generic_profile() {
        let req = sample_request();
        let transformed = anthropic_to_openai(req, "model", BackendProfile::OpenaiGeneric).unwrap();
        assert_eq!(transformed.top_k, None);
    }

    /// The Chutes profile supports top_k, so it passes through intact.
    #[test]
    fn keeps_top_k_for_chutes_profile() {
        let req = sample_request();
        let transformed = anthropic_to_openai(req, "model", BackendProfile::Chutes).unwrap();
        assert_eq!(transformed.top_k, Some(40));
    }

    /// Thinking blocks in replayed assistant history are rejected even
    /// for a reasoning-capable profile.
    #[test]
    fn rejects_assistant_thinking_history() {
        let mut req = sample_request();
        req.messages = vec![Message {
            role: "assistant".to_string(),
            content: MessageContent::Blocks(vec![ContentBlock::Thinking {
                thinking: "hidden".to_string(),
            }]),
        }];

        let err = anthropic_to_openai(req, "model", BackendProfile::Chutes).unwrap_err();
        assert!(err.to_string().contains("thinking blocks"));
    }

    /// reasoning_content becomes a thinking block, ordered first in the
    /// response content, for profiles that support reasoning.
    #[test]
    fn maps_reasoning_to_thinking_block_for_chutes() {
        let resp = openai::OpenAIResponse {
            id: Some("id1".to_string()),
            model: Some("backend".to_string()),
            choices: vec![openai::Choice {
                message: openai::ChoiceMessage {
                    content: Some("answer".to_string()),
                    tool_calls: None,
                    reasoning_content: Some("chain".to_string()),
                },
                finish_reason: Some("stop".to_string()),
            }],
            usage: openai::Usage {
                prompt_tokens: 10,
                completion_tokens: 5,
            },
        };

        let out = openai_to_anthropic(resp, "fallback", BackendProfile::Chutes).unwrap();
        match &out.content[0] {
            anthropic::ResponseContent::Thinking { thinking } => assert_eq!(thinking, "chain"),
            other => panic!("expected thinking block, got {other:?}"),
        }
    }

    /// A generic profile must fail loudly instead of silently dropping
    /// reasoning content it cannot represent.
    #[test]
    fn rejects_reasoning_for_generic_profile() {
        let resp = openai::OpenAIResponse {
            id: Some("id1".to_string()),
            model: Some("backend".to_string()),
            choices: vec![openai::Choice {
                message: openai::ChoiceMessage {
                    content: None,
                    tool_calls: None,
                    reasoning_content: Some("chain".to_string()),
                },
                finish_reason: Some("stop".to_string()),
            }],
            usage: openai::Usage {
                prompt_tokens: 10,
                completion_tokens: 5,
            },
        };

        let err = openai_to_anthropic(resp, "fallback", BackendProfile::OpenaiGeneric).unwrap_err();
        assert!(err.to_string().contains("reasoning content"));
    }
}
@@ -0,0 +1,213 @@
1
+ use serde_json::Value;
2
+ use std::env;
3
+ use std::fs;
4
+ use std::net::TcpListener;
5
+ use std::path::PathBuf;
6
+ use std::process::{Child, Command, Stdio};
7
+ use std::thread;
8
+ use std::time::{Duration, Instant};
9
+
10
/// A spawned `anthmorph` proxy process under test.
struct TestServer {
    child: Child,      // process handle; killed and reaped on drop
    log_path: PathBuf, // combined stdout/stderr log, surfaced on failures
    port: u16,         // local port the proxy was started on
}
15
+
16
impl Drop for TestServer {
    /// Best-effort teardown: kill the child and reap it so test runs
    /// don't leak server processes. Errors are deliberately ignored —
    /// the process may already have exited.
    fn drop(&mut self) {
        let _ = self.child.kill();
        let _ = self.child.wait();
    }
}
22
+
23
/// Reads an environment variable, treating unset, empty, and
/// whitespace-only values as "not configured".
fn configured_env(name: &str) -> Option<String> {
    env::var(name).ok().filter(|value| !value.trim().is_empty())
}
29
+
30
+ fn require_env(name: &str) -> Option<String> {
31
+ match configured_env(name) {
32
+ Some(value) => Some(value),
33
+ None => {
34
+ eprintln!("skipping real backend test: missing {name}");
35
+ None
36
+ }
37
+ }
38
+ }
39
+
40
/// Asks the OS for a currently-free TCP port by binding to port 0 and
/// reading back the assigned address. The listener is dropped on
/// return, so the port is only *probably* still free when reused —
/// acceptable for tests.
fn reserve_port() -> u16 {
    let listener = TcpListener::bind("127.0.0.1:0").expect("bind ephemeral port");
    let addr = listener.local_addr().expect("read local addr");
    addr.port()
}
47
+
48
/// Resolves the path of the compiled `anthmorph` binary; Cargo exports
/// it as `CARGO_BIN_EXE_anthmorph` when running integration tests.
fn anthmorph_bin() -> String {
    let path = env::var("CARGO_BIN_EXE_anthmorph");
    path.expect("cargo should expose anthmorph bin path")
}
51
+
52
/// Launches the proxy binary with the given backend configuration on a
/// freshly reserved port, redirecting stdout/stderr to a temp log
/// file, and blocks until the `/health` endpoint answers.
///
/// Panics if the binary cannot be spawned or never becomes ready.
fn start_server(
    backend_profile: &str,
    backend_url: &str,
    model: &str,
    api_key: &str,
) -> TestServer {
    let port = reserve_port();
    // Per-run log file; the profile/port pair keeps concurrent tests apart.
    let log_path = env::temp_dir().join(format!("anthmorph-itest-{backend_profile}-{port}.log"));
    let log = fs::File::create(&log_path).expect("create log file");
    // Second handle so stdout and stderr append to the same file.
    let log_err = log.try_clone().expect("clone log file");

    let child = Command::new(anthmorph_bin())
        .arg("--port")
        .arg(port.to_string())
        .arg("--backend-profile")
        .arg(backend_profile)
        .arg("--backend-url")
        .arg(backend_url)
        .arg("--model")
        .arg(model)
        .arg("--api-key")
        .arg(api_key)
        .stdout(Stdio::from(log))
        .stderr(Stdio::from(log_err))
        .spawn()
        .expect("spawn anthmorph");

    let server = TestServer {
        child,
        log_path,
        port,
    };
    // Block until /health responds (panics on timeout).
    wait_until_ready(&server);
    server
}
87
+
88
/// Polls the server's `/health` endpoint (via the system `curl`) every
/// 250 ms for up to 20 seconds. On timeout, panics with the captured
/// server log so failures are debuggable from CI output.
fn wait_until_ready(server: &TestServer) {
    let deadline = Instant::now() + Duration::from_secs(20);
    while Instant::now() < deadline {
        // -f makes curl exit non-zero on HTTP errors, so a plain status
        // check is a reliable readiness signal.
        let status = Command::new("curl")
            .arg("-fsS")
            .arg(format!("http://127.0.0.1:{}/health", server.port))
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .status()
            .expect("run curl health");
        if status.success() {
            return;
        }
        thread::sleep(Duration::from_millis(250));
    }

    // Timed out: surface the server's own output before failing.
    let log = fs::read_to_string(&server.log_path).unwrap_or_else(|_| "<missing log>".to_string());
    panic!("server did not become ready\n{}", log);
}
107
+
108
/// POSTs `payload` to the proxy's `/v1/messages` endpoint via curl and
/// returns the HTTP status code plus the parsed JSON body.
///
/// The response body goes to a temp file (`-o`) while `-w "%{http_code}"`
/// makes curl print only the status code on stdout. Panics if the body
/// is not valid JSON.
fn post_messages(server: &TestServer, payload: &Value) -> (u16, Value) {
    let body_path = env::temp_dir().join(format!("anthmorph-itest-body-{}.json", server.port));
    let payload_text = serde_json::to_string(payload).expect("serialize payload");

    let output = Command::new("curl")
        .arg("-sS")
        .arg("-o")
        .arg(&body_path)
        .arg("-w")
        .arg("%{http_code}")
        .arg(format!("http://127.0.0.1:{}/v1/messages", server.port))
        .arg("-H")
        .arg("content-type: application/json")
        .arg("-d")
        .arg(payload_text)
        .output()
        .expect("run curl request");

    // stdout contains exactly the status code thanks to -w/-o above.
    let status_text = String::from_utf8(output.stdout).expect("status utf8");
    let status = status_text
        .trim()
        .parse::<u16>()
        .expect("parse status code");
    let body = fs::read_to_string(&body_path).expect("read response body");
    // Include status and raw body in the panic so parse failures are
    // diagnosable without re-running.
    let value: Value = serde_json::from_str(&body).unwrap_or_else(|err| {
        panic!("parse response body as json failed: {err}; status={status}; body={body}")
    });
    (status, value)
}
137
+
138
/// Minimal Anthropic-shaped request asking the model to echo a fixed
/// marker string, so smoke tests can assert on the reply text.
fn base_payload() -> Value {
    serde_json::json!({
        "model": "claude-sonnet-4",
        "max_tokens": 128,
        "messages": [
            {"role": "user", "content": "Reply with exactly: anthmorph-smoke-ok"}
        ]
    })
}
147
+
148
/// End-to-end smoke test against a real Chutes backend; silently
/// skipped unless `CHUTES_API_KEY` is set. Expects the model to echo
/// the marker string exactly.
#[test]
fn chutes_real_backend_smoke() {
    let Some(api_key) = require_env("CHUTES_API_KEY") else {
        return;
    };
    // URL and model are overridable via env for local experimentation.
    let server = start_server(
        "chutes",
        &env::var("CHUTES_BASE_URL").unwrap_or_else(|_| "https://llm.chutes.ai/v1".to_string()),
        &env::var("CHUTES_MODEL").unwrap_or_else(|_| "Qwen/Qwen3-Coder-Next-TEE".to_string()),
        &api_key,
    );

    let (status, response) = post_messages(&server, &base_payload());
    assert_eq!(status, 200, "unexpected status: {response}");
    let text = response["content"][0]["text"]
        .as_str()
        .expect("text response");
    assert_eq!(text.trim(), "anthmorph-smoke-ok");
}
167
+
168
/// End-to-end smoke test against a real MiniMax backend through the
/// generic OpenAI profile; skipped unless `MINIMAX_API_KEY` is set.
/// Uses `contains` rather than exact match — this backend may wrap the
/// marker in extra text.
#[test]
fn minimax_real_backend_smoke() {
    let Some(api_key) = require_env("MINIMAX_API_KEY") else {
        return;
    };
    let server = start_server(
        "openai-generic",
        &env::var("MINIMAX_BASE_URL").unwrap_or_else(|_| "https://api.minimax.io/v1".to_string()),
        &env::var("MINIMAX_MODEL").unwrap_or_else(|_| "MiniMax-M2.5".to_string()),
        &api_key,
    );

    let (status, response) = post_messages(&server, &base_payload());
    assert_eq!(status, 200, "unexpected status: {response}");
    let text = response["content"][0]["text"]
        .as_str()
        .expect("text response");
    assert!(
        text.contains("anthmorph-smoke-ok"),
        "unexpected text: {text}"
    );
}
190
+
191
/// Documents a known upstream limitation: the Alibaba Coding Plan
/// endpoint rejects plain chat-completions calls, and the proxy must
/// surface that as a 502 with the backend's error message intact.
/// Skipped unless `ALIBABA_CODE_API_KEY` is set.
#[test]
fn alibaba_coding_plan_rejected_for_chat_completions() {
    let Some(api_key) = require_env("ALIBABA_CODE_API_KEY") else {
        return;
    };
    let server = start_server(
        "openai-generic",
        &env::var("ALIBABA_BASE_URL")
            .unwrap_or_else(|_| "https://coding-intl.dashscope.aliyuncs.com/v1".to_string()),
        &env::var("ALIBABA_MODEL").unwrap_or_else(|_| "qwen3-coder-plus".to_string()),
        &api_key,
    );

    let (status, response) = post_messages(&server, &base_payload());
    // 502 indicates the proxy relayed a backend-side rejection.
    assert_eq!(status, 502, "unexpected status: {response}");
    let message = response["error"]["message"]
        .as_str()
        .expect("error message");
    assert!(
        message.contains("Coding Plan is currently only available for Coding Agents"),
        "unexpected error message: {message}"
    );
}