@mmmbuto/anthmorph 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/Cargo.lock CHANGED
@@ -4,7 +4,7 @@ version = 4
4
4
 
5
5
  [[package]]
6
6
  name = "AnthMorph"
7
- version = "0.1.1"
7
+ version = "0.1.3"
8
8
  dependencies = [
9
9
  "anyhow",
10
10
  "async-stream",
package/Cargo.toml CHANGED
@@ -1,6 +1,6 @@
1
1
  [package]
2
2
  name = "AnthMorph"
3
- version = "0.1.1"
3
+ version = "0.1.3"
4
4
  edition = "2021"
5
5
  description = "Anthropic to OpenAI-compatible proxy"
6
6
  license = "MIT"
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # AnthMorph
2
2
 
3
- [![Status](https://img.shields.io/badge/Status-0.1.1-blue.svg)](#project-status)
3
+ [![Status](https://img.shields.io/badge/Status-0.1.3-blue.svg)](#project-status)
4
4
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
5
5
  [![Rust](https://img.shields.io/badge/Rust-1.94%2B-orange.svg)](https://www.rust-lang.org)
6
6
  [![Target](https://img.shields.io/badge/Target-Termux%20%2F%20Linux-green.svg)](https://termux.dev)
@@ -19,7 +19,7 @@ Core capabilities:
19
19
 
20
20
  ## Project Status
21
21
 
22
- - Current line: `0.1.1`
22
+ - Current line: `0.1.3`
23
23
  - Primary target: `chutes.ai`
24
24
  - Secondary target: generic OpenAI-compatible endpoints
25
25
  - Tested locally against Chutes, MiniMax, and Alibaba Coding Plan rejection handling
@@ -83,7 +83,7 @@ anthmorphctl stop
83
83
  Direct binary usage is also available:
84
84
 
85
85
  ```bash
86
- anthmorph --port 3107 --backend-profile chutes --backend-url https://llm.chutes.ai/v1 --model Qwen/Qwen3-Coder-Next-TEE --api-key "$CHUTES_API_KEY"
86
+ anthmorph --port 3107 --backend-profile chutes --backend-url https://llm.chutes.ai/v1 --model Qwen/Qwen3.5-397B-A17B-TEE,zai-org/GLM-5-TEE,deepseek-ai/DeepSeek-V3.2-TEE --api-key "$CHUTES_API_KEY"
87
87
  ```
88
88
 
89
89
  ## Architecture
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mmmbuto/anthmorph",
3
- "version": "0.1.1",
3
+ "version": "0.1.3",
4
4
  "description": "Chutes-first Anthropic /v1/messages proxy for Chutes and OpenAI-compatible backends",
5
5
  "license": "MIT",
6
6
  "author": "DioNanos <noreply@github.com>",
Binary file
@@ -216,7 +216,7 @@ init_cmd() {
216
216
  chutes)
217
217
  BACKEND_PROFILE=chutes
218
218
  BACKEND_URL=https://llm.chutes.ai/v1
219
- MODEL=Qwen/Qwen3-Coder-Next-TEE
219
+ MODEL=Qwen/Qwen3.5-397B-A17B-TEE,zai-org/GLM-5-TEE,deepseek-ai/DeepSeek-V3.2-TEE
220
220
  API_KEY_ENV=CHUTES_API_KEY
221
221
  ;;
222
222
  minimax)
@@ -11,7 +11,7 @@ case "$BACKEND" in
11
11
  chutes)
12
12
  PROFILE=chutes
13
13
  BACKEND_URL=${CHUTES_BASE_URL:-https://llm.chutes.ai/v1}
14
- MODEL=${CHUTES_MODEL:-Qwen/Qwen3-Coder-Next-TEE}
14
+ MODEL=${CHUTES_MODEL:-Qwen/Qwen3.5-397B-A17B-TEE,zai-org/GLM-5-TEE,deepseek-ai/DeepSeek-V3.2-TEE}
15
15
  API_KEY=${CHUTES_API_KEY:?CHUTES_API_KEY is required}
16
16
  ;;
17
17
  alibaba)
@@ -236,19 +236,28 @@ pub struct MessageStartData {
236
236
  #[serde(rename = "type")]
237
237
  pub message_type: String,
238
238
  pub role: String,
239
+ pub content: Vec<serde_json::Value>,
239
240
  pub model: String,
241
+ #[serde(rename = "stop_reason")]
242
+ pub stop_reason: Option<serde_json::Value>,
243
+ #[serde(rename = "stop_sequence")]
244
+ pub stop_sequence: Option<serde_json::Value>,
240
245
  pub usage: Usage,
241
246
  }
242
247
 
243
248
  #[derive(Debug, Clone, Serialize)]
244
- #[serde(tag = "type", content = "content_block")]
249
+ #[serde(tag = "type")]
245
250
  pub enum ContentBlockStartData {
246
251
  #[serde(rename = "text")]
247
252
  Text { text: String },
248
253
  #[serde(rename = "thinking")]
249
254
  Thinking { thinking: String },
250
255
  #[serde(rename = "tool_use")]
251
- ToolUse { id: String, name: String },
256
+ ToolUse {
257
+ id: String,
258
+ name: String,
259
+ input: serde_json::Value,
260
+ },
252
261
  }
253
262
 
254
263
  #[derive(Debug, Clone, Serialize)]
package/src/proxy.rs CHANGED
@@ -255,9 +255,12 @@ fn create_sse_stream(
255
255
  id: message_id.clone().unwrap_or_else(generate_message_id),
256
256
  message_type: "message".to_string(),
257
257
  role: "assistant".to_string(),
258
+ content: vec![],
258
259
  model: current_model
259
260
  .clone()
260
261
  .unwrap_or_else(|| fallback_model.clone()),
262
+ stop_reason: None,
263
+ stop_sequence: None,
261
264
  usage: anthropic::Usage {
262
265
  input_tokens: 0,
263
266
  output_tokens: 0,
@@ -413,8 +416,10 @@ impl Config {
413
416
  .ok()
414
417
  .and_then(|v| v.parse().ok())
415
418
  .unwrap_or(BackendProfile::Chutes),
416
- model: std::env::var("ANTHMORPH_MODEL")
417
- .unwrap_or_else(|_| "Qwen/Qwen3-Coder-Next-TEE".to_string()),
419
+ model: std::env::var("ANTHMORPH_MODEL").unwrap_or_else(|_| {
420
+ "Qwen/Qwen3.5-397B-A17B-TEE,zai-org/GLM-5-TEE,deepseek-ai/DeepSeek-V3.2-TEE"
421
+ .to_string()
422
+ }),
418
423
  reasoning_model: std::env::var("ANTHMORPH_REASONING_MODEL").ok(),
419
424
  api_key: std::env::var("ANTHMORPH_API_KEY").ok(),
420
425
  ingress_api_key: std::env::var("ANTHMORPH_INGRESS_API_KEY").ok(),
@@ -554,7 +559,11 @@ fn transition_to_tool(
554
559
  *active_block = Some(ActiveBlock::ToolUse(tool_index, index));
555
560
  events.push(start_block_sse(
556
561
  index,
557
- anthropic::ContentBlockStartData::ToolUse { id, name },
562
+ anthropic::ContentBlockStartData::ToolUse {
563
+ id,
564
+ name,
565
+ input: json!({}),
566
+ },
558
567
  ));
559
568
  (index, events)
560
569
  }
@@ -734,11 +743,67 @@ mod tests {
734
743
 
735
744
  let joined = output.join("");
736
745
  assert!(joined.contains("\"type\":\"tool_use\""));
746
+ assert!(joined.contains("\"input\":{}"));
737
747
  assert!(joined.contains("\"partial_json\":\"{\\\"loc\""));
738
748
  assert!(joined.contains("\"partial_json\":\"ation"));
739
749
  assert_eq!(joined.matches("event: content_block_start").count(), 1);
740
750
  }
741
751
 
752
+ #[test]
753
+ fn message_start_sse_includes_required_anthropic_fields() {
754
+ let event = anthropic::StreamEvent::MessageStart {
755
+ message: anthropic::MessageStartData {
756
+ id: "msg_test".to_string(),
757
+ message_type: "message".to_string(),
758
+ role: "assistant".to_string(),
759
+ content: vec![],
760
+ model: "glm-5.1".to_string(),
761
+ stop_reason: None,
762
+ stop_sequence: None,
763
+ usage: anthropic::Usage {
764
+ input_tokens: 0,
765
+ output_tokens: 0,
766
+ },
767
+ },
768
+ };
769
+
770
+ let serialized = sse_event("message_start", &event);
771
+ let payload = serialized
772
+ .lines()
773
+ .find_map(|line| line.strip_prefix("data: "))
774
+ .expect("message_start data line");
775
+ let parsed: serde_json::Value = serde_json::from_str(payload).expect("valid json");
776
+
777
+ assert_eq!(parsed["message"]["type"], "message");
778
+ assert_eq!(parsed["message"]["role"], "assistant");
779
+ assert_eq!(parsed["message"]["content"], json!([]));
780
+ assert!(parsed["message"]["stop_reason"].is_null());
781
+ assert!(parsed["message"]["stop_sequence"].is_null());
782
+ }
783
+
784
+ #[test]
785
+ fn content_block_start_tool_use_has_flat_anthropic_shape() {
786
+ let payload = start_block_sse(
787
+ 0,
788
+ anthropic::ContentBlockStartData::ToolUse {
789
+ id: "toolu_123".to_string(),
790
+ name: "mcp__memory__memory_read".to_string(),
791
+ input: json!({}),
792
+ },
793
+ )
794
+ .lines()
795
+ .find_map(|line| line.strip_prefix("data: "))
796
+ .expect("content_block_start data line")
797
+ .to_string();
798
+
799
+ let parsed: serde_json::Value = serde_json::from_str(&payload).expect("valid json");
800
+ assert_eq!(parsed["content_block"]["type"], "tool_use");
801
+ assert_eq!(parsed["content_block"]["id"], "toolu_123");
802
+ assert_eq!(parsed["content_block"]["name"], "mcp__memory__memory_read");
803
+ assert_eq!(parsed["content_block"]["input"], json!({}));
804
+ assert!(parsed["content_block"].get("content_block").is_none());
805
+ }
806
+
742
807
  #[test]
743
808
  fn extracts_multi_line_sse_data() {
744
809
  let block = "event: message\ndata: first\ndata: second\n";
package/src/transform.rs CHANGED
@@ -61,27 +61,23 @@ pub fn anthropic_to_openai(
61
61
  let mut openai_messages = Vec::new();
62
62
 
63
63
  if let Some(system) = req.system {
64
- match system {
65
- anthropic::SystemPrompt::Single(text) => {
66
- openai_messages.push(openai::Message {
67
- role: "system".to_string(),
68
- content: Some(openai::MessageContent::Text(text)),
69
- name: None,
70
- tool_calls: None,
71
- tool_call_id: None,
72
- });
73
- }
74
- anthropic::SystemPrompt::Multiple(messages) => {
75
- for msg in messages {
76
- openai_messages.push(openai::Message {
77
- role: "system".to_string(),
78
- content: Some(openai::MessageContent::Text(msg.text)),
79
- name: None,
80
- tool_calls: None,
81
- tool_call_id: None,
82
- });
83
- }
84
- }
64
+ let system_text = match system {
65
+ anthropic::SystemPrompt::Single(text) => text,
66
+ anthropic::SystemPrompt::Multiple(messages) => messages
67
+ .into_iter()
68
+ .map(|msg| msg.text)
69
+ .collect::<Vec<_>>()
70
+ .join("\n\n"),
71
+ };
72
+
73
+ if !system_text.is_empty() {
74
+ openai_messages.push(openai::Message {
75
+ role: "system".to_string(),
76
+ content: Some(openai::MessageContent::Text(system_text)),
77
+ name: None,
78
+ tool_calls: None,
79
+ tool_call_id: None,
80
+ });
85
81
  }
86
82
  }
87
83
 
@@ -354,7 +350,7 @@ pub fn map_stop_reason(finish_reason: Option<&str>) -> Option<String> {
354
350
  mod tests {
355
351
  use super::*;
356
352
  use crate::models::anthropic::{
357
- AnthropicRequest, ContentBlock, Message, MessageContent, SystemPrompt, Tool,
353
+ AnthropicRequest, ContentBlock, Message, MessageContent, SystemMessage, SystemPrompt, Tool,
358
354
  };
359
355
 
360
356
  fn sample_request() -> AnthropicRequest {
@@ -409,6 +405,41 @@ mod tests {
409
405
  assert!(err.to_string().contains("thinking blocks"));
410
406
  }
411
407
 
408
+ #[test]
409
+ fn collapses_multiple_system_prompts_into_single_openai_message() {
410
+ let req = AnthropicRequest {
411
+ model: "claude".to_string(),
412
+ messages: vec![Message {
413
+ role: "user".to_string(),
414
+ content: MessageContent::Text("hi".to_string()),
415
+ }],
416
+ system: Some(SystemPrompt::Multiple(vec![
417
+ SystemMessage {
418
+ text: "one".to_string(),
419
+ },
420
+ SystemMessage {
421
+ text: "two".to_string(),
422
+ },
423
+ ])),
424
+ max_tokens: 64,
425
+ temperature: None,
426
+ top_p: None,
427
+ top_k: None,
428
+ stop_sequences: None,
429
+ stream: None,
430
+ tools: None,
431
+ extra: Default::default(),
432
+ };
433
+
434
+ let out = anthropic_to_openai(req, "model", BackendProfile::Chutes).unwrap();
435
+ assert_eq!(out.messages[0].role, "system");
436
+ match out.messages[0].content.as_ref().unwrap() {
437
+ openai::MessageContent::Text(text) => assert_eq!(text, "one\n\ntwo"),
438
+ other => panic!("expected text system prompt, got {other:?}"),
439
+ }
440
+ assert_eq!(out.messages[1].role, "user");
441
+ }
442
+
412
443
  #[test]
413
444
  fn maps_reasoning_to_thinking_block_for_chutes() {
414
445
  let resp = openai::OpenAIResponse {
@@ -153,7 +153,7 @@ fn chutes_real_backend_smoke() {
153
153
  let server = start_server(
154
154
  "chutes",
155
155
  &env::var("CHUTES_BASE_URL").unwrap_or_else(|_| "https://llm.chutes.ai/v1".to_string()),
156
- &env::var("CHUTES_MODEL").unwrap_or_else(|_| "Qwen/Qwen3-Coder-Next-TEE".to_string()),
156
+ &env::var("CHUTES_MODEL").unwrap_or_else(|_| "deepseek-ai/DeepSeek-V3.2-TEE".to_string()),
157
157
  &api_key,
158
158
  );
159
159