zarz 0.3.1-alpha → 0.3.4-alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/Cargo.lock CHANGED
@@ -2706,7 +2706,7 @@ dependencies = [
 
 [[package]]
 name = "zarzcli"
-version = "0.3.1-ALPHA"
+version = "0.3.4-ALPHA"
 dependencies = [
  "anyhow",
  "async-trait",
package/Cargo.toml CHANGED
@@ -1,6 +1,6 @@
 [package]
 name = "zarzcli"
-version = "0.3.1-ALPHA"
+version = "0.3.4-ALPHA"
 edition = "2024"
 
 [dependencies]
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "zarz",
-  "version": "0.3.1-alpha",
+  "version": "0.3.4-alpha",
   "description": "Fast AI coding assistant for terminal built with Rust",
   "main": "bin/zarz.js",
   "bin": {
package/src/executor.rs CHANGED
@@ -5,6 +5,7 @@ use tokio::process::Command;
 
 pub struct CommandExecutor;
 
+#[allow(dead_code)]
 #[derive(Debug)]
 pub struct CommandResult {
     pub stdout: String,
@@ -14,6 +15,7 @@ pub struct CommandResult {
 }
 
 impl CommandExecutor {
+    #[allow(dead_code)]
     pub async fn execute(command: &str) -> Result<CommandResult> {
         let (shell, flag) = if cfg!(target_os = "windows") {
             ("cmd", "/C")
package/src/main.rs CHANGED
@@ -2,12 +2,12 @@ mod cli;
 mod config;
 mod mcp;
 mod providers;
-mod executor;
-mod fs_ops;
-mod intelligence;
-mod repl;
-mod session;
-mod conversation_store;
+mod executor;
+mod fs_ops;
+mod intelligence;
+mod repl;
+mod session;
+mod conversation_store;
 
 use std::{
     collections::HashMap,
@@ -109,7 +109,7 @@ async fn run(cli: Cli) -> Result<()> {
        }
    }
 
-    let tagline_lines = ["v0.3.0-Alpha", "Type /help for available commands, /exit to exit"];
+    let tagline_lines = ["v0.3.4-Alpha", "Type /help for available commands, /exit to exit"];
 
    for (index, line) in tagline_lines.iter().enumerate() {
        if index > 0 {
@@ -233,6 +233,8 @@ async fn handle_quick_ask(
         user_prompt,
         max_output_tokens: resolve_max_tokens(),
         temperature: resolve_temperature(),
+        messages: None,
+        tools: None,
     };
 
     let response = provider.complete(&request).await?;
@@ -305,6 +307,8 @@ async fn handle_ask(args: AskArgs, config: &config::Config) -> Result<()> {
         user_prompt,
         max_output_tokens: resolve_max_tokens(),
         temperature: resolve_temperature(),
+        messages: None,
+        tools: None,
     };
 
     let response = provider.complete(&request).await?;
@@ -378,6 +382,8 @@ async fn handle_rewrite(args: RewriteArgs, config: &config::Config) -> Result<()
         user_prompt,
         max_output_tokens: resolve_max_tokens(),
         temperature: resolve_rewrite_temperature(),
+        messages: None,
+        tools: None,
     };
 
     let response = provider.complete(&request).await?;
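
Note: the three call sites above opt out of the new fields with messages: None, tools: None, preserving the old single-turn behavior. A hedged sketch of what the new fields enable, written against the CompletionRequest shape added in this release (the conversation content and tool definition below are illustrative, not from the package):

    // Hypothetical tool-enabled request inside zarz's own code. When
    // `messages` is Some(...), all three provider clients in this diff
    // ignore `user_prompt` and send the history as-is.
    use serde_json::json;

    let request = providers::CompletionRequest {
        model,
        system_prompt: None,
        user_prompt: String::new(),
        max_output_tokens: resolve_max_tokens(),
        temperature: resolve_temperature(),
        // Full conversation history, as provider-native message objects.
        messages: Some(vec![json!({
            "role": "user",
            "content": "List the files in this repository"
        })]),
        // Tool definitions; see the Anthropic section below for the shape.
        tools: Some(vec![json!({
            "name": "run_command",
            "description": "Execute a shell command and return its output",
            "input_schema": {
                "type": "object",
                "properties": { "command": { "type": "string" } },
                "required": ["command"]
            }
        })]),
    };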
package/src/providers/anthropic.rs CHANGED
@@ -1,4 +1,4 @@
-use anyhow::{anyhow, Context, Result};
+use anyhow::{Context, Result};
 use bytes::Bytes;
 use futures::stream::StreamExt;
 use reqwest::Client;
@@ -70,16 +70,25 @@ impl AnthropicClient {
                 serde_json::Value::String(system_prompt.clone()),
             );
         }
-        payload.insert(
-            "messages".to_string(),
-            json!([{
-                "role": "user",
-                "content": [{
-                    "type": "text",
-                    "text": request.user_prompt
-                }]
-            }]),
-        );
+
+        if let Some(tools) = &request.tools {
+            payload.insert("tools".to_string(), serde_json::Value::Array(tools.clone()));
+        }
+
+        if let Some(messages) = &request.messages {
+            payload.insert("messages".to_string(), serde_json::Value::Array(messages.clone()));
+        } else {
+            payload.insert(
+                "messages".to_string(),
+                json!([{
+                    "role": "user",
+                    "content": [{
+                        "type": "text",
+                        "text": request.user_prompt
+                    }]
+                }]),
+            );
+        }
 
         let response = self
             .http
@@ -96,14 +105,26 @@ impl AnthropicClient {
             .json()
             .await
             .context("Failed to decode Anthropic response")?;
-        let text = parsed
-            .content
-            .into_iter()
-            .find_map(|block| match block {
-                AnthropicResponseBlock::Text { text, .. } => Some(text),
-            })
-            .ok_or_else(|| anyhow!("Anthropic response did not include text content"))?;
-        Ok(CompletionResponse { text })
+
+        let mut text = String::new();
+        let mut tool_calls = Vec::new();
+
+        for block in parsed.content {
+            match block {
+                AnthropicResponseBlock::Text { text: t } => {
+                    text.push_str(&t);
+                }
+                AnthropicResponseBlock::ToolUse { id, name, input } => {
+                    tool_calls.push(super::ToolCall { id, name, input });
+                }
+            }
+        }
+
+        Ok(CompletionResponse {
+            text,
+            tool_calls,
+            stop_reason: parsed.stop_reason,
+        })
     }
 
     #[allow(dead_code)]
@@ -204,11 +225,18 @@ struct StreamDelta {
 #[derive(Debug, Deserialize)]
 struct AnthropicResponse {
     content: Vec<AnthropicResponseBlock>,
+    stop_reason: Option<String>,
 }
 
-#[derive(Debug, Deserialize)]
+#[derive(Debug, Deserialize, Clone)]
 #[serde(tag = "type")]
-enum AnthropicResponseBlock {
+pub enum AnthropicResponseBlock {
     #[serde(rename = "text")]
     Text { text: String },
+    #[serde(rename = "tool_use")]
+    ToolUse {
+        id: String,
+        name: String,
+        input: serde_json::Value,
+    },
 }
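
The Anthropic client forwards request.tools into the payload verbatim, so the name/description/input_schema layout is Anthropic's native tool format; the GLM and OpenAI clients below translate from it. A minimal illustrative definition (this particular tool is not in the package):

    use serde_json::json;

    // One entry of CompletionRequest::tools, in Anthropic's native shape.
    let read_file_tool = json!({
        "name": "read_file",
        "description": "Read a file from the workspace and return its contents",
        "input_schema": {
            "type": "object",
            "properties": {
                "path": { "type": "string", "description": "Relative file path" }
            },
            "required": ["path"]
        }
    });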
package/src/providers/glm.rs CHANGED
@@ -51,24 +51,43 @@ impl GlmClient {
     }
 
     pub async fn complete(&self, request: &CompletionRequest) -> Result<CompletionResponse> {
-        let mut messages = Vec::new();
-        if let Some(system) = &request.system_prompt {
+        let messages = if let Some(msgs) = &request.messages {
+            msgs.clone()
+        } else {
+            let mut messages = Vec::new();
+            if let Some(system) = &request.system_prompt {
+                messages.push(json!({
+                    "role": "system",
+                    "content": system,
+                }));
+            }
             messages.push(json!({
-                "role": "system",
-                "content": system,
+                "role": "user",
+                "content": request.user_prompt,
             }));
-        }
-        messages.push(json!({
-            "role": "user",
-            "content": request.user_prompt,
-        }));
+            messages
+        };
 
-        let payload = json!({
+        let mut payload = json!({
             "model": request.model,
             "max_tokens": request.max_output_tokens,
             "messages": messages,
         });
 
+        if let Some(tools) = &request.tools {
+            let glm_tools: Vec<_> = tools.iter().map(|tool| {
+                json!({
+                    "type": "function",
+                    "function": {
+                        "name": tool["name"],
+                        "description": tool["description"],
+                        "parameters": tool["input_schema"]
+                    }
+                })
+            }).collect();
+            payload["tools"] = json!(glm_tools);
+        }
+
         // Construct full endpoint URL
         let full_url = format!("{}/chat/completions", self.endpoint);
 
@@ -95,13 +114,27 @@ impl GlmClient {
             .await
             .context("Failed to decode GLM response")?;
 
-        let text = parsed
-            .choices
-            .into_iter()
-            .find_map(|choice| choice.message.content)
-            .ok_or_else(|| anyhow!("GLM response did not include content"))?;
+        let first_choice = parsed.choices.into_iter().next()
+            .ok_or_else(|| anyhow!("GLM response did not include any choices"))?;
+
+        let text = first_choice.message.content.unwrap_or_default();
+        let mut tool_calls = Vec::new();
 
-        Ok(CompletionResponse { text })
+        if let Some(calls) = first_choice.message.tool_calls {
+            for call in calls {
+                tool_calls.push(super::ToolCall {
+                    id: call.id,
+                    name: call.function.name,
+                    input: call.function.arguments,
+                });
+            }
+        }
+
+        Ok(CompletionResponse {
+            text,
+            tool_calls,
+            stop_reason: first_choice.finish_reason,
+        })
     }
 
     #[allow(dead_code)]
@@ -201,9 +234,26 @@ struct GlmResponse {
 #[derive(Debug, Deserialize)]
 struct GlmChoice {
     message: GlmMessage,
+    finish_reason: Option<String>,
 }
 
 #[derive(Debug, Deserialize)]
 struct GlmMessage {
     content: Option<String>,
+    tool_calls: Option<Vec<GlmToolCall>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct GlmToolCall {
+    id: String,
+    #[serde(rename = "type")]
+    #[allow(dead_code)]
+    call_type: String,
+    function: GlmFunction,
+}
+
+#[derive(Debug, Deserialize)]
+struct GlmFunction {
+    name: String,
+    arguments: serde_json::Value,
 }
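
The glm_tools map above (duplicated in the OpenAI client below) rewraps each Anthropic-style definition as an OpenAI-style function object. Roughly, assuming the read_file example from the Anthropic section:

    use serde_json::json;

    // Before: Anthropic-native entry from CompletionRequest::tools.
    let tool = json!({
        "name": "read_file",
        "description": "Read a file from the workspace",
        "input_schema": { "type": "object", "properties": { "path": { "type": "string" } } }
    });

    // After the map step in GlmClient::complete / OpenAiClient::complete:
    let mapped = json!({
        "type": "function",
        "function": {
            "name": tool["name"],
            "description": tool["description"],
            "parameters": tool["input_schema"]
        }
    });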
package/src/providers/mod.rs CHANGED
@@ -2,6 +2,7 @@ use anyhow::Result;
 use async_trait::async_trait;
 use futures::Stream;
 use std::pin::Pin;
+use serde_json::Value;
 
 use crate::cli::Provider;
 
@@ -16,11 +17,23 @@ pub struct CompletionRequest {
     pub user_prompt: String,
     pub max_output_tokens: u32,
     pub temperature: f32,
+    pub messages: Option<Vec<Value>>,
+    pub tools: Option<Vec<Value>>,
 }
 
 #[derive(Debug, Clone)]
 pub struct CompletionResponse {
     pub text: String,
+    pub tool_calls: Vec<ToolCall>,
+    #[allow(dead_code)]
+    pub stop_reason: Option<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct ToolCall {
+    pub id: String,
+    pub name: String,
+    pub input: serde_json::Value,
 }
 
 #[allow(dead_code)]
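
With tool_calls and stop_reason now surfaced on CompletionResponse, a caller can drive an agent loop, although no such loop appears in this diff. A hedged sketch (the execute_tool and append_tool_result helpers and the shape of the appended messages are illustrative; Anthropic reports stop_reason "tool_use" while OpenAI-style APIs report finish_reason "tool_calls"):

    // Hypothetical driver; not part of this release.
    loop {
        let response = provider.complete(&request).await?;
        if response.tool_calls.is_empty() {
            println!("{}", response.text);
            break;
        }
        for call in &response.tool_calls {
            // Run the named tool against its JSON input (illustrative helper).
            let result = execute_tool(&call.name, &call.input).await?;
            // Append the assistant turn and a tool-result message to
            // request.messages in the active provider's native format,
            // then loop to let the model continue.
            append_tool_result(&mut request, call, result);
        }
    }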
package/src/providers/openai.rs CHANGED
@@ -50,25 +50,44 @@ impl OpenAiClient {
     }
 
     pub async fn complete(&self, request: &CompletionRequest) -> Result<CompletionResponse> {
-        let mut messages = Vec::new();
-        if let Some(system) = &request.system_prompt {
+        let messages = if let Some(msgs) = &request.messages {
+            msgs.clone()
+        } else {
+            let mut messages = Vec::new();
+            if let Some(system) = &request.system_prompt {
+                messages.push(json!({
+                    "role": "system",
+                    "content": system,
+                }));
+            }
             messages.push(json!({
-                "role": "system",
-                "content": system,
+                "role": "user",
+                "content": request.user_prompt,
             }));
-        }
-        messages.push(json!({
-            "role": "user",
-            "content": request.user_prompt,
-        }));
+            messages
+        };
 
-        let payload = json!({
+        let mut payload = json!({
             "model": request.model,
             "max_tokens": request.max_output_tokens,
             "temperature": request.temperature,
             "messages": messages,
         });
 
+        if let Some(tools) = &request.tools {
+            let openai_tools: Vec<_> = tools.iter().map(|tool| {
+                json!({
+                    "type": "function",
+                    "function": {
+                        "name": tool["name"],
+                        "description": tool["description"],
+                        "parameters": tool["input_schema"]
+                    }
+                })
+            }).collect();
+            payload["tools"] = json!(openai_tools);
+        }
+
         let response = self
             .http
             .post(&self.endpoint)
@@ -85,13 +104,27 @@ impl OpenAiClient {
             .await
             .context("Failed to decode OpenAI response")?;
 
-        let text = parsed
-            .choices
-            .into_iter()
-            .find_map(|choice| choice.message.content)
-            .ok_or_else(|| anyhow!("OpenAI response did not include content"))?;
+        let first_choice = parsed.choices.into_iter().next()
+            .ok_or_else(|| anyhow!("OpenAI response did not include any choices"))?;
+
+        let text = first_choice.message.content.unwrap_or_default();
+        let mut tool_calls = Vec::new();
 
-        Ok(CompletionResponse { text })
+        if let Some(calls) = first_choice.message.tool_calls {
+            for call in calls {
+                tool_calls.push(super::ToolCall {
+                    id: call.id,
+                    name: call.function.name,
+                    input: call.function.arguments,
+                });
+            }
+        }
+
+        Ok(CompletionResponse {
+            text,
+            tool_calls,
+            stop_reason: first_choice.finish_reason,
+        })
     }
 
     #[allow(dead_code)]
@@ -189,9 +222,26 @@ struct OpenAiResponse {
 #[derive(Debug, Deserialize)]
 struct OpenAiChoice {
     message: OpenAiMessage,
+    finish_reason: Option<String>,
 }
 
 #[derive(Debug, Deserialize)]
 struct OpenAiMessage {
     content: Option<String>,
+    tool_calls: Option<Vec<OpenAiToolCall>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAiToolCall {
+    id: String,
+    #[serde(rename = "type")]
+    #[allow(dead_code)]
+    call_type: String,
+    function: OpenAiFunction,
+}
+
+#[derive(Debug, Deserialize)]
+struct OpenAiFunction {
+    name: String,
+    arguments: serde_json::Value,
 }
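
One caution for consumers of ToolCall::input from this path: GlmFunction and OpenAiFunction deserialize arguments as serde_json::Value, but the OpenAI chat-completions API typically encodes function.arguments as a JSON string rather than an object, so the value may arrive as Value::String. A hedged caller-side normalization sketch (not part of the package):

    use serde_json::Value;

    // If the provider sent arguments as a JSON-encoded string, parse it;
    // otherwise pass the object through unchanged.
    fn normalize_tool_input(input: Value) -> Value {
        match input {
            Value::String(s) => serde_json::from_str(&s).unwrap_or(Value::String(s)),
            other => other,
        }
    }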