zarz 0.3.4-alpha → 0.5.0-alpha

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
@@ -1,103 +0,0 @@
- use anyhow::Result;
- use async_trait::async_trait;
- use futures::Stream;
- use std::pin::Pin;
- use serde_json::Value;
-
- use crate::cli::Provider;
-
- mod anthropic;
- mod openai;
- mod glm;
-
- #[derive(Debug, Clone)]
- pub struct CompletionRequest {
-     pub model: String,
-     pub system_prompt: Option<String>,
-     pub user_prompt: String,
-     pub max_output_tokens: u32,
-     pub temperature: f32,
-     pub messages: Option<Vec<Value>>,
-     pub tools: Option<Vec<Value>>,
- }
-
- #[derive(Debug, Clone)]
- pub struct CompletionResponse {
-     pub text: String,
-     pub tool_calls: Vec<ToolCall>,
-     #[allow(dead_code)]
-     pub stop_reason: Option<String>,
- }
-
- #[derive(Debug, Clone)]
- pub struct ToolCall {
-     pub id: String,
-     pub name: String,
-     pub input: serde_json::Value,
- }
-
- #[allow(dead_code)]
- pub type StreamChunk = Result<String>;
- #[allow(dead_code)]
- pub type CompletionStream = Pin<Box<dyn Stream<Item = StreamChunk> + Send>>;
-
- #[async_trait]
- pub trait CompletionProvider: Send + Sync {
-     async fn complete(&self, request: &CompletionRequest) -> Result<CompletionResponse>;
-     #[allow(dead_code)]
-     async fn complete_stream(&self, request: &CompletionRequest) -> Result<CompletionStream>;
- }
-
- pub enum ProviderClient {
-     Anthropic(anthropic::AnthropicClient),
-     OpenAi(openai::OpenAiClient),
-     Glm(glm::GlmClient),
- }
-
- impl ProviderClient {
-     pub fn new(
-         provider: Provider,
-         api_key: Option<String>,
-         endpoint_override: Option<String>,
-         timeout_override: Option<u64>,
-     ) -> Result<Self> {
-         match provider {
-             Provider::Anthropic => Ok(Self::Anthropic(
-                 anthropic::AnthropicClient::from_env(api_key, endpoint_override, timeout_override)?,
-             )),
-             Provider::OpenAi => Ok(Self::OpenAi(
-                 openai::OpenAiClient::from_env(api_key, endpoint_override, timeout_override)?,
-             )),
-             Provider::Glm => Ok(Self::Glm(
-                 glm::GlmClient::from_env(api_key, endpoint_override, timeout_override)?,
-             )),
-         }
-     }
-
-     pub fn name(&self) -> &'static str {
-         match self {
-             ProviderClient::Anthropic(_) => "anthropic",
-             ProviderClient::OpenAi(_) => "openai",
-             ProviderClient::Glm(_) => "glm",
-         }
-     }
- }
-
- #[async_trait]
- impl CompletionProvider for ProviderClient {
-     async fn complete(&self, request: &CompletionRequest) -> Result<CompletionResponse> {
-         match self {
-             ProviderClient::Anthropic(client) => client.complete(request).await,
-             ProviderClient::OpenAi(client) => client.complete(request).await,
-             ProviderClient::Glm(client) => client.complete(request).await,
-         }
-     }
-
-     async fn complete_stream(&self, request: &CompletionRequest) -> Result<CompletionStream> {
-         match self {
-             ProviderClient::Anthropic(client) => client.complete_stream(request).await,
-             ProviderClient::OpenAi(client) => client.complete_stream(request).await,
-             ProviderClient::Glm(client) => client.complete_stream(request).await,
-         }
-     }
- }
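
The first removed file is the provider abstraction layer: the module that declared the `anthropic`, `openai`, and `glm` backends and dispatched `CompletionProvider` calls through the `ProviderClient` enum. For readers assessing the breaking change, here is a sketch of how a caller would have driven that surface. This is illustrative code written against the deleted types above, not code from the package; the model id and prompts are placeholders.

use anyhow::Result;

// Hypothetical caller; assumes the deleted module's types are in scope.
async fn summarize(client: &ProviderClient) -> Result<()> {
    let request = CompletionRequest {
        model: "gpt-4o-mini".to_string(), // placeholder model id
        system_prompt: Some("You are a code reviewer.".to_string()),
        user_prompt: "Summarize this diff.".to_string(),
        max_output_tokens: 512,
        temperature: 0.2,
        messages: None, // None => the client builds messages from the prompts
        tools: None,    // None => no tool definitions are sent
    };
    // The enum forwards to whichever backend ProviderClient::new selected.
    let response = client.complete(&request).await?;
    println!("[{}] {}", client.name(), response.text);
    Ok(())
}

The second hunk, below, removes the OpenAI backend that this enum wrapped.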
@@ -1,247 +0,0 @@
- use anyhow::{anyhow, Context, Result};
- use bytes::Bytes;
- use futures::stream::StreamExt;
- use reqwest::Client;
- use serde::Deserialize;
- use serde_json::json;
-
- use super::{CompletionRequest, CompletionResponse, CompletionStream};
-
- const DEFAULT_ENDPOINT: &str = "https://api.openai.com/v1/chat/completions";
-
- pub struct OpenAiClient {
-     http: Client,
-     endpoint: String,
-     api_key: String,
- }
-
- impl OpenAiClient {
-     pub fn from_env(
-         api_key_override: Option<String>,
-         endpoint_override: Option<String>,
-         timeout_override: Option<u64>,
-     ) -> Result<Self> {
-         let api_key = api_key_override
-             .or_else(|| std::env::var("OPENAI_API_KEY").ok())
-             .ok_or_else(|| anyhow::anyhow!("OPENAI_API_KEY is required. Please set it in ~/.zarz/config.toml or as an environment variable"))?;
-         let endpoint = endpoint_override
-             .or_else(|| std::env::var("OPENAI_API_URL").ok())
-             .unwrap_or_else(|| DEFAULT_ENDPOINT.to_string());
-
-         let timeout_secs = timeout_override
-             .or_else(|| {
-                 std::env::var("OPENAI_TIMEOUT_SECS")
-                     .ok()
-                     .and_then(|raw| raw.parse::<u64>().ok())
-             })
-             .unwrap_or(120);
-
-         let http = Client::builder()
-             .user_agent("zarz-cli/0.1")
-             .timeout(std::time::Duration::from_secs(timeout_secs))
-             .build()
-             .context("Failed to build HTTP client for OpenAI")?;
-
-         Ok(Self {
-             http,
-             endpoint,
-             api_key,
-         })
-     }
-
-     pub async fn complete(&self, request: &CompletionRequest) -> Result<CompletionResponse> {
-         let messages = if let Some(msgs) = &request.messages {
-             msgs.clone()
-         } else {
-             let mut messages = Vec::new();
-             if let Some(system) = &request.system_prompt {
-                 messages.push(json!({
-                     "role": "system",
-                     "content": system,
-                 }));
-             }
-             messages.push(json!({
-                 "role": "user",
-                 "content": request.user_prompt,
-             }));
-             messages
-         };
-
-         let mut payload = json!({
-             "model": request.model,
-             "max_tokens": request.max_output_tokens,
-             "temperature": request.temperature,
-             "messages": messages,
-         });
-
-         if let Some(tools) = &request.tools {
-             let openai_tools: Vec<_> = tools.iter().map(|tool| {
-                 json!({
-                     "type": "function",
-                     "function": {
-                         "name": tool["name"],
-                         "description": tool["description"],
-                         "parameters": tool["input_schema"]
-                     }
-                 })
-             }).collect();
-             payload["tools"] = json!(openai_tools);
-         }
-
-         let response = self
-             .http
-             .post(&self.endpoint)
-             .bearer_auth(&self.api_key)
-             .json(&payload)
-             .send()
-             .await
-             .context("OpenAI request failed")?;
-
-         let response = response.error_for_status().context("OpenAI returned an error status")?;
-
-         let parsed: OpenAiResponse = response
-             .json()
-             .await
-             .context("Failed to decode OpenAI response")?;
-
-         let first_choice = parsed.choices.into_iter().next()
-             .ok_or_else(|| anyhow!("OpenAI response did not include any choices"))?;
-
-         let text = first_choice.message.content.unwrap_or_default();
-         let mut tool_calls = Vec::new();
-
-         if let Some(calls) = first_choice.message.tool_calls {
-             for call in calls {
-                 tool_calls.push(super::ToolCall {
-                     id: call.id,
-                     name: call.function.name,
-                     input: call.function.arguments,
-                 });
-             }
-         }
-
-         Ok(CompletionResponse {
-             text,
-             tool_calls,
-             stop_reason: first_choice.finish_reason,
-         })
-     }
-
-     #[allow(dead_code)]
-     pub async fn complete_stream(&self, request: &CompletionRequest) -> Result<CompletionStream> {
-         let mut messages = Vec::new();
-         if let Some(system) = &request.system_prompt {
-             messages.push(json!({
-                 "role": "system",
-                 "content": system,
-             }));
-         }
-         messages.push(json!({
-             "role": "user",
-             "content": request.user_prompt,
-         }));
-
-         let payload = json!({
-             "model": request.model,
-             "max_tokens": request.max_output_tokens,
-             "temperature": request.temperature,
-             "messages": messages,
-             "stream": true,
-         });
-
-         let response = self
-             .http
-             .post(&self.endpoint)
-             .bearer_auth(&self.api_key)
-             .json(&payload)
-             .send()
-             .await
-             .context("OpenAI streaming request failed")?;
-
-         let response = response
-             .error_for_status()
-             .context("OpenAI returned an error status")?;
-
-         let stream = response.bytes_stream();
-         let text_stream = stream.map(|result| {
-             let bytes = result?;
-             parse_openai_sse_chunk(&bytes)
-         });
-
-         Ok(Box::pin(text_stream))
-     }
- }
-
- #[allow(dead_code)]
- fn parse_openai_sse_chunk(bytes: &Bytes) -> Result<String> {
-     let text = String::from_utf8_lossy(bytes);
-     let mut result = String::new();
-
-     for line in text.lines() {
-         if let Some(data) = line.strip_prefix("data: ") {
-             if data == "[DONE]" {
-                 break;
-             }
-
-             if let Ok(chunk) = serde_json::from_str::<StreamChunk>(data) {
-                 if let Some(choice) = chunk.choices.first() {
-                     if let Some(content) = &choice.delta.content {
-                         result.push_str(content);
-                     }
-                 }
-             }
-         }
-     }
-
-     Ok(result)
- }
-
- #[allow(dead_code)]
- #[derive(Debug, Deserialize)]
- struct StreamChunk {
-     choices: Vec<StreamChoice>,
- }
-
- #[allow(dead_code)]
- #[derive(Debug, Deserialize)]
- struct StreamChoice {
-     delta: StreamDelta,
- }
-
- #[allow(dead_code)]
- #[derive(Debug, Deserialize)]
- struct StreamDelta {
-     content: Option<String>,
- }
-
- #[derive(Debug, Deserialize)]
- struct OpenAiResponse {
-     choices: Vec<OpenAiChoice>,
- }
-
- #[derive(Debug, Deserialize)]
- struct OpenAiChoice {
-     message: OpenAiMessage,
-     finish_reason: Option<String>,
- }
-
- #[derive(Debug, Deserialize)]
- struct OpenAiMessage {
-     content: Option<String>,
-     tool_calls: Option<Vec<OpenAiToolCall>>,
- }
-
- #[derive(Debug, Deserialize)]
- struct OpenAiToolCall {
-     id: String,
-     #[serde(rename = "type")]
-     #[allow(dead_code)]
-     call_type: String,
-     function: OpenAiFunction,
- }
-
- #[derive(Debug, Deserialize)]
- struct OpenAiFunction {
-     name: String,
-     arguments: serde_json::Value,
- }
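
That second file was the OpenAI backend: a blocking `complete` call plus a dead-code streaming path. The streaming path never buffers a whole response; `parse_openai_sse_chunk` scans each `data: ` line of a server-sent-events buffer, stops at the `[DONE]` sentinel, and concatenates the `choices[0].delta.content` fragments. A minimal sketch of that behavior follows; the SSE payload is hand-written to match the chat-completions stream shape (not captured output), and the snippet assumes it runs inside the removed module where `parse_openai_sse_chunk` is visible.

use bytes::Bytes;

fn main() -> anyhow::Result<()> {
    // Two content deltas followed by the [DONE] sentinel, in one raw buffer.
    let body = Bytes::from_static(
        b"data: {\"choices\":[{\"delta\":{\"content\":\"Hel\"}}]}\n\
          data: {\"choices\":[{\"delta\":{\"content\":\"lo\"}}]}\n\
          data: [DONE]\n",
    );
    // Each `data: ` line is parsed on its own; deltas concatenate in order.
    assert_eq!(parse_openai_sse_chunk(&body)?, "Hello");
    Ok(())
}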