@heysalad/cheri-cli 0.10.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,258 @@
+ // MCP (Model Context Protocol) client
+ // Connects to MCP servers via stdio transport, discovers tools, and calls them
+ import { spawn } from "child_process";
+ import { log } from "../logger.js";
+
+ const JSONRPC_VERSION = "2.0";
+ let requestId = 0;
+
+ /**
+  * MCP Server connection
+  */
+ export class McpServer {
+   constructor(name, command, args = [], env = {}) {
+     this.name = name;
+     this.command = command;
+     this.args = args;
+     this.env = { ...process.env, ...env };
+     this.process = null;
+     this.tools = [];
+     this.resources = [];
+     this.pendingRequests = new Map();
+     this._buffer = "";
+   }
+
+   async connect() {
+     return new Promise((resolve, reject) => {
+       this.process = spawn(this.command, this.args, {
+         env: this.env,
+         stdio: ["pipe", "pipe", "pipe"],
+       });
+
+       this.process.stdout.on("data", (data) => {
+         this._buffer += data.toString();
+         this._processBuffer();
+       });
+
+       this.process.stderr.on("data", (data) => {
+         log.dim(`[mcp:${this.name}] ${data.toString().trim()}`);
+       });
+
+       this.process.on("error", (err) => {
+         reject(new Error(`MCP server ${this.name} failed to start: ${err.message}`));
+       });
+
+       this.process.on("close", (code) => {
+         for (const [, { reject: r }] of this.pendingRequests) {
+           r(new Error(`MCP server ${this.name} exited with code ${code}`));
+         }
+         this.pendingRequests.clear();
+       });
+
+       // Initialize the connection
+       setTimeout(async () => {
+         try {
+           await this._initialize();
+           resolve();
+         } catch (err) {
+           reject(err);
+         }
+       }, 500);
+     });
+   }
+
+   async _initialize() {
+     const result = await this._request("initialize", {
+       protocolVersion: "2024-11-05",
+       capabilities: {},
+       clientInfo: { name: "cheri", version: "0.10.0" },
+     });
+
+     // Send initialized notification
+     this._notify("notifications/initialized", {});
+
+     // Discover tools
+     try {
+       const toolsResult = await this._request("tools/list", {});
+       this.tools = toolsResult.tools || [];
+     } catch {
+       this.tools = [];
+     }
+
+     // Discover resources
+     try {
+       const resourcesResult = await this._request("resources/list", {});
+       this.resources = resourcesResult.resources || [];
+     } catch {
+       this.resources = [];
+     }
+
+     return result;
+   }
+
+   async callTool(name, args = {}) {
+     return this._request("tools/call", { name, arguments: args });
+   }
+
+   async readResource(uri) {
+     return this._request("resources/read", { uri });
+   }
+
+   _request(method, params) {
+     return new Promise((resolve, reject) => {
+       const id = ++requestId;
+       const timeout = setTimeout(() => {
+         this.pendingRequests.delete(id);
+         reject(new Error(`MCP request ${method} timed out`));
+       }, 30000);
+
+       this.pendingRequests.set(id, {
+         resolve: (result) => { clearTimeout(timeout); resolve(result); },
+         reject: (err) => { clearTimeout(timeout); reject(err); },
+       });
+
+       const message = JSON.stringify({
+         jsonrpc: JSONRPC_VERSION,
+         id,
+         method,
+         params,
+       });
+
+       try {
+         this.process.stdin.write(message + "\n");
+       } catch (err) {
+         this.pendingRequests.delete(id);
+         clearTimeout(timeout);
+         reject(new Error(`Failed to write to MCP server '${this.name}': ${err.message}`));
+       }
+     });
+   }
+
+   _notify(method, params) {
+     const message = JSON.stringify({
+       jsonrpc: JSONRPC_VERSION,
+       method,
+       params,
+     });
+     try {
+       this.process.stdin.write(message + "\n");
+     } catch {}
+   }
+
+   _processBuffer() {
+     const lines = this._buffer.split("\n");
+     this._buffer = lines.pop() || "";
+
+     for (const line of lines) {
+       if (!line.trim()) continue;
+       try {
+         const msg = JSON.parse(line);
+         if (msg.id && this.pendingRequests.has(msg.id)) {
+           const { resolve, reject } = this.pendingRequests.get(msg.id);
+           this.pendingRequests.delete(msg.id);
+           if (msg.error) {
+             reject(new Error(msg.error.message || "MCP error"));
+           } else {
+             resolve(msg.result);
+           }
+         }
+       } catch (err) {
+         log.dim(`[mcp:${this.name}] Failed to parse response: ${line.slice(0, 100)}`);
+       }
+     }
+   }
+
+   getToolDefinitions() {
+     return this.tools.map(t => ({
+       name: `mcp_${this.name}_${t.name}`,
+       description: `[MCP:${this.name}] ${t.description || t.name}`,
+       parameters: t.inputSchema || { type: "object", properties: {} },
+       mcpServer: this.name,
+       mcpToolName: t.name,
+     }));
+   }
+
+   disconnect() {
+     if (this.process) {
+       try { this.process.kill(); } catch {}
+       this.process = null;
+     }
+   }
+ }
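
A minimal usage sketch for the McpServer class above, assuming it is imported from this module (the in-package path is not shown in the diff) and that an MCP server such as @modelcontextprotocol/server-filesystem is available; the server command, tool name, and arguments are illustrative. On the wire, each request is a single newline-terminated JSON-RPC 2.0 line, e.g. {"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}.

    // Illustrative only; import path, server command, and tool name are assumptions
    import { McpServer } from "./mcp/client.js";

    const server = new McpServer(
      "fs",                                                  // namespace used for tool prefixes
      "npx",
      ["-y", "@modelcontextprotocol/server-filesystem", process.cwd()]
    );

    await server.connect();                                  // spawns the process, runs initialize + discovery
    console.log(server.tools.map(t => t.name));              // tools found via tools/list

    const result = await server.callTool("read_file", { path: "package.json" });
    console.log(result);

    server.disconnect();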
+
+ /**
+  * MCP Manager — manages multiple MCP server connections
+  */
+ export class McpManager {
+   constructor() {
+     this.servers = new Map();
+   }
+
+   async addServer(name, config) {
+     const server = new McpServer(
+       name,
+       config.command,
+       config.args || [],
+       config.env || {}
+     );
+
+     try {
+       await server.connect();
+       this.servers.set(name, server);
+       log.success(`MCP server '${name}' connected (${server.tools.length} tools)`);
+       return server;
+     } catch (err) {
+       log.warn(`MCP server '${name}' failed: ${err.message}`);
+       return null;
+     }
+   }
+
+   getAllToolDefinitions() {
+     const tools = [];
+     for (const server of this.servers.values()) {
+       tools.push(...server.getToolDefinitions());
+     }
+     return tools;
+   }
+
+   async callTool(fullName, args) {
+     // Find the server by checking registered tool names (avoids underscore ambiguity)
+     let server = null;
+     let toolName = null;
+     for (const s of this.servers.values()) {
+       const def = s.getToolDefinitions().find(t => t.name === fullName);
+       if (def) { server = s; toolName = def.mcpToolName; break; }
+     }
+     if (!server || !toolName) {
+       // Fallback: try regex parse
+       const match = fullName.match(/^mcp_([^_]+)_(.+)$/);
+       if (!match) return { error: `Invalid MCP tool name: ${fullName}` };
+       const [, serverName, tn] = match;
+       server = this.servers.get(serverName);
+       toolName = tn;
+       if (!server) return { error: `MCP server '${serverName}' not connected` };
+     }
+
+     try {
+       const result = await server.callTool(toolName, args);
+       // Extract text content from MCP response
+       if (result.content) {
+         return { result: result.content.map(c => c.text || JSON.stringify(c)).join("\n") };
+       }
+       return result;
+     } catch (err) {
+       return { error: err.message };
+     }
+   }
+
+   isMcpTool(name) {
+     return name.startsWith("mcp_");
+   }
+
+   disconnectAll() {
+     for (const server of this.servers.values()) {
+       server.disconnect();
+     }
+     this.servers.clear();
+   }
+ }
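
The manager wraps the same flow for several servers at once. A sketch of how the rest of the CLI might drive it; the server config, tool names, and import path are illustrative assumptions:

    import { McpManager } from "./mcp/client.js";

    const mcp = new McpManager();
    await mcp.addServer("fs", {
      command: "npx",
      args: ["-y", "@modelcontextprotocol/server-filesystem", process.cwd()],
      env: {},
    });

    // Definitions are namespaced as mcp_<server>_<tool> before being offered to the model
    const toolDefs = mcp.getAllToolDefinitions();

    // Route a model-requested call back to the owning server
    if (mcp.isMcpTool("mcp_fs_read_file")) {
      const out = await mcp.callTool("mcp_fs_read_file", { path: "README.md" });
      console.log(out.result ?? out.error);
    }

    mcp.disconnectAll();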
@@ -0,0 +1,153 @@
+ // Multi-agent orchestration — spawn child agents for subtasks
+ import { log } from "./logger.js";
+
+ /**
+  * SubAgent — a lightweight agent that runs a focused subtask
+  * with its own message history but shares the parent's tools and API
+  */
+ export class SubAgent {
+   constructor(name, systemPrompt, options = {}) {
+     this.name = name;
+     this.systemPrompt = systemPrompt;
+     this.maxIterations = options.maxIterations || 10;
+     this.tools = options.tools || [];
+     this.messages = [{ role: "system", content: systemPrompt }];
+     this.status = "idle"; // idle, running, done, error
+     this.result = null;
+   }
+ }
+
+ /**
+  * Agent orchestrator — manages parent + child agents
+  */
+ export class AgentOrchestrator {
+   constructor(chatFn, executeTool) {
+     this.chatFn = chatFn; // async function(messages, tools) => response
+     this.executeTool = executeTool; // async function(name, args) => result
+     this.agents = new Map();
+     this.parentId = "main";
+   }
+
+   /**
+    * Spawn a child agent for a subtask
+    */
+   createAgent(name, task, options = {}) {
+     const systemPrompt = `You are a sub-agent named "${name}" focused on a specific task.
+ Your task: ${task}
+
+ Complete this task using the available tools, then provide a concise summary of what you did and the results.
+ Be focused and efficient. Do not ask questions — make reasonable decisions.
+ Current working directory: ${process.cwd()}`;
+
+     const agent = new SubAgent(name, systemPrompt, {
+       maxIterations: options.maxIterations || 8,
+       tools: options.tools,
+     });
+
+     this.agents.set(name, agent);
+     return agent;
+   }
+
+   /**
+    * Run a child agent to completion
+    */
+   async runAgent(name, userMessage, allTools, parseSSEStream) {
+     const agent = this.agents.get(name);
+     if (!agent) throw new Error(`Agent '${name}' not found`);
+
+     agent.status = "running";
+     agent.messages.push({ role: "user", content: userMessage });
+
+     log.dim(`  [${name}] Starting subtask...`);
+
+     try {
+       for (let i = 0; i < agent.maxIterations; i++) {
+         const response = await this.chatFn(agent.messages, allTools);
+
+         let fullText = "";
+         const toolCalls = {};
+
+         for await (const chunk of parseSSEStream(response)) {
+           const delta = chunk.choices?.[0]?.delta;
+           const finishReason = chunk.choices?.[0]?.finish_reason;
+
+           if (delta?.content) fullText += delta.content;
+
+           if (delta?.tool_calls) {
+             for (const tc of delta.tool_calls) {
+               const idx = tc.index;
+               if (!toolCalls[idx]) toolCalls[idx] = { id: tc.id || "", name: "", arguments: "" };
+               if (tc.id) toolCalls[idx].id = tc.id;
+               if (tc.function?.name) toolCalls[idx].name = tc.function.name;
+               if (tc.function?.arguments) toolCalls[idx].arguments += tc.function.arguments;
+             }
+           }
+
+           if (finishReason) break;
+         }
+
+         const toolCallList = Object.values(toolCalls);
+
+         if (toolCallList.length === 0) {
+           agent.messages.push({ role: "assistant", content: fullText });
+           agent.status = "done";
+           agent.result = fullText;
+           log.dim(`  [${name}] Completed.`);
+           return fullText;
+         }
+
+         const assistantMsg = { role: "assistant", content: fullText || null };
+         assistantMsg.tool_calls = toolCallList.map(tc => ({
+           id: tc.id, type: "function", function: { name: tc.name, arguments: tc.arguments },
+         }));
+         agent.messages.push(assistantMsg);
+
+         // Execute tools (in parallel)
+         const toolPromises = toolCallList.map(async (tc) => {
+           let input = {};
+           try { input = JSON.parse(tc.arguments); } catch {}
+           log.dim(`  [${name}] → ${tc.name}`);
+           const result = await this.executeTool(tc.name, input);
+           return { id: tc.id, result };
+         });
+
+         const toolResults = await Promise.all(toolPromises);
+
+         for (const { id, result } of toolResults) {
+           const resultStr = JSON.stringify(result);
+           const truncated = resultStr.length > 6000 ? resultStr.slice(0, 6000) + "...(truncated)" : resultStr;
+           agent.messages.push({ role: "tool", tool_call_id: id, content: truncated });
+         }
+       }
+
+       agent.status = "done";
+       agent.result = "Reached maximum iterations";
+       return agent.result;
+     } catch (err) {
+       agent.status = "error";
+       agent.result = err.message;
+       log.warn(`  [${name}] Error: ${err.message}`);
+       return `Error: ${err.message}`;
+     }
+   }
+
+   /**
+    * Run multiple agents in parallel
+    */
+   async runParallel(tasks, allTools, parseSSEStream) {
+     const promises = tasks.map(({ name, task }) => {
+       this.createAgent(name, task);
+       return this.runAgent(name, task, allTools, parseSSEStream);
+     });
+
+     return Promise.all(promises);
+   }
+
+   getAgentStatus() {
+     const statuses = {};
+     for (const [name, agent] of this.agents) {
+       statuses[name] = { status: agent.status, result: agent.result?.slice(0, 200) };
+     }
+     return statuses;
+   }
+ }
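
A sketch of how the orchestrator could be wired up, assuming the providers module later in this diff supplies the chat function via createProvider. The import paths, the stand-in executeTool, and the minimal parseSSEStream below are assumptions standing in for the CLI's own helpers, which are not part of this diff:

    import { AgentOrchestrator } from "./agents.js";
    import { createProvider } from "./ai/providers.js";

    // Minimal stand-in for the CLI's SSE parser: yields parsed OpenAI-style chunks
    async function* parseSSEStream(response) {
      const decoder = new TextDecoder();
      let buffer = "";
      for await (const part of response.body) {
        buffer += decoder.decode(part, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() || "";
        for (const line of lines) {
          if (!line.startsWith("data: ")) continue;
          const data = line.slice(6).trim();
          if (!data || data === "[DONE]") continue;
          try { yield JSON.parse(data); } catch {}
        }
      }
    }

    const provider = createProvider();
    const orchestrator = new AgentOrchestrator(
      (messages, tools) => provider.chatStream(messages, tools),
      async (name, args) => ({ ok: true, name, args })   // stand-in tool executor
    );

    const summaries = await orchestrator.runParallel(
      [
        { name: "tests", task: "Run the test suite and summarize any failures" },
        { name: "deps", task: "List outdated dependencies" },
      ],
      [],               // OpenAI-style tool definitions (none in this sketch)
      parseSSEStream
    );

    console.log(summaries);
    console.log(orchestrator.getAgentStatus());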
@@ -0,0 +1,290 @@
+ // Multi-provider abstraction
+ // Supports: Cheri Cloud (default), OpenAI, Anthropic, Ollama, custom OpenAI-compatible
+ import { getConfigValue } from "../config-store.js";
+
+ /**
+  * Provider interface:
+  * - chatStream(messages, tools, options) => Response (SSE stream)
+  * - chatSync(messages, tools, options) => parsed JSON response
+  */
+
+ // ── Cheri Cloud Provider (default) ──────────────────────────────────────────
+ class CheriCloudProvider {
+   constructor(token, baseUrl) {
+     this.token = token;
+     this.baseUrl = baseUrl || "https://cheri.heysalad.app";
+   }
+
+   async chatStream(messages, tools = [], options = {}) {
+     const body = { messages, stream: true };
+     if (tools.length > 0) body.tools = tools;
+     if (options.model) body.model = options.model;
+
+     const res = await fetch(`${this.baseUrl}/api/chat/completions`, {
+       method: "POST",
+       headers: {
+         Authorization: `Bearer ${this.token}`,
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify(body),
+     });
+
+     if (!res.ok) {
+       const text = await res.text();
+       let msg;
+       try { msg = JSON.parse(text).error || text; } catch { msg = text; }
+       throw new Error(`AI error (${res.status}): ${msg}`);
+     }
+     return res;
+   }
+ }
+
+ // ── OpenAI-Compatible Provider ──────────────────────────────────────────────
+ class OpenAIProvider {
+   constructor(apiKey, baseUrl, defaultModel) {
+     this.apiKey = apiKey;
+     this.baseUrl = baseUrl;
+     this.defaultModel = defaultModel;
+   }
+
+   async chatStream(messages, tools = [], options = {}) {
+     const body = {
+       model: options.model || this.defaultModel,
+       messages,
+       stream: true,
+     };
+     if (tools.length > 0) body.tools = tools;
+     if (options.temperature !== undefined) body.temperature = options.temperature;
+     if (options.max_tokens) body.max_tokens = options.max_tokens;
+
+     const res = await fetch(`${this.baseUrl}/chat/completions`, {
+       method: "POST",
+       headers: {
+         Authorization: `Bearer ${this.apiKey}`,
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify(body),
+     });
+
+     if (!res.ok) {
+       const text = await res.text();
+       throw new Error(`Provider error (${res.status}): ${text.slice(0, 200)}`);
+     }
+     return res;
+   }
+ }
+
+ // ── Anthropic Provider ──────────────────────────────────────────────────────
+ class AnthropicProvider {
+   constructor(apiKey, defaultModel) {
+     this.apiKey = apiKey;
+     this.defaultModel = defaultModel || "claude-sonnet-4-20250514";
+   }
+
+   async chatStream(messages, tools = [], options = {}) {
+     // Convert OpenAI format to Anthropic format
+     const system = messages.filter(m => m.role === "system").map(m => m.content).join("\n");
+     const anthropicMessages = [];
+
+     for (const msg of messages) {
+       if (msg.role === "system") continue;
+
+       if (msg.role === "user") {
+         anthropicMessages.push({ role: "user", content: msg.content });
+       } else if (msg.role === "assistant") {
+         const content = [];
+         if (msg.content) content.push({ type: "text", text: msg.content });
+         if (msg.tool_calls) {
+           for (const tc of msg.tool_calls) {
+             let input = {};
+             try { input = JSON.parse(tc.function.arguments); } catch {}
+             content.push({ type: "tool_use", id: tc.id, name: tc.function.name, input });
+           }
+         }
+         anthropicMessages.push({ role: "assistant", content });
+       } else if (msg.role === "tool") {
+         // Merge consecutive tool results into user message
+         const last = anthropicMessages[anthropicMessages.length - 1];
+         const block = { type: "tool_result", tool_use_id: msg.tool_call_id, content: msg.content };
+         if (last?.role === "user" && Array.isArray(last.content)) {
+           last.content.push(block);
+         } else {
+           anthropicMessages.push({ role: "user", content: [block] });
+         }
+       }
+     }
+
+     const anthropicTools = tools.map(t => ({
+       name: t.function.name,
+       description: t.function.description,
+       input_schema: t.function.parameters,
+     }));
+
+     const body = {
+       model: options.model || this.defaultModel,
+       max_tokens: options.max_tokens || 8192,
+       system,
+       messages: anthropicMessages,
+       stream: true,
+     };
+     if (anthropicTools.length > 0) body.tools = anthropicTools;
+
+     const res = await fetch("https://api.anthropic.com/v1/messages", {
+       method: "POST",
+       headers: {
+         "x-api-key": this.apiKey,
+         "anthropic-version": "2023-06-01",
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify(body),
+     });
+
+     if (!res.ok) {
+       const text = await res.text();
+       throw new Error(`Anthropic error (${res.status}): ${text.slice(0, 200)}`);
+     }
+
+     // Anthropic SSE has different format — we need to convert to OpenAI format
+     // Return raw response; the agent will need to handle both formats
+     // For now, wrap in a converter
+     return this._convertStream(res, options.model || this.defaultModel);
+   }
+
+   async _convertStream(res, model) {
+     const reader = res.body.getReader();
+     const { readable, writable } = new TransformStream();
+     const writer = writable.getWriter();
+     const encoder = new TextEncoder();
+
+     (async () => {
+       const decoder = new TextDecoder();
+       let buffer = "";
+       let toolUseId = "";
+       let toolName = "";
+
+       try {
+         while (true) {
+           const { done, value } = await reader.read();
+           if (done) break;
+           buffer += decoder.decode(value, { stream: true });
+           const lines = buffer.split("\n");
+           buffer = lines.pop() || "";
+
+           for (const line of lines) {
+             if (!line.startsWith("data: ")) continue;
+             const data = line.slice(6).trim();
+             if (!data || data === "[DONE]") continue;
+
+             try {
+               const event = JSON.parse(data);
+               let chunk = null;
+
+               switch (event.type) {
+                 case "content_block_start":
+                   if (event.content_block?.type === "tool_use") {
+                     toolUseId = event.content_block.id;
+                     toolName = event.content_block.name;
+                     chunk = { choices: [{ index: 0, delta: { tool_calls: [{ index: event.index, id: toolUseId, type: "function", function: { name: toolName, arguments: "" } }] }, finish_reason: null }] };
+                   }
+                   break;
+                 case "content_block_delta":
+                   if (event.delta?.type === "text_delta") {
+                     chunk = { choices: [{ index: 0, delta: { content: event.delta.text }, finish_reason: null }] };
+                   } else if (event.delta?.type === "input_json_delta") {
+                     chunk = { choices: [{ index: 0, delta: { tool_calls: [{ index: 0, function: { arguments: event.delta.partial_json } }] }, finish_reason: null }] };
+                   }
+                   break;
+                 case "message_delta":
+                   if (event.delta?.stop_reason) {
+                     const fr = event.delta.stop_reason === "tool_use" ? "tool_calls" : "stop";
+                     chunk = { choices: [{ index: 0, delta: {}, finish_reason: fr }] };
+                   }
+                   break;
+               }
+
+               if (chunk) {
+                 chunk.model = model;
+                 await writer.write(encoder.encode(`data: ${JSON.stringify(chunk)}\n\n`));
+               }
+             } catch {}
+           }
+         }
+         await writer.write(encoder.encode("data: [DONE]\n\n"));
+       } catch (err) {
+         try {
+           const errChunk = { choices: [{ index: 0, delta: { content: `\n[Stream error: ${err.message || "unknown"}]` }, finish_reason: "stop" }] };
+           await writer.write(encoder.encode(`data: ${JSON.stringify(errChunk)}\n\n`));
+         } catch {}
+       } finally {
+         try { await writer.close(); } catch {}
+       }
+     })();
+
+     return new Response(readable, {
+       headers: { "Content-Type": "text/event-stream" },
+     });
+   }
+ }
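
For reference, _convertStream above maps Anthropic streaming events onto OpenAI-style chunks so the rest of the CLI only has to parse one format; the payload values below are illustrative:

    Anthropic event:
      data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}}
    is re-emitted as:
      data: {"choices":[{"index":0,"delta":{"content":"Hello"},"finish_reason":null}],"model":"claude-sonnet-4-20250514"}

    Anthropic event:
      data: {"type":"message_delta","delta":{"stop_reason":"tool_use"}}
    is re-emitted as:
      data: {"choices":[{"index":0,"delta":{},"finish_reason":"tool_calls"}],"model":"claude-sonnet-4-20250514"}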
+
+ // ── Provider Factory ────────────────────────────────────────────────────────
+
+ const PROVIDER_CONFIGS = {
+   cheri: { envKey: null },
+   openai: { envKey: "OPENAI_API_KEY", baseUrl: "https://api.openai.com/v1", defaultModel: "gpt-4o" },
+   anthropic: { envKey: "ANTHROPIC_API_KEY", defaultModel: "claude-sonnet-4-20250514" },
+   deepseek: { envKey: "DEEPSEEK_API_KEY", baseUrl: "https://api.deepseek.com/v1", defaultModel: "deepseek-chat" },
+   groq: { envKey: "GROQ_API_KEY", baseUrl: "https://api.groq.com/openai/v1", defaultModel: "llama-3.3-70b-versatile" },
+   together: { envKey: "TOGETHER_API_KEY", baseUrl: "https://api.together.xyz/v1", defaultModel: "meta-llama/Llama-3.3-70B-Instruct-Turbo" },
+   ollama: { envKey: null, baseUrl: "http://localhost:11434/v1", defaultModel: "llama3.2" },
+   openrouter: { envKey: "OPENROUTER_API_KEY", baseUrl: "https://openrouter.ai/api/v1", defaultModel: "anthropic/claude-sonnet-4" },
+   xai: { envKey: "XAI_API_KEY", baseUrl: "https://api.x.ai/v1", defaultModel: "grok-3" },
+   mistral: { envKey: "MISTRAL_API_KEY", baseUrl: "https://api.mistral.ai/v1", defaultModel: "mistral-large-latest" },
+ };
+
+ /**
+  * Create a provider based on config
+  */
+ export function createProvider(providerName) {
+   providerName = providerName || getConfigValue("ai.provider") || "cheri";
+
+   if (providerName === "cheri") {
+     const token = getConfigValue("token");
+     const baseUrl = getConfigValue("apiUrl") || "https://cheri.heysalad.app";
+     if (!token) throw new Error("Not logged in. Run 'cheri login' first.");
+     return new CheriCloudProvider(token, baseUrl);
+   }
+
+   if (providerName === "anthropic") {
+     const apiKey = getConfigValue(`ai.keys.${providerName}`) || process.env.ANTHROPIC_API_KEY;
+     if (!apiKey) throw new Error(`No API key for ${providerName}. Set via: cheri config set ai.keys.anthropic <key>`);
+     const model = getConfigValue("ai.model") || PROVIDER_CONFIGS.anthropic.defaultModel;
+     return new AnthropicProvider(apiKey, model);
+   }
+
+   const config = PROVIDER_CONFIGS[providerName];
+   if (!config) {
+     // Treat as custom OpenAI-compatible endpoint
+     const baseUrl = getConfigValue(`ai.providers.${providerName}.baseUrl`);
+     const apiKey = getConfigValue(`ai.keys.${providerName}`) || "none";
+     const model = getConfigValue(`ai.providers.${providerName}.model`) || "default";
+     if (!baseUrl) throw new Error(`Unknown provider '${providerName}'. Set baseUrl via: cheri config set ai.providers.${providerName}.baseUrl <url>`);
+     return new OpenAIProvider(apiKey, baseUrl, model);
+   }
+
+   const apiKey = getConfigValue(`ai.keys.${providerName}`) || (config.envKey ? process.env[config.envKey] : "none");
+   if (!apiKey || apiKey === "none") {
+     if (providerName !== "ollama") {
+       throw new Error(`No API key for ${providerName}. Set via: cheri config set ai.keys.${providerName} <key>`);
+     }
+   }
+
+   const model = getConfigValue("ai.model") || config.defaultModel;
+   return new OpenAIProvider(apiKey || "ollama", config.baseUrl, model);
+ }
+
+ /**
+  * List available providers
+  */
+ export function listProviders() {
+   return Object.keys(PROVIDER_CONFIGS);
+ }
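
A usage sketch for the factory, assuming the import path shown below. The config commands mirror the hints in the error messages above, and a real caller would parse the SSE stream into chunks rather than printing it raw:

    // Provider selection is read from config, e.g. (values illustrative):
    //   cheri config set ai.provider openai
    //   cheri config set ai.keys.openai <key>
    //   cheri config set ai.model gpt-4o
    import { createProvider, listProviders } from "./ai/providers.js";   // assumed path

    console.log(listProviders());            // built-in provider ids from PROVIDER_CONFIGS

    const provider = createProvider();       // falls back to ai.provider, then "cheri"
    const response = await provider.chatStream(
      [{ role: "user", content: "Summarize this repository" }],
      []                                     // no tools for a plain chat turn
    );

    // Raw SSE passthrough; a real caller would parse each "data: {...}" line into a chunk
    const decoder = new TextDecoder();
    for await (const part of response.body) {
      process.stdout.write(decoder.decode(part, { stream: true }));
    }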