@orchagent/cli 0.2.14 → 0.2.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/llm-errors.js +19 -1
- package/dist/lib/llm.js +37 -1
- package/package.json +1 -1
package/dist/lib/llm-errors.js (CHANGED)
```diff
@@ -28,6 +28,12 @@ const FALLBACKS = {
         502: 'Gemini is temporarily unavailable.',
         503: 'Gemini is overloaded. Try again later.',
     },
+    ollama: {
+        401: 'Authentication error (Ollama typically does not require auth)',
+        404: 'Model not found. Run: ollama pull <model>',
+        500: 'Ollama server error',
+        502: 'Cannot connect to Ollama. Is it running?',
+    },
 };
 const DEFAULT = 'LLM provider error. Check your API key and try again.';
 function isHtml(text) {
```
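The new `ollama` table mirrors the existing per-provider maps: a status-code-to-message lookup that the parser added in the next hunk falls back to when it cannot extract a message from the response body, with unknown codes dropping through to `DEFAULT`. A minimal sketch of that lookup pattern (an illustrative helper, not part of the package):

```js
// Hypothetical helper mirroring the FALLBACKS[provider][status] || DEFAULT pattern used in this file.
function fallbackMessage(fallbacks, provider, status, defaultMsg) {
    const table = fallbacks[provider] || {};
    return table[status] || defaultMsg; // e.g. ('ollama', 404) -> 'Model not found. Run: ollama pull <model>'
}
```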
```diff
@@ -67,13 +73,25 @@ function parseGemini(text, status) {
     catch { }
     return FALLBACKS.gemini[status] || DEFAULT;
 }
+function parseOllama(text, status) {
+    // Ollama uses OpenAI-compatible error format
+    try {
+        const p = JSON.parse(text);
+        const msg = p.error?.message || (typeof p.error === 'string' ? p.error : null);
+        if (msg)
+            return sanitize(msg);
+    }
+    catch { }
+    return FALLBACKS.ollama[status] || DEFAULT;
+}
 function parseLlmError(provider, text, status) {
     if (isHtml(text)) {
         return new errors_1.CliError(`${provider} error: ${FALLBACKS[provider][status] || DEFAULT}`);
     }
     const msg = provider === 'openai' ? parseOpenAI(text, status)
         : provider === 'anthropic' ? parseAnthropic(text, status)
-            : parseGemini(text, status);
+            : provider === 'ollama' ? parseOllama(text, status)
+                : parseGemini(text, status);
     const display = provider.charAt(0).toUpperCase() + provider.slice(1);
     return new errors_1.CliError(`${display} API error: ${msg}`);
 }
```
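As a rough illustration of how the new parsing path behaves end to end, the sketch below feeds two hypothetical response bodies through `parseLlmError`; the JSON shape assumes Ollama's OpenAI-compatible `{"error": {"message": ...}}` format referenced in the comment above, and both payloads are made up for the example:

```js
// Hypothetical inputs; neither body was captured from a real Ollama response.
const jsonBody = JSON.stringify({ error: { message: 'model "llama3.2" not found, try pulling it first' } });
const err1 = parseLlmError('ollama', jsonBody, 404);
// -> CliError('Ollama API error: model "llama3.2" not found, try pulling it first'), modulo sanitize()

const htmlBody = '<html><body>502 Bad Gateway</body></html>';
const err2 = parseLlmError('ollama', htmlBody, 502);
// isHtml() short-circuits to the fallback table:
// -> CliError('ollama error: Cannot connect to Ollama. Is it running?')
```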
package/dist/lib/llm.js (CHANGED)
```diff
@@ -53,12 +53,14 @@ exports.PROVIDER_ENV_VARS = {
     openai: 'OPENAI_API_KEY',
     anthropic: 'ANTHROPIC_API_KEY',
     gemini: 'GEMINI_API_KEY',
+    ollama: 'OLLAMA_HOST',
 };
 // Default models for each provider
 exports.DEFAULT_MODELS = {
     openai: 'gpt-4o',
     anthropic: 'claude-sonnet-4-20250514',
     gemini: 'gemini-1.5-pro',
+    ollama: 'llama3.2',
 };
 /**
  * Detect LLM API key from environment variables based on supported providers.
```
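Unlike the other entries, `OLLAMA_HOST` is an endpoint URL rather than a secret, and it may be left unset entirely, since `callOllama` (added further down) defaults to `http://localhost:11434`. A hedged sketch of how the two maps could be consumed together, not taken from the package's own detection code:

```js
// Illustrative only: resolve the endpoint and default model for the new provider.
const provider = 'ollama';
const endpoint = process.env[exports.PROVIDER_ENV_VARS[provider]] || ''; // '' lets callOllama fall back to localhost
const model = exports.DEFAULT_MODELS[provider];                          // 'llama3.2'
```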
```diff
@@ -162,6 +164,9 @@ async function callLlm(provider, apiKey, model, prompt, outputSchema) {
     else if (provider === 'gemini') {
         return callGemini(apiKey, model, prompt, outputSchema);
     }
+    else if (provider === 'ollama') {
+        return callOllama(apiKey, model, prompt, outputSchema);
+    }
     throw new errors_1.CliError(`Unsupported provider: ${provider}`);
 }
 async function callOpenAI(apiKey, model, prompt, outputSchema) {
```
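With the dispatch in place, invoking the new provider might look like the sketch below; the `apiKey` argument is reused to carry the Ollama host (or an empty string), matching the comment inside `callOllama` in the next hunk, and the prompt shown is made up for illustration:

```js
// Hypothetical call; callOllama ignores the last argument (its parameter is named _outputSchema).
const result = await callLlm(
    'ollama',
    process.env.OLLAMA_HOST || '', // endpoint, not an API key
    'llama3.2',
    'Summarize the deployment plan in one sentence.',
    undefined
);
```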
```diff
@@ -242,11 +247,42 @@ async function callGemini(apiKey, model, prompt, _outputSchema) {
         return { result: content };
     }
 }
+async function callOllama(endpoint, model, prompt, _outputSchema) {
+    // endpoint is passed via apiKey param for Ollama (it's the OLLAMA_HOST)
+    const baseUrl = endpoint || 'http://localhost:11434';
+    // Ensure /v1 path
+    const normalizedBase = baseUrl.endsWith('/v1') ? baseUrl : `${baseUrl.replace(/\/$/, '')}/v1`;
+    const url = `${normalizedBase}/chat/completions`;
+    const response = await fetch(url, {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({
+            model,
+            messages: [{ role: 'user', content: prompt }],
+            options: { num_ctx: 8192 },
+        }),
+    });
+    if (!response.ok) {
+        if (response.status === 404) {
+            throw new errors_1.CliError(`Model '${model}' not found. Run: ollama pull ${model}`);
+        }
+        const text = await response.text();
+        throw (0, llm_errors_1.parseLlmError)('ollama', text, response.status);
+    }
+    const data = await response.json();
+    const content = data.choices?.[0]?.message?.content || '';
+    try {
+        return JSON.parse(content);
+    }
+    catch {
+        return { result: content };
+    }
+}
 /**
  * Validate a provider string against known providers.
  */
 function validateProvider(provider) {
-    const validProviders = ['openai', 'anthropic', 'gemini'];
+    const validProviders = ['openai', 'anthropic', 'gemini', 'ollama'];
     if (!validProviders.includes(provider)) {
         throw new errors_1.CliError(`Invalid provider: ${provider}. Valid: ${validProviders.join(', ')}`);
     }
```
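For reference, the host normalization in `callOllama` accepts `OLLAMA_HOST` values with or without a trailing slash or an explicit `/v1` suffix. The standalone check below traces a few hypothetical hosts through the same logic:

```js
// Same normalization as callOllama: strip a trailing slash, append /v1 unless already present.
for (const host of ['http://localhost:11434', 'http://localhost:11434/', 'http://gpu-box:11434/v1']) {
    const base = host.endsWith('/v1') ? host : `${host.replace(/\/$/, '')}/v1`;
    console.log(`${base}/chat/completions`); // each prints a .../v1/chat/completions URL
}
```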