@marizmelo/llm-cli 0.0.5 → 0.0.7
- package/README.md +23 -18
- package/bundle/gemini.js +45 -21
- package/package.json +1 -1
package/README.md CHANGED

@@ -28,32 +28,24 @@ llm-cli --prompt "What is 2+2?"
 - 🎯 Provider-specific model switching
 - 💾 Memory and context management
 - 🔑 Secure API key management with persistent settings
-- 🛠️ Full tool/function calling support for OpenAI and
+- 🛠️ Full tool/function calling support for OpenAI, Anthropic, and Ollama
 
-##
+## Provider Setup
 
-###
-- **OpenAI & Anthropic Tool Support**: Both providers now have full function/tool calling capabilities, allowing them to use all available CLI tools (file operations, shell commands, web searches, etc.)
-- **Persistent API Key Management**: API keys are now saved to settings and automatically synced to environment variables on startup
-- **Improved Provider Setup**: Simplified setup process with `/provider setup <provider> <api-key>` command
-- **Better Error Handling**: Fixed streaming response issues and improved error messages
-
-### Provider Setup
-
-#### Quick Setup (New Method)
+### Quick Setup
 ```bash
-# Setup providers with persistent API keys
+# Setup cloud providers with persistent API keys
 llm-cli
 /provider setup openai sk-your-api-key
 /provider setup anthropic sk-ant-your-api-key
 /provider switch openai
 ```
 
-
+### Ollama (Local)
 ```bash
 # 1. Install Ollama (https://ollama.com)
-# 2. Pull a model
-ollama pull
+# 2. Pull a tool-capable model
+ollama pull qwen2.5:latest
 
 # 3. Start llm-cli and switch to Ollama
 llm-cli
@@ -62,13 +54,26 @@ llm-cli
 # 4. (Optional) Switch to a different model
 /provider model <model-name>
 ```
-The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.
 
-
+No `.env` file or environment variables required for Ollama. The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.
+
+**Tool-capable Ollama models** (recommended for file ops, shell commands, etc.):
+- `qwen2.5:7b` / `qwen2.5:14b`
+- `llama3.1:8b` / `llama3.1:70b`
+- `mistral:7b`
+
+Models without tool support (e.g. `gemma3n`, `deepseek-r1`) will automatically fall back to text-only conversation mode.
+
+### Environment Variables (Alternative)
 ```bash
-#
+# Cloud providers
 export OPENAI_API_KEY="your-api-key"
 export ANTHROPIC_API_KEY="your-api-key"
+
+# Ollama (optional overrides)
+export OLLAMA_MODEL="qwen2.5:latest"
+export OLLAMA_BASE_URL="http://localhost:11434"
+
 llm-cli
 ```
 
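The README changes steer Ollama users toward tool-capable models. One way to check whether a pulled model actually accepts tools is to send a minimal tools request to Ollama's /api/chat endpoint and inspect the error body for the same "does not support tools" string the bundle matches on (see the gemini.js diff below). A rough sketch in Node; the model name and tool definition are placeholders, not part of the package:

// probe-tools.mjs — rough sketch: does this local Ollama model accept a tools request?
// Assumes Ollama is running on the default port; model and tool names are illustrative.
const model = process.argv[2] ?? "qwen2.5:latest";
const res = await fetch("http://localhost:11434/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model,
    stream: false,
    messages: [{ role: "user", content: "What is the weather in Paris?" }],
    tools: [{
      type: "function",
      function: {
        name: "get_weather",
        description: "Get the current weather for a city",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"]
        }
      }
    }]
  })
});
const body = await res.text();
console.log(res.ok ? `tools accepted by ${model}` : `tools rejected: ${body}`);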
package/bundle/gemini.js CHANGED

@@ -139658,8 +139658,8 @@ var GIT_COMMIT_INFO, CLI_VERSION;
 var init_git_commit = __esm({
   "packages/core/dist/src/generated/git-commit.js"() {
     "use strict";
-    GIT_COMMIT_INFO = "
-    CLI_VERSION = "0.0.
+    GIT_COMMIT_INFO = "03a9161d";
+    CLI_VERSION = "0.0.7";
   }
 });
 
@@ -141926,13 +141926,14 @@ var init_ollama_provider = __esm({
     name = "ollama";
     baseUrl;
     model;
+    toolsSupported = true;
     constructor(config, gcConfig) {
       this.baseUrl = config.baseUrl || "http://localhost:11434";
       this.model = config.model;
     }
     async generateContent(request3, userPromptId) {
       const messages = this.convertToOllamaMessages(request3);
-      const tools = this.convertToOllamaTools(request3);
+      const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
       const ollamaRequest = {
         model: this.model,
         messages,
@@ -141942,13 +141943,15 @@ var init_ollama_provider = __esm({
       };
       const response = await fetch(`${this.baseUrl}/api/chat`, {
         method: "POST",
-        headers: {
-          "Content-Type": "application/json"
-        },
+        headers: { "Content-Type": "application/json" },
         body: JSON.stringify(ollamaRequest)
       });
       if (!response.ok) {
         const body = await response.text().catch(() => "");
+        if (this.isToolsNotSupportedError(body) && tools.length > 0) {
+          this.toolsSupported = false;
+          return this.generateContent(request3, userPromptId);
+        }
         throw new Error(`Ollama API error: ${response.status} ${response.statusText} - ${body}`);
       }
       const ollamaResponse = await response.json();
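The change above is an optimistic capability probe: tools are always sent on the first request, and if Ollama rejects them the provider flips toolsSupported and re-issues the identical request without tools. The retry cannot loop, because the second attempt sends an empty tools list. A minimal standalone sketch of the same pattern — class and helper names here are illustrative, not the package's API:

// Sketch of the detect-and-retry fallback used above (illustrative names).
class ToolAwareOllamaClient {
  toolsSupported = true;
  constructor(baseUrl = "http://localhost:11434") {
    this.baseUrl = baseUrl;
  }
  async chat(payload, buildTools) {
    // Send tools optimistically; drop them for the rest of the session on rejection.
    const tools = this.toolsSupported ? buildTools() : [];
    const res = await fetch(`${this.baseUrl}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ ...payload, tools })
    });
    if (!res.ok) {
      const body = await res.text().catch(() => "");
      if (tools.length > 0 && body.toLowerCase().includes("does not support tools")) {
        this.toolsSupported = false;           // remember the capability...
        return this.chat(payload, buildTools); // ...and retry once, tool-free
      }
      throw new Error(`Ollama API error: ${res.status} - ${body}`);
    }
    return res.json();
  }
}

Because the flag lives on the provider instance, the streaming path below inherits the downgrade too.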
@@ -141956,7 +141959,7 @@ var init_ollama_provider = __esm({
     }
     async generateContentStream(request3, userPromptId) {
       const messages = this.convertToOllamaMessages(request3);
-      const tools = this.convertToOllamaTools(request3);
+      const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
       const ollamaRequest = {
         model: this.model,
         messages,
@@ -141966,13 +141969,15 @@ var init_ollama_provider = __esm({
       };
       const response = await fetch(`${this.baseUrl}/api/chat`, {
         method: "POST",
-        headers: {
-          "Content-Type": "application/json"
-        },
+        headers: { "Content-Type": "application/json" },
         body: JSON.stringify(ollamaRequest)
       });
       if (!response.ok) {
         const body = await response.text().catch(() => "");
+        if (this.isToolsNotSupportedError(body) && tools.length > 0) {
+          this.toolsSupported = false;
+          return this.generateContentStream(request3, userPromptId);
+        }
         throw new Error(`Ollama API error: ${response.status} ${response.statusText} - ${body}`);
       }
       const reader = response.body?.getReader();
@@ -142021,6 +142026,24 @@ var init_ollama_provider = __esm({
     validateConfig(config) {
       return config.provider === "ollama" && !!config.model;
     }
+    isToolsNotSupportedError(body) {
+      const lower = body.toLowerCase();
+      return lower.includes("does not support tools") || lower.includes("tool use is not supported");
+    }
+    parseToolArgs(args) {
+      if (args === null || args === void 0)
+        return {};
+      if (typeof args === "object" && !Array.isArray(args))
+        return args;
+      if (typeof args === "string") {
+        try {
+          return JSON.parse(args);
+        } catch {
+          return {};
+        }
+      }
+      return {};
+    }
     buildOptions(request3) {
       const options2 = {};
       const temp = request3.generationConfig?.temperature ?? request3.config?.temperature;
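parseToolArgs normalizes the arguments field of a tool call, which local models may emit as a JSON object, a serialized string, or not at all. Illustrative calls showing the expected behavior of the method as written (provider stands for an OllamaProvider instance):

// Illustrative calls against the parseToolArgs method added above.
provider.parseToolArgs({ city: "Paris" });   // => { city: "Paris" }  (object passed through)
provider.parseToolArgs('{"city":"Paris"}');  // => { city: "Paris" }  (JSON string parsed)
provider.parseToolArgs("not json");          // => {}                 (bad JSON swallowed)
provider.parseToolArgs(null);                // => {}                 (missing arguments)
provider.parseToolArgs(["Paris"]);           // => {}                 (arrays rejected)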
@@ -142114,12 +142137,11 @@ var init_ollama_provider = __esm({
       }
       if (message?.tool_calls) {
         for (const toolCall of message.tool_calls) {
-          if (toolCall.function) {
-            const args = typeof toolCall.function.arguments === "string" ? JSON.parse(toolCall.function.arguments) : toolCall.function.arguments || {};
+          if (toolCall.function?.name) {
             parts.push({
               functionCall: {
                 name: toolCall.function.name,
-                args
+                args: this.parseToolArgs(toolCall.function.arguments)
               }
             });
           }
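For context on what this loop consumes: Ollama's /api/chat normally returns tool-call arguments as an already-parsed object (OpenAI-style APIs serialize them as a JSON string instead), which is why parseToolArgs accepts both forms. A hand-written example response — values are illustrative, not captured output:

// Hand-written example of an Ollama /api/chat response carrying a tool call.
const exampleResponse = {
  model: "qwen2.5:latest",
  message: {
    role: "assistant",
    content: "",
    tool_calls: [
      { function: { name: "get_weather", arguments: { city: "Paris" } } }
    ]
  },
  done: true
};
// The loop above maps each entry to a Gemini-style part:
//   { functionCall: { name: "get_weather", args: { city: "Paris" } } }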
@@ -142173,10 +142195,12 @@ var init_ollama_provider = __esm({
       }
       if (chunk.message?.tool_calls) {
         for (const toolCall of chunk.message.tool_calls) {
-          if (toolCall.function) {
-            const args = typeof toolCall.function.arguments === "string" ? JSON.parse(toolCall.function.arguments) : toolCall.function.arguments || {};
+          if (toolCall.function?.name) {
             accumulatedToolCalls.push({
-              function: {
+              function: {
+                name: toolCall.function.name,
+                arguments: this.parseToolArgs(toolCall.function.arguments)
+              }
             });
           }
         }
@@ -142372,7 +142396,7 @@ function createContentGeneratorConfig(config, authType) {
   return contentGeneratorConfig;
 }
 async function createContentGenerator(config, gcConfig, sessionId2) {
-  const version2 = "0.0.
+  const version2 = "0.0.7";
   const userAgent2 = `GeminiCLI/${version2} (${process.platform}; ${process.arch})`;
   const baseHeaders = {
     "User-Agent": userAgent2
@@ -243939,8 +243963,8 @@ var init_providerManager = __esm({
       name: "Ollama (Local)",
       description: "Local models via Ollama",
       authType: "ollama-local",
-      requiredEnvVars: [
-      optionalEnvVars: ["OLLAMA_BASE_URL"],
+      requiredEnvVars: [],
+      optionalEnvVars: ["OLLAMA_MODEL", "OLLAMA_BASE_URL"],
       defaultModel: "llama3.2:latest",
       apiKeyFormat: "none",
       apiKeyUrl: "https://ollama.ai/",
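With requiredEnvVars now empty, Ollama is usable with zero configuration; OLLAMA_MODEL and OLLAMA_BASE_URL merely override the defaults. A sketch of the effective resolution implied by the registry entry here and the provider constructor earlier in the diff — the helper name is hypothetical, not part of the package:

// Hypothetical helper: the effective Ollama config implied by the diff.
// Both env vars are optional; defaults come from the registry entry and constructor.
function resolveOllamaConfig(env = process.env) {
  return {
    provider: "ollama",
    model: env.OLLAMA_MODEL || "llama3.2:latest",             // defaultModel above
    baseUrl: env.OLLAMA_BASE_URL || "http://localhost:11434"  // constructor fallback
  };
}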
@@ -274734,7 +274758,7 @@ async function getPackageJson() {
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
   const pkgJson = await getPackageJson();
-  return "0.0.
+  return "0.0.7";
 }
 
 // packages/cli/src/ui/commands/aboutCommand.ts
@@ -274786,7 +274810,7 @@ init_open();
 import process30 from "node:process";
 
 // packages/cli/src/generated/git-commit.ts
-var GIT_COMMIT_INFO2 = "
+var GIT_COMMIT_INFO2 = "03a9161d";
 
 // packages/cli/src/ui/commands/bugCommand.ts
 init_dist3();