@marizmelo/llm-cli 0.0.6 → 0.0.8
- package/README.md +23 -18
- package/bundle/gemini.js +11 -7
- package/package.json +1 -1
package/README.md CHANGED

````diff
@@ -28,32 +28,24 @@ llm-cli --prompt "What is 2+2?"
 - 🎯 Provider-specific model switching
 - 💾 Memory and context management
 - 🔑 Secure API key management with persistent settings
-- 🛠️ Full tool/function calling support for OpenAI and Anthropic
+- 🛠️ Full tool/function calling support for OpenAI, Anthropic, and Ollama
 
-##
+## Provider Setup
 
-###
-- **OpenAI & Anthropic Tool Support**: Both providers now have full function/tool calling capabilities, allowing them to use all available CLI tools (file operations, shell commands, web searches, etc.)
-- **Persistent API Key Management**: API keys are now saved to settings and automatically synced to environment variables on startup
-- **Improved Provider Setup**: Simplified setup process with `/provider setup <provider> <api-key>` command
-- **Better Error Handling**: Fixed streaming response issues and improved error messages
-
-### Provider Setup
-
-#### Quick Setup (New Method)
+### Quick Setup
 ```bash
-# Setup providers with persistent API keys
+# Setup cloud providers with persistent API keys
 llm-cli
 /provider setup openai sk-your-api-key
 /provider setup anthropic sk-ant-your-api-key
 /provider switch openai
 ```
 
-
+### Ollama (Local)
 ```bash
 # 1. Install Ollama (https://ollama.com)
-# 2. Pull a model
-ollama pull
+# 2. Pull a tool-capable model
+ollama pull qwen2.5:latest
 
 # 3. Start llm-cli and switch to Ollama
 llm-cli
@@ -62,13 +54,26 @@ llm-cli
 # 4. (Optional) Switch to a different model
 /provider model <model-name>
 ```
-The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.
 
-
+No `.env` file or environment variables required for Ollama. The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.
+
+**Tool-capable Ollama models** (recommended for file ops, shell commands, etc.):
+- `qwen2.5:7b` / `qwen2.5:14b`
+- `llama3.1:8b` / `llama3.1:70b`
+- `mistral:7b`
+
+Models without tool support (e.g. `gemma3n`, `deepseek-r1`) will automatically fall back to text-only conversation mode.
+
+### Environment Variables (Alternative)
 ```bash
-#
+# Cloud providers
 export OPENAI_API_KEY="your-api-key"
 export ANTHROPIC_API_KEY="your-api-key"
+
+# Ollama (optional overrides)
+export OLLAMA_MODEL="qwen2.5:latest"
+export OLLAMA_BASE_URL="http://localhost:11434"
+
 llm-cli
 ```
 
````
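The README's new fallback note (models without tool support drop to text-only mode) implies a runtime capability check. As a rough sketch only, not this package's actual implementation: such a check can be approximated by sending Ollama a throwaway `tools` request and watching for its conventional rejection error. The default endpoint and the "does not support tools" error text are assumptions about Ollama's public API, not something this diff confirms.

```ts
// Sketch: probe whether a local Ollama model accepts tool definitions.
// Assumes the default endpoint http://localhost:11434 and Ollama's usual
// "<model> does not support tools" error text — both are assumptions.
async function supportsTools(model: string): Promise<boolean> {
  const res = await fetch("http://localhost:11434/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      stream: false,
      messages: [{ role: "user", content: "ping" }],
      tools: [{
        type: "function",
        function: {
          name: "noop", // hypothetical throwaway tool, used only as a probe
          description: "no-op probe",
          parameters: { type: "object", properties: {} },
        },
      }],
    }),
  });
  if (res.ok) return true;
  const body = (await res.json().catch(() => ({}))) as { error?: string };
  if (body.error?.includes("does not support tools")) return false;
  throw new Error(body.error ?? `HTTP ${res.status}`); // unrelated failure, e.g. model not pulled
}
```

Usage would look like `if (!(await supportsTools("gemma3n"))) { /* text-only mode */ }`.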
package/bundle/gemini.js CHANGED

````diff
@@ -139658,8 +139658,8 @@ var GIT_COMMIT_INFO, CLI_VERSION;
 var init_git_commit = __esm({
   "packages/core/dist/src/generated/git-commit.js"() {
     "use strict";
-    GIT_COMMIT_INFO = "
-    CLI_VERSION = "0.0.6";
+    GIT_COMMIT_INFO = "9d0d1bcf";
+    CLI_VERSION = "0.0.8";
   }
 });
 
@@ -141934,10 +141934,12 @@ var init_ollama_provider = __esm({
 async generateContent(request3, userPromptId) {
   const messages = this.convertToOllamaMessages(request3);
   const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
+  const wantsJson = request3.config?.responseMimeType === "application/json";
   const ollamaRequest = {
     model: this.model,
     messages,
     stream: false,
+    ...wantsJson ? { format: "json" } : {},
     ...tools.length > 0 ? { tools } : {},
     options: this.buildOptions(request3)
   };
@@ -141960,10 +141962,12 @@ var init_ollama_provider = __esm({
 async generateContentStream(request3, userPromptId) {
   const messages = this.convertToOllamaMessages(request3);
   const tools = this.toolsSupported ? this.convertToOllamaTools(request3) : [];
+  const wantsJson = request3.config?.responseMimeType === "application/json";
   const ollamaRequest = {
     model: this.model,
     messages,
     stream: true,
+    ...wantsJson ? { format: "json" } : {},
     ...tools.length > 0 ? { tools } : {},
     options: this.buildOptions(request3)
   };
````
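Both hunks gate Ollama's JSON mode on the caller's `responseMimeType`. Below is a minimal, self-contained sketch of the same request shape; `ChatMessage` and `buildChatRequest` are illustrative names, not identifiers from the bundle, while `format: "json"` is Ollama's documented switch for constraining output to valid JSON.

```ts
// Sketch of the request-shaping logic added above.
interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

function buildChatRequest(
  model: string,
  messages: ChatMessage[],
  stream: boolean,
  responseMimeType?: string,
) {
  // Opt in to Ollama's JSON mode only when the caller asked for JSON output.
  const wantsJson = responseMimeType === "application/json";
  return {
    model,
    messages,
    stream,
    ...(wantsJson ? { format: "json" as const } : {}),
  };
}

// Example: buildChatRequest("qwen2.5:latest",
//   [{ role: "user", content: "List 3 colors as JSON" }], false, "application/json")
// yields { model, messages, stream: false, format: "json" }.
```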
````diff
@@ -142396,7 +142400,7 @@ function createContentGeneratorConfig(config, authType) {
   return contentGeneratorConfig;
 }
 async function createContentGenerator(config, gcConfig, sessionId2) {
-  const version2 = "0.0.6";
+  const version2 = "0.0.8";
   const userAgent2 = `GeminiCLI/${version2} (${process.platform}; ${process.arch})`;
   const baseHeaders = {
     "User-Agent": userAgent2
````
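With the bump, the derived header becomes, for example, `User-Agent: GeminiCLI/0.0.8 (darwin; arm64)` — the platform and arch values come from Node's `process`.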
````diff
@@ -243963,8 +243967,8 @@ var init_providerManager = __esm({
   name: "Ollama (Local)",
   description: "Local models via Ollama",
   authType: "ollama-local",
-  requiredEnvVars: [
-  optionalEnvVars: ["OLLAMA_BASE_URL"],
+  requiredEnvVars: [],
+  optionalEnvVars: ["OLLAMA_MODEL", "OLLAMA_BASE_URL"],
   defaultModel: "llama3.2:latest",
   apiKeyFormat: "none",
   apiKeyUrl: "https://ollama.ai/",
````
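With `requiredEnvVars` now empty, Ollama works with zero configuration; `OLLAMA_MODEL` and `OLLAMA_BASE_URL` remain as optional overrides. A sketch of the resolution order this implies — the helper name is hypothetical, but the defaults are the ones visible in the entry above and in the README:

```ts
// Sketch: environment overrides win, otherwise fall back to the
// provider entry's defaults. `resolveOllamaConfig` is illustrative.
function resolveOllamaConfig(env: NodeJS.ProcessEnv = process.env) {
  return {
    model: env.OLLAMA_MODEL ?? "llama3.2:latest",
    baseUrl: env.OLLAMA_BASE_URL ?? "http://localhost:11434",
  };
}
```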
````diff
@@ -274758,7 +274762,7 @@ async function getPackageJson() {
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
   const pkgJson = await getPackageJson();
-  return "0.0.6";
+  return "0.0.8";
 }
 
 // packages/cli/src/ui/commands/aboutCommand.ts
@@ -274810,7 +274814,7 @@ init_open();
 import process30 from "node:process";
 
 // packages/cli/src/generated/git-commit.ts
-var GIT_COMMIT_INFO2 = "
+var GIT_COMMIT_INFO2 = "9d0d1bcf";
 
 // packages/cli/src/ui/commands/bugCommand.ts
 init_dist3();
````