@marizmelo/llm-cli 0.0.6 → 0.0.7

package/README.md CHANGED
@@ -28,32 +28,24 @@ llm-cli --prompt "What is 2+2?"
  - 🎯 Provider-specific model switching
  - 💾 Memory and context management
  - 🔑 Secure API key management with persistent settings
- - 🛠️ Full tool/function calling support for OpenAI and Anthropic
+ - 🛠️ Full tool/function calling support for OpenAI, Anthropic, and Ollama

- ## What's New in v0.1.0
+ ## Provider Setup

- ### Enhanced Provider Support
- - **OpenAI & Anthropic Tool Support**: Both providers now have full function/tool calling capabilities, allowing them to use all available CLI tools (file operations, shell commands, web searches, etc.)
- - **Persistent API Key Management**: API keys are now saved to settings and automatically synced to environment variables on startup
- - **Improved Provider Setup**: Simplified setup process with `/provider setup <provider> <api-key>` command
- - **Better Error Handling**: Fixed streaming response issues and improved error messages
-
- ### Provider Setup
-
- #### Quick Setup (New Method)
+ ### Quick Setup
  ```bash
- # Setup providers with persistent API keys
+ # Setup cloud providers with persistent API keys
  llm-cli
  /provider setup openai sk-your-api-key
  /provider setup anthropic sk-ant-your-api-key
  /provider switch openai
  ```

- #### Ollama (Local)
+ ### Ollama (Local)
  ```bash
  # 1. Install Ollama (https://ollama.com)
- # 2. Pull a model
- ollama pull llama3.2:latest
+ # 2. Pull a tool-capable model
+ ollama pull qwen2.5:latest

  # 3. Start llm-cli and switch to Ollama
  llm-cli
@@ -62,13 +54,26 @@ llm-cli
  # 4. (Optional) Switch to a different model
  /provider model <model-name>
  ```
- The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.

- #### Traditional Setup (Environment Variables)
+ No `.env` file or environment variables required for Ollama. The default model is `llama3.2:latest`. Use `/provider model` to list available models and switch between them.
+
+ **Tool-capable Ollama models** (recommended for file ops, shell commands, etc.):
+ - `qwen2.5:7b` / `qwen2.5:14b`
+ - `llama3.1:8b` / `llama3.1:70b`
+ - `mistral:7b`
+
+ Models without tool support (e.g. `gemma3n`, `deepseek-r1`) will automatically fall back to text-only conversation mode.
+
+ ### Environment Variables (Alternative)
  ```bash
- # Still supported for backward compatibility
+ # Cloud providers
  export OPENAI_API_KEY="your-api-key"
  export ANTHROPIC_API_KEY="your-api-key"
+
+ # Ollama (optional overrides)
+ export OLLAMA_MODEL="qwen2.5:latest"
+ export OLLAMA_BASE_URL="http://localhost:11434"
+
  llm-cli
  ```

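The optional `OLLAMA_MODEL` and `OLLAMA_BASE_URL` overrides introduced above also work for one-off invocations. A minimal sketch, assuming standard shell environment-prefix semantics; the `--prompt` flag and the variable names come from this diff, while the combination itself is illustrative:

```bash
# One-off run against a tool-capable Ollama model, persisting nothing.
# With both variables unset, llm-cli falls back to the default llama3.2:latest.
OLLAMA_MODEL="qwen2.5:latest" OLLAMA_BASE_URL="http://localhost:11434" \
  llm-cli --prompt "What is 2+2?"
```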
package/bundle/gemini.js CHANGED
@@ -139658,8 +139658,8 @@ var GIT_COMMIT_INFO, CLI_VERSION;
  var init_git_commit = __esm({
  "packages/core/dist/src/generated/git-commit.js"() {
  "use strict";
- GIT_COMMIT_INFO = "4e5c77fc";
- CLI_VERSION = "0.0.6";
+ GIT_COMMIT_INFO = "03a9161d";
+ CLI_VERSION = "0.0.7";
  }
  });

@@ -142396,7 +142396,7 @@ function createContentGeneratorConfig(config, authType) {
  return contentGeneratorConfig;
  }
  async function createContentGenerator(config, gcConfig, sessionId2) {
- const version2 = "0.0.6";
+ const version2 = "0.0.7";
  const userAgent2 = `GeminiCLI/${version2} (${process.platform}; ${process.arch})`;
  const baseHeaders = {
  "User-Agent": userAgent2
@@ -243963,8 +243963,8 @@ var init_providerManager = __esm({
  name: "Ollama (Local)",
  description: "Local models via Ollama",
  authType: "ollama-local",
- requiredEnvVars: ["OLLAMA_MODEL"],
- optionalEnvVars: ["OLLAMA_BASE_URL"],
+ requiredEnvVars: [],
+ optionalEnvVars: ["OLLAMA_MODEL", "OLLAMA_BASE_URL"],
  defaultModel: "llama3.2:latest",
  apiKeyFormat: "none",
  apiKeyUrl: "https://ollama.ai/",
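This registry entry is what backs the README's claim that Ollama needs no environment variables: `OLLAMA_MODEL` moves from `requiredEnvVars` to `optionalEnvVars`, so provider validation now passes on a clean environment and `defaultModel` fills the gap. A sketch of the resulting session, where the exact `/provider switch ollama` spelling is assumed by analogy with the `/provider switch openai` example in the README:

```bash
# Under 0.0.6 the Ollama provider listed OLLAMA_MODEL as required;
# under 0.0.7 it is optional and defaultModel (llama3.2:latest) fills in.
llm-cli
/provider switch ollama          # assumed spelling; starts on llama3.2:latest
/provider model qwen2.5:latest   # optional: move to a tool-capable model
```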
@@ -274758,7 +274758,7 @@ async function getPackageJson() {
  // packages/cli/src/utils/version.ts
  async function getCliVersion() {
  const pkgJson = await getPackageJson();
- return "0.0.6";
+ return "0.0.7";
  }

  // packages/cli/src/ui/commands/aboutCommand.ts
@@ -274810,7 +274810,7 @@ init_open();
  import process30 from "node:process";

  // packages/cli/src/generated/git-commit.ts
- var GIT_COMMIT_INFO2 = "4e5c77fc";
+ var GIT_COMMIT_INFO2 = "03a9161d";

  // packages/cli/src/ui/commands/bugCommand.ts
  init_dist3();
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@marizmelo/llm-cli",
- "version": "0.0.6",
+ "version": "0.0.7",
  "engines": {
  "node": ">=20.0.0"
  },