@cogitator-ai/cli 0.2.22 → 0.2.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -11
- package/dist/__tests__/init.test.js +1 -1
- package/dist/commands/init.js +1 -1
- package/dist/commands/models.js +2 -2
- package/dist/commands/run.js +1 -1
- package/package.json +2 -2
package/README.md
CHANGED
|
@@ -173,7 +173,7 @@ If no model is specified, the CLI will:
|
|
|
173
173
|
|
|
174
174
|
1. Check `COGITATOR_MODEL` environment variable
|
|
175
175
|
2. Query Ollama for available models
|
|
176
|
-
3. Select from preferred models: llama3.
|
|
176
|
+
3. Select from preferred models: llama3.3:8b, llama3:8b, gemma3:4b, gemma2:9b, mistral:7b
|
|
177
177
|
4. Fall back to first available model
|
|
178
178
|
|
|
179
179
|
**Examples:**
|
|
@@ -212,7 +212,7 @@ When running without a message or with `-i`, you enter interactive mode:
|
|
|
212
212
|
|
|
213
213
|
AI Agent Runtime v0.1.0
|
|
214
214
|
|
|
215
|
-
Model: llama3.
|
|
215
|
+
Model: llama3.3:8b
|
|
216
216
|
Commands: /model <name>, /clear, /help, exit
|
|
217
217
|
|
|
218
218
|
> Hello!
|
|
@@ -237,7 +237,7 @@ Commands: /model <name>, /clear, /help, exit
|
|
|
237
237
|
|
|
238
238
|
```
|
|
239
239
|
> /model
|
|
240
|
-
Current model: ollama/llama3.
|
|
240
|
+
Current model: ollama/llama3.3:8b
|
|
241
241
|
|
|
242
242
|
> /model gemma3:4b
|
|
243
243
|
✓ Switched to model: ollama/gemma3:4b
|
|
@@ -337,7 +337,7 @@ cogitator models [options]
|
|
|
337
337
|
```
|
|
338
338
|
✓ Found 3 model(s)
|
|
339
339
|
|
|
340
|
-
llama3.
|
|
340
|
+
llama3.3:8b 4.7 GB 2 days ago
|
|
341
341
|
gemma3:4b 2.8 GB 1 week ago
|
|
342
342
|
mistral:7b 4.1 GB 3 weeks ago
|
|
343
343
|
|
|
@@ -351,7 +351,7 @@ Use with: cogitator run -m ollama/<model> "message"
|
|
|
351
351
|
cogitator models
|
|
352
352
|
|
|
353
353
|
# Pull a new model
|
|
354
|
-
cogitator models --pull llama3.
|
|
354
|
+
cogitator models --pull llama3.3:8b
|
|
355
355
|
cogitator models --pull gemma3:4b
|
|
356
356
|
cogitator models --pull mistral:7b
|
|
357
357
|
```
|
|
@@ -395,7 +395,7 @@ memory:
|
|
|
395
395
|
**Example .env:**
|
|
396
396
|
|
|
397
397
|
```bash
|
|
398
|
-
COGITATOR_MODEL=ollama/llama3.
|
|
398
|
+
COGITATOR_MODEL=ollama/llama3.3:8b
|
|
399
399
|
OPENAI_API_KEY=sk-...
|
|
400
400
|
```
|
|
401
401
|
|
|
@@ -422,7 +422,7 @@ const greet = tool({
|
|
|
422
422
|
const agent = new Agent({
|
|
423
423
|
id: 'my-agent',
|
|
424
424
|
name: 'My Agent',
|
|
425
|
-
model: 'ollama/llama3.
|
|
425
|
+
model: 'ollama/llama3.3:8b',
|
|
426
426
|
instructions: 'You are a helpful assistant. Use the greet tool when asked to greet someone.',
|
|
427
427
|
tools: [greet],
|
|
428
428
|
});
|
|
@@ -465,7 +465,7 @@ const datetime = tool({
|
|
|
465
465
|
|
|
466
466
|
const agent = new Agent({
|
|
467
467
|
name: 'Assistant',
|
|
468
|
-
model: 'ollama/llama3.
|
|
468
|
+
model: 'ollama/llama3.3:8b',
|
|
469
469
|
instructions: 'You are a helpful assistant with calculator and datetime tools.',
|
|
470
470
|
tools: [calculator, datetime],
|
|
471
471
|
});
|
|
@@ -541,15 +541,15 @@ Start Ollama with: ollama serve
|
|
|
541
541
|
|
|
542
542
|
```
|
|
543
543
|
⚠ No models installed
|
|
544
|
-
Pull a model with: cogitator models --pull llama3.
|
|
544
|
+
Pull a model with: cogitator models --pull llama3.3:8b
|
|
545
545
|
```
|
|
546
546
|
|
|
547
547
|
**Solution:**
|
|
548
548
|
|
|
549
549
|
```bash
|
|
550
|
-
cogitator models --pull llama3.
|
|
550
|
+
cogitator models --pull llama3.3:8b
|
|
551
551
|
# or
|
|
552
|
-
ollama pull llama3.
|
|
552
|
+
ollama pull llama3.3:8b
|
|
553
553
|
```
|
|
554
554
|
|
|
555
555
|
### Docker Not Running
|
package/dist/commands/init.js
CHANGED
|
@@ -81,7 +81,7 @@ const greet = tool({
|
|
|
81
81
|
const agent = new Agent({
|
|
82
82
|
id: 'my-agent',
|
|
83
83
|
name: 'My Agent',
|
|
84
|
-
model: 'ollama/llama3.
|
|
84
|
+
model: 'ollama/llama3.3:8b',
|
|
85
85
|
instructions: 'You are a helpful assistant. Use the greet tool when asked to greet someone.',
|
|
86
86
|
tools: [greet],
|
|
87
87
|
});
|
package/dist/commands/models.js
CHANGED
|
@@ -48,8 +48,8 @@ export const modelsCommand = new Command('models')
|
|
|
48
48
|
if (data.models.length === 0) {
|
|
49
49
|
log.warn('No models installed');
|
|
50
50
|
console.log();
|
|
51
|
-
log.dim('Pull a model with: cogitator models --pull llama3.
|
|
52
|
-
log.dim('Or directly: ollama pull llama3.
|
|
51
|
+
log.dim('Pull a model with: cogitator models --pull llama3.3:8b');
|
|
52
|
+
log.dim('Or directly: ollama pull llama3.3:8b');
|
|
53
53
|
return;
|
|
54
54
|
}
|
|
55
55
|
console.log();
|
package/dist/commands/run.js
CHANGED
|
@@ -25,7 +25,7 @@ async function detectModel() {
|
|
|
25
25
|
const models = await getOllamaModels();
|
|
26
26
|
if (models.length === 0)
|
|
27
27
|
return null;
|
|
28
|
-
const preferred = ['llama3.
|
|
28
|
+
const preferred = ['llama3.3:8b', 'llama3:8b', 'gemma3:4b', 'gemma2:9b', 'mistral:7b'];
|
|
29
29
|
for (const p of preferred) {
|
|
30
30
|
if (models.includes(p))
|
|
31
31
|
return `ollama/${p}`;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@cogitator-ai/cli",
|
|
3
|
-
"version": "0.2.
|
|
3
|
+
"version": "0.2.23",
|
|
4
4
|
"description": "CLI for Cogitator AI Agent Runtime",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -21,7 +21,7 @@
|
|
|
21
21
|
"commander": "^12.0.0",
|
|
22
22
|
"chalk": "^5.3.0",
|
|
23
23
|
"ora": "^8.0.0",
|
|
24
|
-
"@cogitator-ai/core": "0.17.
|
|
24
|
+
"@cogitator-ai/core": "0.17.2",
|
|
25
25
|
"@cogitator-ai/config": "0.3.11"
|
|
26
26
|
},
|
|
27
27
|
"devDependencies": {
|