claude-conversation-memory-mcp 0.2.5 → 0.5.0
- package/.claude-memory-config.example.json +8 -0
- package/.claude-memory-config.example.jsonc +50 -0
- package/README.md +203 -0
- package/dist/cli/commands.d.ts.map +1 -1
- package/dist/cli/commands.js +260 -20
- package/dist/cli/commands.js.map +1 -1
- package/dist/embeddings/ModelRegistry.d.ts +48 -0
- package/dist/embeddings/ModelRegistry.d.ts.map +1 -0
- package/dist/embeddings/ModelRegistry.js +170 -0
- package/dist/embeddings/ModelRegistry.js.map +1 -0
- package/dist/embeddings/providers/OllamaEmbeddings.d.ts +1 -1
- package/dist/embeddings/providers/OllamaEmbeddings.d.ts.map +1 -1
- package/dist/embeddings/providers/OllamaEmbeddings.js +7 -14
- package/dist/embeddings/providers/OllamaEmbeddings.js.map +1 -1
- package/dist/embeddings/providers/OpenAIEmbeddings.d.ts +1 -1
- package/dist/embeddings/providers/OpenAIEmbeddings.d.ts.map +1 -1
- package/dist/embeddings/providers/OpenAIEmbeddings.js +9 -7
- package/dist/embeddings/providers/OpenAIEmbeddings.js.map +1 -1
- package/dist/embeddings/providers/TransformersEmbeddings.d.ts +1 -1
- package/dist/embeddings/providers/TransformersEmbeddings.d.ts.map +1 -1
- package/dist/embeddings/providers/TransformersEmbeddings.js +9 -8
- package/dist/embeddings/providers/TransformersEmbeddings.js.map +1 -1
- package/dist/mcp-server.d.ts.map +1 -1
- package/dist/mcp-server.js +6 -0
- package/dist/mcp-server.js.map +1 -1
- package/dist/tools/ToolDefinitions.d.ts +46 -0
- package/dist/tools/ToolDefinitions.d.ts.map +1 -1
- package/dist/tools/ToolDefinitions.js +46 -0
- package/dist/tools/ToolDefinitions.js.map +1 -1
- package/dist/tools/ToolHandlers.d.ts +11 -2
- package/dist/tools/ToolHandlers.d.ts.map +1 -1
- package/dist/tools/ToolHandlers.js +86 -2
- package/dist/tools/ToolHandlers.js.map +1 -1
- package/dist/types/ToolTypes.d.ts +37 -0
- package/dist/types/ToolTypes.d.ts.map +1 -1
- package/dist/utils/ProjectMigration.d.ts +82 -0
- package/dist/utils/ProjectMigration.d.ts.map +1 -0
- package/dist/utils/ProjectMigration.js +413 -0
- package/dist/utils/ProjectMigration.js.map +1 -0
- package/package.json +5 -3
package/.claude-memory-config.example.jsonc
ADDED
@@ -0,0 +1,50 @@
+{
+  "embedding": {
+    // Provider: Choose one of "ollama" | "transformers" | "openai"
+    // - ollama: Local models via Ollama (fast, private, requires Ollama installed)
+    // - transformers: Offline models via Transformers.js (no setup, slower first run)
+    // - openai: Cloud API (best quality, requires API key, costs money)
+    "provider": "ollama",
+
+    // Model name: See available models by running `config` command
+    //
+    // Ollama models (require: ollama pull <model>):
+    // - mxbai-embed-large (1024 dims, recommended for quality)
+    // - nomic-embed-text (768 dims, fast and good quality)
+    // - all-minilm (384 dims, lightweight)
+    // - snowflake-arctic-embed (1024 dims, optimized for retrieval)
+    //
+    // Transformers models (auto-download on first use, no setup):
+    // - Xenova/all-MiniLM-L6-v2 (384 dims, default, fastest)
+    // - Xenova/all-mpnet-base-v2 (768 dims, better quality)
+    // - Xenova/bge-small-en-v1.5 (384 dims, English-optimized)
+    // - Xenova/bge-base-en-v1.5 (768 dims, English, higher quality)
+    //
+    // OpenAI models (require API key):
+    // - text-embedding-3-small (1536 dims, $0.020 per 1M tokens)
+    // - text-embedding-3-large (3072 dims, $0.130 per 1M tokens, best quality)
+    // - text-embedding-ada-002 (1536 dims, legacy)
+    "model": "mxbai-embed-large",
+
+    // Dimensions: Optional - auto-detected based on model name if omitted
+    // Only specify if you need to override auto-detection or use a custom model
+    // Valid range: 1-10000
+    //
+    // Common dimensions by model:
+    //   mxbai-embed-large: 1024
+    //   nomic-embed-text: 768
+    //   Xenova/all-MiniLM-L6-v2: 384
+    //   text-embedding-3-small: 1536
+    //   text-embedding-3-large: 3072
+    "dimensions": 1024,
+
+    // Base URL: Only for Ollama provider
+    // Default: http://localhost:11434
+    // Change if Ollama is running on a different host/port
+    "baseUrl": "http://localhost:11434"
+
+    // API Key: Only for OpenAI provider
+    // Can also be set via OPENAI_API_KEY environment variable
+    // "apiKey": "sk-..."
+  }
+}
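The dimension comments in the added config boil down to a model-name → dimensions lookup with an explicit override. A minimal TypeScript sketch of that idea, with the table copied from the comments above (the package's real auto-detection lives in `dist/embeddings/ModelRegistry.js` and may differ in detail):

```typescript
// Sketch only: resolve embedding dimensions the way the config comments
// describe. The table mirrors the comments above; the package's actual
// ModelRegistry is authoritative.
const KNOWN_DIMENSIONS: Record<string, number> = {
  "mxbai-embed-large": 1024,
  "nomic-embed-text": 768,
  "all-minilm": 384,
  "snowflake-arctic-embed": 1024,
  "Xenova/all-MiniLM-L6-v2": 384,
  "Xenova/all-mpnet-base-v2": 768,
  "Xenova/bge-small-en-v1.5": 384,
  "Xenova/bge-base-en-v1.5": 768,
  "text-embedding-3-small": 1536,
  "text-embedding-3-large": 3072,
  "text-embedding-ada-002": 1536,
};

function resolveDimensions(model: string, configured?: number): number {
  if (configured !== undefined) return configured; // explicit "dimensions" wins
  const detected = KNOWN_DIMENSIONS[model];
  if (detected !== undefined) return detected;     // auto-detected from the name
  throw new Error(`Unknown model '${model}': set "dimensions" explicitly`);
}
```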
package/README.md
CHANGED
@@ -9,6 +9,7 @@ A Model Context Protocol (MCP) server that gives Claude Code long-term memory by
 - **Prevents mistakes** - Learn from past errors and avoid repeating them
 - **Links to git commits** - Connect conversations to code changes
 - **Analyzes file history** - See the complete evolution of files with context
+- **Migrates conversation history** - Keep your history when renaming or moving projects
 
 ## ⚠️ Important: Claude Code CLI Only
 
@@ -56,6 +57,12 @@ Claude Code CLI is required because it stores conversation history in `~/.claude
 npm install -g claude-conversation-memory-mcp
 ```
 
+**Discover Available Models:**
+After installation, you can see all available embedding models and their dimensions:
+- Run the CLI: `claude-conversation-memory-mcp`
+- Type: `config` to see all available models organized by provider
+- Or check the example config file: `.claude-memory-config.example.jsonc`
+
 ### Configure Claude Code CLI
 
 **MCP Configuration File Priority:**
@@ -303,6 +310,82 @@ By default, conversations about the MCP itself are excluded to prevent self-referential loops
 You: "Index all conversations, including MCP conversations"
 ```
 
+### Indexing Options
+
+When indexing conversations, several options control what gets stored:
+
+#### Include Thinking Blocks
+
+**Default**: `false` (thinking blocks are excluded)
+
+Thinking blocks contain Claude's internal reasoning process. They can be **very large** (3-5x more data) and are usually not needed for search.
+
+```
+# Default behavior (recommended)
+You: "Index conversations"
+# Thinking blocks are excluded
+
+# Include thinking blocks (increases database size significantly)
+You: "Index conversations with thinking blocks"
+```
+
+**When to enable**:
+- ✅ You want to search Claude's reasoning process
+- ✅ You're analyzing decision-making patterns
+- ❌ Don't enable if you just want to search visible conversation content
+
+#### Exclude MCP Conversations
+
+**Default**: `"self-only"` (excludes only conversation-memory MCP calls)
+
+Controls which MCP tool interactions are indexed:
+
+- `"self-only"` (default): Excludes messages about this conversation-memory MCP to prevent self-referential loops
+- `false`: Index all MCP tool calls from all servers
+- `"all-mcp"` or `true`: Exclude all MCP tool calls from all servers
+- `["server1", "server2"]`: Exclude specific MCP servers
+
+```
+# Default - exclude only conversation-memory MCP
+You: "Index conversations"
+
+# Include all MCP conversations (including this one)
+You: "Index conversations, include all MCP tools"
+
+# Exclude all MCP tool calls
+You: "Index conversations, exclude all MCP interactions"
+```
+
+**What gets filtered**: Only the specific **messages** that invoke MCP tools are excluded, not entire conversations. This preserves conversation context while preventing self-referential loops.
+
+#### Enable Git Integration
+
+**Default**: `true` (git commits are linked)
+
+Links git commits to conversations based on timestamps and file changes.
+
+```
+# Default behavior
+You: "Index conversations"
+# Git commits are automatically linked
+
+# Disable git integration
+You: "Index conversations without git integration"
+```
+
+#### Index Output
+
+After indexing, you'll see:
+
+```
+📁 Indexed from: /path/to/modern-folder, /path/to/legacy-folder
+💾 Database: /path/to/.claude-conversations-memory.db
+```
+
+This shows:
+- **Indexed folders**: Which conversation folders were used (including legacy if it exists)
+- **Database location**: Where your indexed data is stored
+
 ### Search with Date Filters
 
 ```
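The `excludeMcp` values documented in the hunk above map onto a small predicate; a hedged sketch of that semantics in TypeScript (the option type and server name here are illustrative, not the package's internals):

```typescript
// Sketch of the excludeMcp semantics described above. "conversation-memory"
// as the self server name is an assumption for illustration.
type ExcludeMcp = boolean | "self-only" | "all-mcp" | string[];

const SELF_SERVER = "conversation-memory";

// Decide whether a message that invokes an MCP tool should be skipped.
// Note this filters individual messages, not whole conversations.
function shouldExcludeMcpMessage(server: string, option: ExcludeMcp): boolean {
  if (option === false) return false;                        // index every MCP call
  if (option === true || option === "all-mcp") return true;  // skip all MCP calls
  if (option === "self-only") return server === SELF_SERVER; // skip only our own
  return option.includes(server);                            // explicit server list
}
```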
@@ -317,6 +400,126 @@ You: "Generate project documentation from our conversations"
 
 Claude will create comprehensive docs combining code analysis with conversation history.
 
+### Migrate Conversation History
+
+When you rename or move a project directory, your conversation history becomes inaccessible because Claude Code creates a new folder for the new path. Use the migration tools to recover your history:
+
+**Step 1: Discover old conversation folders**
+
+```
+You: "Discover old conversations for this project"
+```
+
+Claude will scan `~/.claude/projects/` and show you folders that match your current project, ranked by similarity score. The output includes:
+- Folder name and path
+- Original project path stored in the database
+- Number of conversations and files
+- Last activity timestamp
+- Similarity score (higher = better match)
+
+**Step 2: Migrate the history**
+
+```
+You: "Migrate conversations from /Users/name/.claude/projects/-old-project-name, old path was /Users/name/old-project, new path is /Users/name/new-project"
+```
+
+Claude will:
+- Copy all conversation JSONL files to the new location
+- Update the `project_path` in the database
+- Create automatic backups (`.claude-conversations-memory.db.bak`)
+- Preserve all original data (copy, not move)
+
+**Example workflow:**
+
+```markdown
+# You renamed your project directory
+# Old: /Users/alice/code/my-app
+# New: /Users/alice/code/my-awesome-app
+
+You: "Discover old conversations for this project"
+
+Claude: Found 1 potential old conversation folder:
+  - Folder: -Users-alice-code-my-app
+  - Original path: /Users/alice/code/my-app
+  - Conversations: 15
+  - Files: 47
+  - Score: 95.3
+
+You: "Migrate from /Users/alice/.claude/projects/-Users-alice-code-my-app, old path /Users/alice/code/my-app, new path /Users/alice/code/my-awesome-app"
+
+Claude: Successfully migrated 47 conversation files.
+Now you can index and search your full history!
+```
+
+**Dry run mode:**
+
+Test the migration without making changes:
+
+```
+You: "Dry run: migrate from [source] old path [old] new path [new]"
+```
+
+This shows what would be migrated without actually copying files.
+
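The `-Users-alice-code-my-app` folder in the example above is just the project path with path separators flattened to dashes. A sketch of that mapping, inferred from the examples in this README (not necessarily the package's exact rule):

```typescript
// Assumed path-to-folder mapping, inferred from the README examples:
// /Users/alice/code/my-app -> -Users-alice-code-my-app
import { join } from "node:path";
import { homedir } from "node:os";

function projectFolderName(projectPath: string): string {
  return projectPath.replace(/[/\\]/g, "-");
}

function conversationFolder(projectPath: string): string {
  return join(homedir(), ".claude", "projects", projectFolderName(projectPath));
}

// conversationFolder("/Users/alice/code/my-app")
// -> "~/.claude/projects/-Users-alice-code-my-app"
```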
+### Merge Conversations from Different Projects
+
+**NEW in v0.4.0**: Combine conversation history from different projects into one folder using merge mode.
+
+**Use case**: You want to merge conversations from `/project-a/drafts/2025-01-05` into your current project `/project-b`.
+
+**Step 1: Discover the source folder**
+
+```
+You: "Discover old conversations for project path /Users/name/project-a/drafts/2025-01-05"
+```
+
+**Step 2: Merge into current project**
+
+```
+You: "Merge conversations from /Users/name/.claude/projects/-project-a-drafts-2025-01-05, old path /Users/name/project-a/drafts/2025-01-05, new path /Users/name/project-b, mode merge"
+```
+
+Claude will:
+- Copy only **new** conversation files (skip duplicates)
+- Keep target conversations when IDs collide (no data loss)
+- Merge all database entries using INSERT OR IGNORE
+- Create backup of target database before merge
+- Preserve all original source data
+
+**Example workflow:**
+
+```markdown
+# Scenario: You have conversations from different projects to combine
+
+Current project: /Users/alice/main-project (already has 20 conversations)
+Source project: /Users/alice/drafts/experiment (has 10 conversations, 3 overlap with main)
+
+You: "Discover old conversations for /Users/alice/drafts/experiment"
+
+Claude: Found 1 folder:
+  - Folder: -Users-alice-drafts-experiment
+  - Original path: /Users/alice/drafts/experiment
+  - Conversations: 10
+  - Files: 10
+
+You: "Merge from /Users/alice/.claude/projects/-Users-alice-drafts-experiment, old path /Users/alice/drafts/experiment, new path /Users/alice/main-project, mode merge"
+
+Claude: Successfully merged 7 new conversation files into /Users/alice/.claude/projects/-Users-alice-main-project
+(3 duplicate conversations were skipped to preserve target data)
+Backup created at: .claude-conversations-memory.db.bak
+
+# Result: main-project now has 27 conversations (20 original + 7 new from experiment)
+```
+
+**Key differences between migrate and merge:**
+
+| Feature | Migrate Mode (default) | Merge Mode |
+|---------|------------------------|------------|
+| Target has data | ❌ Rejected (conflict) | ✅ Allowed |
+| Duplicate IDs | Overwrites target | Skips source (keeps target) |
+| Use case | Renamed project | Combine different projects |
+| Backup location | Source folder | Target folder |
+
 ## 📚 Learn More
 
 - **[Tool Examples](docs/TOOL-EXAMPLES.md)** - 50+ natural language examples for each tool
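The "Duplicate IDs" row in the table above follows directly from `INSERT OR IGNORE`: rows already in the target win. A hedged sketch of one merge step in that style, using better-sqlite3 and an assumed `conversations` table (the package's real schema and driver may differ):

```typescript
// Sketch of merge mode's INSERT OR IGNORE step. Table name and driver are
// assumptions for illustration; the package's actual schema may differ.
import Database from "better-sqlite3";

function mergeConversationRows(targetDbPath: string, sourceDbPath: string): number {
  const db = new Database(targetDbPath);
  try {
    db.exec(`ATTACH DATABASE '${sourceDbPath}' AS source`);
    // On a conversation-id collision the target row is kept untouched,
    // which is exactly the "skips source (keeps target)" rule above.
    const result = db
      .prepare("INSERT OR IGNORE INTO conversations SELECT * FROM source.conversations")
      .run();
    db.exec("DETACH DATABASE source");
    return result.changes; // number of newly merged rows
  } finally {
    db.close();
  }
}
```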
package/dist/cli/commands.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"commands.d.ts","sourceRoot":"","sources":["../../src/cli/commands.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;
+{"version":3,"file":"commands.d.ts","sourceRoot":"","sources":["../../src/cli/commands.ts"],"names":[],"mappings":"AAAA;;GAEG;AAIH,OAAO,EAAE,YAAY,EAAE,MAAM,0BAA0B,CAAC;AAiExD;;GAEG;AACH,wBAAsB,cAAc,CAClC,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,YAAY,GACrB,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAsKxB"}
package/dist/cli/commands.js
CHANGED
|
@@ -6,9 +6,11 @@ import Table from "cli-table3";
|
|
|
6
6
|
import { getSQLiteManager } from "../storage/SQLiteManager.js";
|
|
7
7
|
import { showHelp, showCommandHelp } from "./help.js";
|
|
8
8
|
import { ConfigManager } from "../embeddings/ConfigManager.js";
|
|
9
|
+
import { getModelsByProvider, getAllModels, getModelsByQuality, getRecommendedModel, modelExists } from "../embeddings/ModelRegistry.js";
|
|
9
10
|
import { readFileSync } from "fs";
|
|
10
11
|
import { join, dirname } from "path";
|
|
11
12
|
import { fileURLToPath } from "url";
|
|
13
|
+
import prompts from "prompts";
|
|
12
14
|
const __filename = fileURLToPath(import.meta.url);
|
|
13
15
|
const __dirname = dirname(__filename);
|
|
14
16
|
/**
|
|
@@ -169,6 +171,14 @@ export async function executeCommand(input, handlers) {
             return chalk.yellow("Usage: config (show current config)\n config <key> <value> (set config value)");
         }
     }
+    // Handle models
+    if (command === "models") {
+        return handleModels(args);
+    }
+    // Handle select-model (interactive)
+    if (command === "select-model" || command === "select") {
+        return await handleSelectModel();
+    }
     // Handle get
     if (command === "get") {
         if (args.length === 0) {
@@ -513,10 +523,36 @@ function handleConfigShow() {
     output += ` ${chalk.cyan("dimensions")} Embedding dimensions (e.g., 1024)\n`;
     output += ` ${chalk.cyan("baseUrl")} Ollama base URL (default: http://localhost:11434)\n`;
     output += ` ${chalk.cyan("apiKey")} OpenAI API key\n\n`;
+    // Show available models by provider using ModelRegistry
+    output += chalk.bold("Known Models by Provider:\n\n");
+    // Ollama models
+    output += chalk.yellow("Ollama (local):\n");
+    const ollamaModels = getModelsByProvider("ollama");
+    for (const model of ollamaModels) {
+        const suffix = model.installation ? ` ${chalk.dim(`(${model.description})`)}` : "";
+        output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims${suffix}\n`;
+    }
+    output += "\n";
+    // Transformers models
+    output += chalk.yellow("Transformers (offline):\n");
+    const transformersModels = getModelsByProvider("transformers");
+    for (const model of transformersModels) {
+        output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims ${chalk.dim(`(${model.description})`)}\n`;
+    }
+    output += "\n";
+    // OpenAI models
+    output += chalk.yellow("OpenAI (cloud):\n");
+    const openaiModels = getModelsByProvider("openai");
+    for (const model of openaiModels) {
+        const costSuffix = model.cost ? ` - ${model.cost}` : "";
+        output += ` ${model.name.padEnd(30)} ${model.dimensions.toString().padStart(4)} dims ${chalk.dim(`(${model.description}${costSuffix})`)}\n`;
+    }
+    output += "\n";
     output += chalk.gray(`Config file location: ${configPath}\n`);
     if (!configExists) {
         output += chalk.yellow("Config file will be created on first 'set' command.\n");
     }
+    output += chalk.gray("See example config: .claude-memory-config.example.jsonc\n");
     return output;
 }
 /**
@@ -544,33 +580,32 @@ function handleConfigGet(key) {
  */
 function handleConfigSet(key, value) {
     try {
+        // Validate model name if setting model
+        if (key === "model") {
+            if (!modelExists(value)) {
+                let warning = chalk.yellow(`⚠️ Model '${value}' is not in the registry.\n\n`);
+                warning += chalk.gray("This might be a custom model. If so, make sure to also set the correct dimensions.\n\n");
+                warning += chalk.cyan("Known models:\n");
+                warning += chalk.gray(" Run 'models' to see all available models\n");
+                warning += chalk.gray(" Or 'models <provider>' to see provider-specific models\n\n");
+                warning += chalk.yellow("Proceeding with custom model...\n\n");
+                console.warn(warning);
+            }
+        }
         ConfigManager.setConfigValue(key, value);
         // Show confirmation with helpful info
         let output = chalk.green(`✓ Config updated: ${key} = ${value}\n\n`);
         // If setting dimensions, suggest matching models
         if (key === "dimensions") {
             const dims = parseInt(value, 10);
-
-            if (
-            output += "
-
-
-
-            output += "
-            output += " - Xenova/all-mpnet-base-v2 (transformers)\n";
-            }
-            else if (dims === 1024) {
-                output += " - mxbai-embed-large (ollama) ⭐ default\n";
-                output += " - snowflake-arctic-embed (ollama)\n";
+            const matchingModels = getAllModels().filter(m => m.dimensions === dims);
+            if (matchingModels.length > 0) {
+                output += chalk.cyan("Models with matching dimensions:\n");
+                for (const model of matchingModels) {
+                    output += ` - ${model.name} (${model.provider})\n`;
+                }
+                output += "\n";
             }
-            else if (dims === 1536) {
-                output += " - text-embedding-3-small (openai)\n";
-                output += " - text-embedding-ada-002 (openai)\n";
-            }
-            else if (dims === 3072) {
-                output += " - text-embedding-3-large (openai)\n";
-            }
-            output += "\n";
         }
         // If setting model, suggest dimensions
         if (key === "model") {
@@ -587,4 +622,209 @@
         return chalk.red(`Error: ${error.message}`);
     }
 }
+/**
+ * Handle models command - List, filter, search models
+ * Usage:
+ *   models - List all models
+ *   models <provider> - Filter by provider (ollama, transformers, openai)
+ *   models quality <tier> - Filter by quality (low, medium, high, highest)
+ *   models recommend - Show recommended models for each provider
+ */
+function handleModels(args) {
+    let output = "";
+    // No args: list all models
+    if (args.length === 0) {
+        output += chalk.bold("📚 All Available Embedding Models\n\n");
+        const allModels = getAllModels();
+        output += formatModelsTable(allModels);
+        output += "\n";
+        output += chalk.gray("💡 Tip: Use 'models <provider>' to filter by provider\n");
+        output += chalk.gray(" Or: 'models quality <tier>' to filter by quality\n");
+        output += chalk.gray(" Or: 'models recommend' to see recommendations\n");
+        return output;
+    }
+    const subcommand = args[0].toLowerCase();
+    // Filter by provider
+    if (["ollama", "transformers", "openai"].includes(subcommand)) {
+        const models = getModelsByProvider(subcommand);
+        output += chalk.bold(`📚 ${capitalize(subcommand)} Models\n\n`);
+        output += formatModelsTable(models);
+        // Show recommended model for this provider
+        const recommended = getRecommendedModel(subcommand);
+        if (recommended) {
+            output += "\n";
+            output += chalk.cyan(`⭐ Recommended: ${recommended.name} (${recommended.dimensions} dims, ${recommended.quality} quality)\n`);
+        }
+        return output;
+    }
+    // Filter by quality
+    if (subcommand === "quality") {
+        if (args.length < 2) {
+            return chalk.yellow("Usage: models quality <tier>\nTiers: low, medium, high, highest");
+        }
+        const quality = args[1].toLowerCase();
+        if (!["low", "medium", "high", "highest"].includes(quality)) {
+            return chalk.red(`Invalid quality tier: ${args[1]}\nValid tiers: low, medium, high, highest`);
+        }
+        const models = getModelsByQuality(quality);
+        output += chalk.bold(`📚 ${capitalize(quality)} Quality Models\n\n`);
+        output += formatModelsTable(models);
+        return output;
+    }
+    // Show recommended models
+    if (subcommand === "recommend" || subcommand === "recommended") {
+        output += chalk.bold("⭐ Recommended Models by Provider\n\n");
+        const providers = ["ollama", "transformers", "openai"];
+        for (const provider of providers) {
+            const recommended = getRecommendedModel(provider);
+            if (recommended) {
+                output += chalk.yellow(`${capitalize(provider)}:\n`);
+                output += ` ${chalk.green(recommended.name)} ${chalk.dim(`(${recommended.dimensions} dims, ${recommended.quality} quality)`)}\n`;
+                output += ` ${chalk.dim(recommended.description)}\n`;
+                if (recommended.installation) {
+                    output += ` ${chalk.dim(`Install: ${recommended.installation}`)}\n`;
+                }
+                if (recommended.cost) {
+                    output += ` ${chalk.dim(`Cost: ${recommended.cost}`)}\n`;
+                }
+                output += "\n";
+            }
+        }
+        return output;
+    }
+    return chalk.yellow(`Unknown models subcommand: ${subcommand}\n\nUsage:\n models - List all models\n models <provider> - Filter by provider (ollama, transformers, openai)\n models quality <tier> - Filter by quality\n models recommend - Show recommendations`);
+}
+/**
+ * Format models into a table
+ */
+function formatModelsTable(models) {
+    const table = new Table({
+        head: [
+            chalk.cyan("Model"),
+            chalk.cyan("Provider"),
+            chalk.cyan("Dimensions"),
+            chalk.cyan("Quality"),
+            chalk.cyan("Description")
+        ],
+        colWidths: [35, 13, 12, 10, 45],
+        wordWrap: true,
+    });
+    for (const model of models) {
+        table.push([
+            model.name,
+            model.provider,
+            model.dimensions.toString(),
+            model.quality,
+            model.description
+        ]);
+    }
+    return table.toString();
+}
+/**
+ * Capitalize first letter
+ */
+function capitalize(str) {
+    return str.charAt(0).toUpperCase() + str.slice(1);
+}
+/**
+ * Handle interactive model selection
+ */
+async function handleSelectModel() {
+    try {
+        // Step 1: Choose provider
+        const providerResponse = await prompts({
+            type: "select",
+            name: "provider",
+            message: "Choose an embedding provider:",
+            choices: [
+                {
+                    title: "Ollama (Local, High Quality)",
+                    value: "ollama",
+                    description: "Run models locally with Ollama. Requires: ollama serve"
+                },
+                {
+                    title: "Transformers (Offline, No Setup)",
+                    value: "transformers",
+                    description: "Auto-download models, runs offline. No external setup needed."
+                },
+                {
+                    title: "OpenAI (Cloud, Highest Quality)",
+                    value: "openai",
+                    description: "Cloud API with best quality. Requires API key and costs money."
+                }
+            ],
+            initial: 0,
+        });
+        if (!providerResponse.provider) {
+            return chalk.yellow("Selection cancelled");
+        }
+        const provider = providerResponse.provider;
+        // Step 2: Choose model from that provider
+        const models = getModelsByProvider(provider);
+        const modelChoices = models.map(m => ({
+            title: `${m.name} (${m.dimensions} dims, ${m.quality} quality)`,
+            value: m.name,
+            description: m.description + (m.installation ? ` - ${m.installation}` : "") + (m.cost ? ` - ${m.cost}` : "")
+        }));
+        // Highlight recommended model
+        const recommended = getRecommendedModel(provider);
+        if (recommended) {
+            const recIndex = modelChoices.findIndex(c => c.value === recommended.name);
+            if (recIndex >= 0) {
+                modelChoices[recIndex].title = `⭐ ${modelChoices[recIndex].title} (recommended)`;
+            }
+        }
+        const modelResponse = await prompts({
+            type: "select",
+            name: "model",
+            message: `Choose a model from ${capitalize(provider)}:`,
+            choices: modelChoices,
+            initial: 0,
+        });
+        if (!modelResponse.model) {
+            return chalk.yellow("Selection cancelled");
+        }
+        const modelName = modelResponse.model;
+        const selectedModel = models.find(m => m.name === modelName);
+        if (!selectedModel) {
+            return chalk.red("Error: Model not found");
+        }
+        // Step 3: Confirm and save
+        const confirmResponse = await prompts({
+            type: "confirm",
+            name: "confirm",
+            message: `Set ${selectedModel.name} as your embedding model?\n Provider: ${selectedModel.provider}\n Dimensions: ${selectedModel.dimensions}\n Quality: ${selectedModel.quality}`,
+            initial: true,
+        });
+        if (!confirmResponse.confirm) {
+            return chalk.yellow("Selection cancelled");
+        }
+        // Save configuration
+        ConfigManager.setConfigValue("provider", provider);
+        ConfigManager.setConfigValue("model", modelName);
+        ConfigManager.setConfigValue("dimensions", selectedModel.dimensions.toString());
+        let output = chalk.green(`✓ Configuration updated!\n\n`);
+        output += ` Provider: ${chalk.cyan(provider)}\n`;
+        output += ` Model: ${chalk.cyan(modelName)}\n`;
+        output += ` Dimensions: ${chalk.cyan(selectedModel.dimensions)}\n\n`;
+        // Add setup instructions
+        if (selectedModel.installation) {
+            output += chalk.yellow(`⚠️ Setup Required:\n`);
+            output += ` ${selectedModel.installation}\n\n`;
+        }
+        if (selectedModel.cost) {
+            output += chalk.yellow(`💰 Cost: ${selectedModel.cost}\n\n`);
+        }
+        output += chalk.dim("💡 Tip: You may need to reindex conversations for the new model:\n");
+        output += chalk.dim(" reset && index\n\n");
+        output += chalk.gray(`Config saved to: ${ConfigManager.getConfigPath()}\n`);
+        return output;
+    }
+    catch (error) {
+        if (error.message === "User force closed the prompt") {
+            return chalk.yellow("\nSelection cancelled");
+        }
+        return chalk.red(`Error: ${error.message}`);
+    }
+}
 //# sourceMappingURL=commands.js.map
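The new CLI code above reads registry entries with `name`, `provider`, `dimensions`, `quality`, `description`, and optional `installation`/`cost` fields. A sketch of the shape those call sites imply (the shipped `dist/embeddings/ModelRegistry.d.ts` is authoritative):

```typescript
// Shape implied by the call sites in commands.js above; a sketch, not a copy
// of the package's ModelRegistry.d.ts.
interface ModelInfo {
  name: string;                                    // e.g. "mxbai-embed-large"
  provider: "ollama" | "transformers" | "openai";
  dimensions: number;                              // e.g. 1024
  quality: "low" | "medium" | "high" | "highest";
  description: string;
  installation?: string;                           // e.g. "ollama pull mxbai-embed-large"
  cost?: string;                                   // e.g. "$0.020 per 1M tokens"
}

declare function getAllModels(): ModelInfo[];
declare function getModelsByProvider(provider: string): ModelInfo[];
declare function getModelsByQuality(quality: string): ModelInfo[];
declare function getRecommendedModel(provider: string): ModelInfo | undefined;
declare function modelExists(name: string): boolean;
```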