@duckmind/deepquark-darwin-arm64 0.9.83 → 0.9.90
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.deepquark/skills/bundled/knowledge-graph/SKILL.md +385 -0
- package/.deepquark/skills/bundled/knowledge-graph/STANDARDS.md +461 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/cli.ts +588 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/config.ts +630 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/connection-profile.ts +629 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/container.ts +756 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/mcp-client.ts +1310 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/output-formatter.ts +997 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/token-metrics.ts +335 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/transformation-log.ts +137 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/wrapper-config.ts +113 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/.env.example +129 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/compare-embeddings.ts +175 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/config-falkordb.yaml +108 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/config-neo4j.yaml +111 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/diagnose.ts +483 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-falkordb-dev.yml +146 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-falkordb.yml +151 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j-dev-local.yml +161 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j-dev.yml +161 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j.yml +169 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-production.yml +128 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-test.yml +10 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose.yml +84 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/entrypoint.sh +40 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/install.ts +2054 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose-falkordb.yml +78 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose-neo4j.yml +88 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose.yml +83 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-all-llms-mcp.ts +387 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-embedding-models.ts +201 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-embedding-providers.ts +641 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-graphiti-model.ts +217 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-correct.ts +141 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-llms-mcp.ts +386 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-models.ts +173 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-llama-extraction.ts +188 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-final.ts +240 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-live.ts +187 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-session.ts +127 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-model-combinations.ts +316 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-ollama-models.ts +228 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-openrouter-models.ts +460 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-real-life-mcp.ts +311 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-search-debug.ts +199 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/Install.md +104 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/README.md +120 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/knowledge-cli.ts +996 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/server-cli.ts +531 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/BulkImport.md +514 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/CaptureEpisode.md +242 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/ClearGraph.md +392 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/GetRecent.md +352 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/GetStatus.md +373 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/HealthReport.md +212 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/InvestigateEntity.md +142 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/OntologyManagement.md +201 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/RunMaintenance.md +302 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchByDate.md +255 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchFacts.md +382 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchKnowledge.md +374 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/StixImport.md +212 -0
- package/bin/deepquark +0 -0
- package/package.json +1 -1
- package/.deepquark/skills/bundled/ge-payroll/SKILL.md +0 -153
- package/.deepquark/skills/bundled/ge-payroll/evals/evals.json +0 -23
- package/.deepquark/skills/bundled/ge-payroll/references/pain-points-improvements.md +0 -106
- package/.deepquark/skills/bundled/ge-payroll/references/process-detail.md +0 -217
- package/.deepquark/skills/bundled/ge-payroll/references/raci-stakeholders.md +0 -85
- package/.deepquark/skills/bundled/ge-payroll/references/timeline-mandays.md +0 -64
|
@@ -0,0 +1,2054 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
/**
|
|
3
|
+
* Madeinoz Knowledge System Installation Script v2.1.0
|
|
4
|
+
*
|
|
5
|
+
* Interactive installer that guides users through:
|
|
6
|
+
* - LLM provider selection
|
|
7
|
+
* - API key configuration
|
|
8
|
+
* - Model selection
|
|
9
|
+
* - Performance tuning
|
|
10
|
+
* - PAI .env integration
|
|
11
|
+
* - Service startup
|
|
12
|
+
* - PAI skill installation
|
|
13
|
+
*
|
|
14
|
+
* Usage:
|
|
15
|
+
* bun run src/server/install.ts # Fresh install (interactive)
|
|
16
|
+
* bun run src/server/install.ts --update # Update existing installation
|
|
17
|
+
* bun run src/server/install.ts --yes # Non-interactive with defaults
|
|
18
|
+
* bun run src/server/install.ts --update --yes # Update non-interactively
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
// =============================================================================
|
|
22
|
+
// CLI FLAGS
|
|
23
|
+
// =============================================================================
|
|
24
|
+
|
|
25
|
+
const isUpdateMode = process.argv.includes('--update') || process.argv.includes('-u');
|
|
26
|
+
const isNonInteractive = process.argv.includes('--yes') || process.argv.includes('-y');
|
|
27
|
+
|
|
28
|
+
import { createContainerManager } from '../lib/container';
|
|
29
|
+
import { createConfigLoader } from '../lib/config';
|
|
30
|
+
import { cli } from '../lib/cli';
|
|
31
|
+
import { profileManager } from '../lib/connection-profile';
|
|
32
|
+
import inquirer from 'inquirer';
|
|
33
|
+
|
|
34
|
+
/**
|
|
35
|
+
* LLM Provider configuration
|
|
36
|
+
*/
|
|
37
|
+
interface LLMProvider {
|
|
38
|
+
id: string;
|
|
39
|
+
name: string;
|
|
40
|
+
embedder: string;
|
|
41
|
+
needsOpenAI: boolean;
|
|
42
|
+
models?: ModelChoice[];
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
|
|
46
|
+
* Model choice for provider
|
|
47
|
+
*/
|
|
48
|
+
interface ModelChoice {
|
|
49
|
+
name: string;
|
|
50
|
+
value: string;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/**
|
|
54
|
+
* API tier configuration
|
|
55
|
+
*/
|
|
56
|
+
interface APITier {
|
|
57
|
+
name: string;
|
|
58
|
+
semaphoreLimit: number;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
/**
|
|
62
|
+
* OpenAI-compatible provider configuration
|
|
63
|
+
*/
|
|
64
|
+
interface OpenAICompatibleProvider {
|
|
65
|
+
id: string;
|
|
66
|
+
name: string;
|
|
67
|
+
baseUrl: string;
|
|
68
|
+
keyName: string;
|
|
69
|
+
keyUrl: string;
|
|
70
|
+
models: ModelChoice[];
|
|
71
|
+
embedderModels?: ModelChoice[];
|
|
72
|
+
defaultEmbedderDimensions?: number;
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* OpenAI-compatible cloud providers (use OpenAI API format)
|
|
77
|
+
*/
|
|
78
|
+
const OPENAI_COMPATIBLE_PROVIDERS: OpenAICompatibleProvider[] = [
|
|
79
|
+
{
|
|
80
|
+
id: 'openrouter',
|
|
81
|
+
name: 'OpenRouter (access to 200+ models) - RECOMMENDED',
|
|
82
|
+
baseUrl: 'https://openrouter.ai/api/v1',
|
|
83
|
+
keyName: 'OpenRouter',
|
|
84
|
+
keyUrl: 'https://openrouter.ai/keys',
|
|
85
|
+
models: [
|
|
86
|
+
// ✅ BENCHMARK TESTED - These models WORK with Graphiti MCP
|
|
87
|
+
{
|
|
88
|
+
name: 'google/gemini-2.0-flash-001 (BEST VALUE - $0.125/1K)',
|
|
89
|
+
value: 'google/gemini-2.0-flash-001',
|
|
90
|
+
},
|
|
91
|
+
{ name: 'openai/gpt-4o-mini (reliable - $0.129/1K)', value: 'openai/gpt-4o-mini' },
|
|
92
|
+
{ name: 'qwen/qwen-2.5-72b-instruct ($0.126/1K, slow)', value: 'qwen/qwen-2.5-72b-instruct' },
|
|
93
|
+
{ name: 'anthropic/claude-3.5-haiku ($0.816/1K)', value: 'anthropic/claude-3.5-haiku' },
|
|
94
|
+
{ name: 'openai/gpt-4o (FASTEST - $2.155/1K)', value: 'openai/gpt-4o' },
|
|
95
|
+
{ name: 'x-ai/grok-3 (xAI option - $2.163/1K)', value: 'x-ai/grok-3' },
|
|
96
|
+
// ❌ BENCHMARK TESTED - These models FAIL Graphiti validation (kept for reference)
|
|
97
|
+
// { name: "meta-llama/llama-3.1-70b-instruct (FAILS)", value: "meta-llama/llama-3.1-70b-instruct" },
|
|
98
|
+
// { name: "deepseek/deepseek-chat (FAILS)", value: "deepseek/deepseek-chat" },
|
|
99
|
+
],
|
|
100
|
+
},
|
|
101
|
+
{
|
|
102
|
+
id: 'together',
|
|
103
|
+
name: 'Together AI (fast inference)',
|
|
104
|
+
baseUrl: 'https://api.together.xyz/v1',
|
|
105
|
+
keyName: 'Together AI',
|
|
106
|
+
keyUrl: 'https://api.together.xyz/settings/api-keys',
|
|
107
|
+
models: [
|
|
108
|
+
{
|
|
109
|
+
name: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo (recommended)',
|
|
110
|
+
value: 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
|
|
111
|
+
},
|
|
112
|
+
{
|
|
113
|
+
name: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
|
|
114
|
+
value: 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
|
|
115
|
+
},
|
|
116
|
+
{
|
|
117
|
+
name: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
|
|
118
|
+
value: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
|
|
119
|
+
},
|
|
120
|
+
{ name: 'Qwen/Qwen2.5-72B-Instruct-Turbo', value: 'Qwen/Qwen2.5-72B-Instruct-Turbo' },
|
|
121
|
+
],
|
|
122
|
+
embedderModels: [
|
|
123
|
+
{ name: 'BAAI/bge-large-en-v1.5 (recommended)', value: 'BAAI/bge-large-en-v1.5' },
|
|
124
|
+
{
|
|
125
|
+
name: 'togethercomputer/m2-bert-80M-8k-retrieval',
|
|
126
|
+
value: 'togethercomputer/m2-bert-80M-8k-retrieval',
|
|
127
|
+
},
|
|
128
|
+
],
|
|
129
|
+
defaultEmbedderDimensions: 1024,
|
|
130
|
+
},
|
|
131
|
+
{
|
|
132
|
+
id: 'fireworks',
|
|
133
|
+
name: 'Fireworks AI (low latency)',
|
|
134
|
+
baseUrl: 'https://api.fireworks.ai/inference/v1',
|
|
135
|
+
keyName: 'Fireworks AI',
|
|
136
|
+
keyUrl: 'https://fireworks.ai/api-keys',
|
|
137
|
+
models: [
|
|
138
|
+
{
|
|
139
|
+
name: 'accounts/fireworks/models/llama-v3p1-70b-instruct (recommended)',
|
|
140
|
+
value: 'accounts/fireworks/models/llama-v3p1-70b-instruct',
|
|
141
|
+
},
|
|
142
|
+
{
|
|
143
|
+
name: 'accounts/fireworks/models/mixtral-8x22b-instruct',
|
|
144
|
+
value: 'accounts/fireworks/models/mixtral-8x22b-instruct',
|
|
145
|
+
},
|
|
146
|
+
{
|
|
147
|
+
name: 'accounts/fireworks/models/qwen2p5-72b-instruct',
|
|
148
|
+
value: 'accounts/fireworks/models/qwen2p5-72b-instruct',
|
|
149
|
+
},
|
|
150
|
+
],
|
|
151
|
+
},
|
|
152
|
+
{
|
|
153
|
+
id: 'deepinfra',
|
|
154
|
+
name: 'DeepInfra (serverless GPUs)',
|
|
155
|
+
baseUrl: 'https://api.deepinfra.com/v1/openai',
|
|
156
|
+
keyName: 'DeepInfra',
|
|
157
|
+
keyUrl: 'https://deepinfra.com/dash/api_keys',
|
|
158
|
+
models: [
|
|
159
|
+
{
|
|
160
|
+
name: 'meta-llama/Meta-Llama-3.1-70B-Instruct (recommended)',
|
|
161
|
+
value: 'meta-llama/Meta-Llama-3.1-70B-Instruct',
|
|
162
|
+
},
|
|
163
|
+
{
|
|
164
|
+
name: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
|
|
165
|
+
value: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
|
|
166
|
+
},
|
|
167
|
+
{ name: 'Qwen/Qwen2.5-72B-Instruct', value: 'Qwen/Qwen2.5-72B-Instruct' },
|
|
168
|
+
],
|
|
169
|
+
embedderModels: [
|
|
170
|
+
{ name: 'BAAI/bge-large-en-v1.5 (recommended)', value: 'BAAI/bge-large-en-v1.5' },
|
|
171
|
+
{
|
|
172
|
+
name: 'sentence-transformers/all-MiniLM-L6-v2',
|
|
173
|
+
value: 'sentence-transformers/all-MiniLM-L6-v2',
|
|
174
|
+
},
|
|
175
|
+
],
|
|
176
|
+
defaultEmbedderDimensions: 1024,
|
|
177
|
+
},
|
|
178
|
+
];
|
|
179
|
+
|
|
180
|
+
/**
|
|
181
|
+
* Provider configurations
|
|
182
|
+
*/
|
|
183
|
+
const PROVIDERS: LLMProvider[] = [
|
|
184
|
+
{
|
|
185
|
+
id: 'openai',
|
|
186
|
+
name: 'OpenAI (direct)',
|
|
187
|
+
embedder: 'openai',
|
|
188
|
+
needsOpenAI: true,
|
|
189
|
+
models: [
|
|
190
|
+
{ name: 'gpt-4o-mini (recommended - fast & cost-effective)', value: 'gpt-4o-mini' },
|
|
191
|
+
{ name: 'gpt-4o (best quality)', value: 'gpt-4o' },
|
|
192
|
+
{ name: 'gpt-3.5-turbo (economy)', value: 'gpt-3.5-turbo' },
|
|
193
|
+
],
|
|
194
|
+
},
|
|
195
|
+
{
|
|
196
|
+
id: 'openai-compatible',
|
|
197
|
+
name: 'OpenAI-compatible (OpenRouter, Together, etc.)',
|
|
198
|
+
embedder: 'openai-compatible',
|
|
199
|
+
needsOpenAI: false,
|
|
200
|
+
},
|
|
201
|
+
{
|
|
202
|
+
id: 'ollama',
|
|
203
|
+
name: 'Ollama (local, free) - ⚠️ LLM FAILS, embeddings OK',
|
|
204
|
+
embedder: 'ollama',
|
|
205
|
+
needsOpenAI: false,
|
|
206
|
+
models: [
|
|
207
|
+
// ⚠️ WARNING: All Ollama LLM models FAIL Graphiti Pydantic validation
|
|
208
|
+
// Use OpenRouter + Ollama (embeddings only) instead!
|
|
209
|
+
{ name: '⚠️ llama3.2 (FAILS Graphiti validation)', value: 'llama3.2' },
|
|
210
|
+
{ name: '⚠️ llama3.1:70b (FAILS Graphiti validation)', value: 'llama3.1:70b' },
|
|
211
|
+
{ name: '⚠️ mistral (FAILS Graphiti validation)', value: 'mistral' },
|
|
212
|
+
{ name: '⚠️ deepseek-r1:7b (FAILS Graphiti validation)', value: 'deepseek-r1:7b' },
|
|
213
|
+
{ name: '⚠️ qwen2.5:7b (FAILS Graphiti validation)', value: 'qwen2.5:7b' },
|
|
214
|
+
],
|
|
215
|
+
},
|
|
216
|
+
{
|
|
217
|
+
id: 'hybrid',
|
|
218
|
+
name: 'Hybrid (OpenAI LLM + Ollama embeddings - recommended)',
|
|
219
|
+
embedder: 'ollama',
|
|
220
|
+
needsOpenAI: true,
|
|
221
|
+
models: [
|
|
222
|
+
{ name: 'gpt-4o-mini (recommended - fast & cost-effective)', value: 'gpt-4o-mini' },
|
|
223
|
+
{ name: 'gpt-4o (best quality)', value: 'gpt-4o' },
|
|
224
|
+
],
|
|
225
|
+
},
|
|
226
|
+
{
|
|
227
|
+
id: 'anthropic',
|
|
228
|
+
name: 'Anthropic Claude',
|
|
229
|
+
embedder: 'openai',
|
|
230
|
+
needsOpenAI: true,
|
|
231
|
+
},
|
|
232
|
+
{
|
|
233
|
+
id: 'gemini',
|
|
234
|
+
name: 'Google Gemini',
|
|
235
|
+
embedder: 'gemini',
|
|
236
|
+
needsOpenAI: false,
|
|
237
|
+
},
|
|
238
|
+
{
|
|
239
|
+
id: 'groq',
|
|
240
|
+
name: 'Groq (fast inference)',
|
|
241
|
+
embedder: 'openai',
|
|
242
|
+
needsOpenAI: true,
|
|
243
|
+
models: [
|
|
244
|
+
{ name: 'llama-3.3-70b-versatile (recommended)', value: 'llama-3.3-70b-versatile' },
|
|
245
|
+
{ name: 'llama-3.1-70b-versatile', value: 'llama-3.1-70b-versatile' },
|
|
246
|
+
],
|
|
247
|
+
},
|
|
248
|
+
];
|
|
249
|
+
|
|
250
|
+
/**
|
|
251
|
+
* OpenAI API tiers
|
|
252
|
+
*/
|
|
253
|
+
const OPENAI_TIERS: APITier[] = [
|
|
254
|
+
{ name: 'Free tier', semaphoreLimit: 2 },
|
|
255
|
+
{ name: 'Tier 2 (60 requests/minute)', semaphoreLimit: 8 },
|
|
256
|
+
{ name: 'Tier 3 (500 requests/minute) - most common', semaphoreLimit: 10 },
|
|
257
|
+
{ name: 'Tier 4 (5000 requests/minute)', semaphoreLimit: 20 },
|
|
258
|
+
];
|
|
259
|
+
|
|
260
|
+
/**
|
|
261
|
+
* Database backend choice
|
|
262
|
+
*/
|
|
263
|
+
interface DatabaseBackend {
|
|
264
|
+
id: string;
|
|
265
|
+
name: string;
|
|
266
|
+
description: string;
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
/**
|
|
270
|
+
* Database backends
|
|
271
|
+
*/
|
|
272
|
+
const DATABASE_BACKENDS: DatabaseBackend[] = [
|
|
273
|
+
{
|
|
274
|
+
id: 'neo4j',
|
|
275
|
+
name: 'Neo4j (recommended)',
|
|
276
|
+
description:
|
|
277
|
+
'Native graph database with Cypher queries. Better special character handling, richer query language.',
|
|
278
|
+
},
|
|
279
|
+
{
|
|
280
|
+
id: 'falkordb',
|
|
281
|
+
name: 'FalkorDB',
|
|
282
|
+
description: 'Redis-based graph database. Simpler setup, lower resource usage.',
|
|
283
|
+
},
|
|
284
|
+
];
|
|
285
|
+
|
|
286
|
+
// =============================================================================
|
|
287
|
+
// NON-INTERACTIVE HELPERS
|
|
288
|
+
// =============================================================================
|
|
289
|
+
|
|
290
|
+
/**
|
|
291
|
+
* Confirm wrapper that auto-accepts in non-interactive mode
|
|
292
|
+
*/
|
|
293
|
+
async function confirmWithDefault(message: string, defaultValue = true): Promise<boolean> {
|
|
294
|
+
if (isNonInteractive) {
|
|
295
|
+
cli.dim(` Auto-${defaultValue ? 'accepting' : 'declining'}: ${message}`);
|
|
296
|
+
return defaultValue;
|
|
297
|
+
}
|
|
298
|
+
const { result } = await inquirer.prompt([
|
|
299
|
+
{
|
|
300
|
+
type: 'confirm',
|
|
301
|
+
name: 'result',
|
|
302
|
+
message,
|
|
303
|
+
default: defaultValue,
|
|
304
|
+
},
|
|
305
|
+
]);
|
|
306
|
+
return result;
|
|
307
|
+
}
|
|
308
|
+
|
|
309
|
+
/**
|
|
310
|
+
* Press enter wrapper that skips in non-interactive mode
|
|
311
|
+
*/
|
|
312
|
+
async function pressEnterToContinue(message = 'Press Enter to continue...'): Promise<void> {
|
|
313
|
+
if (isNonInteractive) {
|
|
314
|
+
return;
|
|
315
|
+
}
|
|
316
|
+
await inquirer.prompt([
|
|
317
|
+
{
|
|
318
|
+
type: 'input',
|
|
319
|
+
name: 'continue',
|
|
320
|
+
message,
|
|
321
|
+
},
|
|
322
|
+
]);
|
|
323
|
+
}
|
|
324
|
+
|
|
325
|
+
/**
|
|
326
|
+
* Installation type
|
|
327
|
+
*/
|
|
328
|
+
type InstallationType = 'local' | 'remote' | 'hybrid';
|
|
329
|
+
|
|
330
|
+
/**
|
|
331
|
+
* Installation state
|
|
332
|
+
*/
|
|
333
|
+
interface InstallState {
|
|
334
|
+
installType: InstallationType;
|
|
335
|
+
llmProvider: string;
|
|
336
|
+
embedderProvider: string;
|
|
337
|
+
modelName: string;
|
|
338
|
+
semaphoreLimit: string;
|
|
339
|
+
apiKeys: {
|
|
340
|
+
OPENAI_API_KEY?: string;
|
|
341
|
+
ANTHROPIC_API_KEY?: string;
|
|
342
|
+
GOOGLE_API_KEY?: string;
|
|
343
|
+
GROQ_API_KEY?: string;
|
|
344
|
+
OPENAI_COMPATIBLE_KEY?: string;
|
|
345
|
+
};
|
|
346
|
+
paiConfig: {
|
|
347
|
+
GROUP_ID?: string;
|
|
348
|
+
DATABASE_TYPE?: string;
|
|
349
|
+
GRAPHITI_TELEMETRY_ENABLED?: string;
|
|
350
|
+
};
|
|
351
|
+
// OpenAI-compatible provider configuration
|
|
352
|
+
openaiCompatible?: {
|
|
353
|
+
providerId: string;
|
|
354
|
+
baseUrl: string;
|
|
355
|
+
embedderBaseUrl?: string;
|
|
356
|
+
embedderModel?: string;
|
|
357
|
+
embedderDimensions?: number;
|
|
358
|
+
};
|
|
359
|
+
// Remote profile configuration
|
|
360
|
+
remoteProfile?: {
|
|
361
|
+
host: string;
|
|
362
|
+
port: number;
|
|
363
|
+
protocol: 'http' | 'https';
|
|
364
|
+
};
|
|
365
|
+
// Which profile is default (for hybrid installs)
|
|
366
|
+
defaultProfile: 'local' | 'remote';
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
/**
|
|
370
|
+
* Installer class
|
|
371
|
+
*/
|
|
372
|
+
class Installer {
|
|
373
|
+
private state: InstallState = {
|
|
374
|
+
installType: 'local',
|
|
375
|
+
llmProvider: 'ollama',
|
|
376
|
+
embedderProvider: 'ollama',
|
|
377
|
+
modelName: 'llama3.2',
|
|
378
|
+
semaphoreLimit: '10',
|
|
379
|
+
apiKeys: {},
|
|
380
|
+
paiConfig: {
|
|
381
|
+
GROUP_ID: 'main',
|
|
382
|
+
DATABASE_TYPE: 'neo4j',
|
|
383
|
+
GRAPHITI_TELEMETRY_ENABLED: 'false',
|
|
384
|
+
},
|
|
385
|
+
defaultProfile: 'local',
|
|
386
|
+
};
|
|
387
|
+
|
|
388
|
+
// Ollama configuration
|
|
389
|
+
private ollamaBaseUrl = 'http://host.docker.internal:11434/v1';
|
|
390
|
+
private embedderModel = 'mxbai-embed-large'; // Best Ollama embedder (77% quality, 156ms)
|
|
391
|
+
|
|
392
|
+
private containerManager = createContainerManager();
|
|
393
|
+
private configLoader = createConfigLoader();
|
|
394
|
+
|
|
395
|
+
/**
|
|
396
|
+
* Collect an API key with optional existing value
|
|
397
|
+
*/
|
|
398
|
+
private async collectKey(
|
|
399
|
+
keyName: string,
|
|
400
|
+
keyUrl: string,
|
|
401
|
+
existingValue?: string
|
|
402
|
+
): Promise<string | undefined> {
|
|
403
|
+
cli.blank();
|
|
404
|
+
cli.info(`You need a ${keyName} API key.`);
|
|
405
|
+
cli.dim(`Get it from: ${keyUrl}`);
|
|
406
|
+
cli.blank();
|
|
407
|
+
|
|
408
|
+
// If we have an existing value, confirm it
|
|
409
|
+
if (existingValue && existingValue.length > 0) {
|
|
410
|
+
const masked = this.maskApiKey(existingValue);
|
|
411
|
+
cli.info(`Found ${keyName} API key in PAI configuration.`);
|
|
412
|
+
cli.dim(`Key: ${masked}`);
|
|
413
|
+
cli.blank();
|
|
414
|
+
|
|
415
|
+
// In non-interactive mode, always use existing key
|
|
416
|
+
if (isNonInteractive) {
|
|
417
|
+
cli.success(`Using existing ${keyName} API key`);
|
|
418
|
+
return existingValue;
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
const { useKey } = await inquirer.prompt([
|
|
422
|
+
{
|
|
423
|
+
type: 'confirm',
|
|
424
|
+
name: 'useKey',
|
|
425
|
+
message: 'Use this key?',
|
|
426
|
+
default: true,
|
|
427
|
+
},
|
|
428
|
+
]);
|
|
429
|
+
|
|
430
|
+
if (useKey) {
|
|
431
|
+
cli.success(`Using existing ${keyName} API key`);
|
|
432
|
+
return existingValue;
|
|
433
|
+
}
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
// In non-interactive mode without existing key, check environment
|
|
437
|
+
if (isNonInteractive) {
|
|
438
|
+
// Try to get from environment variables
|
|
439
|
+
// MADEINOZ_KNOWLEDGE_* prefixed variables take PRECEDENCE over unprefixed variables
|
|
440
|
+
const envKey = keyName.includes('OpenAI')
|
|
441
|
+
? process.env.MADEINOZ_KNOWLEDGE_OPENAI_API_KEY || process.env.OPENAI_API_KEY
|
|
442
|
+
: keyName.includes('Anthropic')
|
|
443
|
+
? process.env.MADEINOZ_KNOWLEDGE_ANTHROPIC_API_KEY || process.env.ANTHROPIC_API_KEY
|
|
444
|
+
: keyName.includes('Google')
|
|
445
|
+
? process.env.MADEINOZ_KNOWLEDGE_GOOGLE_API_KEY || process.env.GOOGLE_API_KEY
|
|
446
|
+
: keyName.includes('Groq')
|
|
447
|
+
? process.env.MADEINOZ_KNOWLEDGE_GROQ_API_KEY || process.env.GROQ_API_KEY
|
|
448
|
+
: undefined;
|
|
449
|
+
|
|
450
|
+
if (envKey) {
|
|
451
|
+
cli.success(`Using ${keyName} API key from environment`);
|
|
452
|
+
return envKey;
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
cli.warning(`No ${keyName} API key found - set via environment or run interactively`);
|
|
456
|
+
return undefined;
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
// Prompt for new key
|
|
460
|
+
const { apiKey } = await inquirer.prompt([
|
|
461
|
+
{
|
|
462
|
+
type: 'password',
|
|
463
|
+
name: 'apiKey',
|
|
464
|
+
message: `Enter your ${keyName} API key:`,
|
|
465
|
+
validate: (input: string) => {
|
|
466
|
+
if (!input || input.trim().length === 0) {
|
|
467
|
+
return "No API key provided. You'll need to add it later.";
|
|
468
|
+
}
|
|
469
|
+
return true;
|
|
470
|
+
},
|
|
471
|
+
},
|
|
472
|
+
]);
|
|
473
|
+
|
|
474
|
+
if (apiKey && apiKey.trim().length > 0) {
|
|
475
|
+
cli.success(`${keyName} API key received`);
|
|
476
|
+
return apiKey.trim();
|
|
477
|
+
}
|
|
478
|
+
cli.warning('No API key provided');
|
|
479
|
+
return undefined;
|
|
480
|
+
}
|
|
481
|
+
|
|
482
|
+
/**
|
|
483
|
+
* Mask API key for display
|
|
484
|
+
*/
|
|
485
|
+
private maskApiKey(key: string): string {
|
|
486
|
+
if (key.length <= 24) {
|
|
487
|
+
return `${key.slice(0, 8)}...${key.slice(-4)}`;
|
|
488
|
+
}
|
|
489
|
+
return `${key.slice(0, 20)}...${key.slice(-4)}`;
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
/**
|
|
493
|
+
* Read PAI configuration from PAI .env file
|
|
494
|
+
* Path priority: PAI_DIR > ~/.claude > ~/.config/pai (legacy)
|
|
495
|
+
*/
|
|
496
|
+
private async readPAIConfig(): Promise<void> {
|
|
497
|
+
const possiblePaths = [
|
|
498
|
+
process.env.PAI_DIR ? `${process.env.PAI_DIR}/.env` : '',
|
|
499
|
+
`${process.env.HOME}/.claude/.env`,
|
|
500
|
+
`${process.env.HOME}/.config/pai/.env`, // Legacy fallback
|
|
501
|
+
].filter(Boolean);
|
|
502
|
+
|
|
503
|
+
let paiEnvPath: string | undefined;
|
|
504
|
+
|
|
505
|
+
for (const path of possiblePaths) {
|
|
506
|
+
try {
|
|
507
|
+
const file = Bun.file(path);
|
|
508
|
+
if (await file.exists()) {
|
|
509
|
+
paiEnvPath = path;
|
|
510
|
+
break;
|
|
511
|
+
}
|
|
512
|
+
} catch {
|
|
513
|
+
// Continue
|
|
514
|
+
}
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
if (!paiEnvPath) {
|
|
518
|
+
return;
|
|
519
|
+
}
|
|
520
|
+
|
|
521
|
+
cli.blank();
|
|
522
|
+
cli.info('Checking PAI configuration...');
|
|
523
|
+
|
|
524
|
+
try {
|
|
525
|
+
const file = Bun.file(paiEnvPath);
|
|
526
|
+
const content = await file.text();
|
|
527
|
+
const env: Record<string, string> = {};
|
|
528
|
+
|
|
529
|
+
// Parse .env file
|
|
530
|
+
for (const line of content.split('\n')) {
|
|
531
|
+
const trimmed = line.trim();
|
|
532
|
+
if (trimmed && !trimmed.startsWith('#')) {
|
|
533
|
+
const eqIndex = trimmed.indexOf('=');
|
|
534
|
+
if (eqIndex > 0) {
|
|
535
|
+
const key = trimmed.slice(0, eqIndex).trim();
|
|
536
|
+
const value = trimmed.slice(eqIndex + 1).trim();
|
|
537
|
+
env[key] = value;
|
|
538
|
+
}
|
|
539
|
+
}
|
|
540
|
+
}
|
|
541
|
+
|
|
542
|
+
// Read MADEINOZ_KNOWLEDGE_* variables
|
|
543
|
+
const paiPrefix = 'MADEINOZ_KNOWLEDGE_';
|
|
544
|
+
let foundAny = false;
|
|
545
|
+
|
|
546
|
+
for (const [key, value] of Object.entries(env)) {
|
|
547
|
+
if (key.startsWith(paiPrefix)) {
|
|
548
|
+
const configKey = key.slice(paiPrefix.length);
|
|
549
|
+
const _standardKey = key.replace(paiPrefix, '');
|
|
550
|
+
|
|
551
|
+
if (configKey === 'OPENAI_API_KEY') {
|
|
552
|
+
this.state.apiKeys.OPENAI_API_KEY = value;
|
|
553
|
+
foundAny = true;
|
|
554
|
+
} else if (configKey === 'LLM_PROVIDER') {
|
|
555
|
+
this.state.llmProvider = value;
|
|
556
|
+
foundAny = true;
|
|
557
|
+
} else if (configKey === 'EMBEDDER_PROVIDER') {
|
|
558
|
+
this.state.embedderProvider = value;
|
|
559
|
+
} else if (configKey === 'MODEL_NAME') {
|
|
560
|
+
this.state.modelName = value;
|
|
561
|
+
foundAny = true;
|
|
562
|
+
} else if (configKey === 'SEMAPHORE_LIMIT') {
|
|
563
|
+
this.state.semaphoreLimit = value;
|
|
564
|
+
} else if (configKey === 'GROUP_ID') {
|
|
565
|
+
this.state.paiConfig.GROUP_ID = value;
|
|
566
|
+
} else if (configKey === 'DATABASE_TYPE') {
|
|
567
|
+
this.state.paiConfig.DATABASE_TYPE = value;
|
|
568
|
+
} else if (configKey === 'GRAPHITI_TELEMETRY_ENABLED') {
|
|
569
|
+
this.state.paiConfig.GRAPHITI_TELEMETRY_ENABLED = value;
|
|
570
|
+
}
|
|
571
|
+
}
|
|
572
|
+
}
|
|
573
|
+
|
|
574
|
+
if (foundAny) {
|
|
575
|
+
cli.success('Found Madeinoz Knowledge System configuration in PAI .env');
|
|
576
|
+
if (this.state.llmProvider) {
|
|
577
|
+
cli.success(`LLM Provider from config: ${this.state.llmProvider}`);
|
|
578
|
+
}
|
|
579
|
+
if (this.state.modelName) {
|
|
580
|
+
cli.success(`Model from config: ${this.state.modelName}`);
|
|
581
|
+
}
|
|
582
|
+
}
|
|
583
|
+
} catch {
|
|
584
|
+
// Ignore errors reading PAI config
|
|
585
|
+
}
|
|
586
|
+
}
|
|
587
|
+
|
|
588
|
+
/**
|
|
589
|
+
* Step 1: Verify prerequisites
|
|
590
|
+
*/
|
|
591
|
+
private async verifyPrerequisites(): Promise<void> {
|
|
592
|
+
cli.blank();
|
|
593
|
+
cli.header('Step 1: Verify Prerequisites');
|
|
594
|
+
|
|
595
|
+
// For remote-only installations, Podman is not required
|
|
596
|
+
if (this.state.installType === 'remote') {
|
|
597
|
+
cli.info('Remote-only installation does not require local container runtime.');
|
|
598
|
+
cli.blank();
|
|
599
|
+
cli.info('Required for remote installation:');
|
|
600
|
+
cli.dim(' - Bun runtime (for CLI tools)');
|
|
601
|
+
cli.dim(' - Access to remote MCP server');
|
|
602
|
+
cli.blank();
|
|
603
|
+
cli.success('Remote-only prerequisites met');
|
|
604
|
+
return;
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
// For local and hybrid installations, check for Podman
|
|
608
|
+
if (!this.containerManager.isRuntimeAvailable()) {
|
|
609
|
+
cli.error('Podman is not installed!');
|
|
610
|
+
cli.blank();
|
|
611
|
+
cli.info('Please install Podman first:');
|
|
612
|
+
cli.dim(' macOS: brew install podman');
|
|
613
|
+
cli.dim(' Linux: sudo apt install podman');
|
|
614
|
+
cli.blank();
|
|
615
|
+
cli.dim('Visit: https://podman.io/getting-started/installation');
|
|
616
|
+
process.exit(1);
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
const versionResult = await this.containerManager.exec(['--version'], { silent: true });
|
|
620
|
+
if (versionResult.success) {
|
|
621
|
+
cli.success(`Podman is installed: ${versionResult.stdout.trim()}`);
|
|
622
|
+
}
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
/**
|
|
626
|
+
* Step 2: Installation directory (skip in TS version - we know where we are)
|
|
627
|
+
*/
|
|
628
|
+
private async confirmDirectory(): Promise<void> {
|
|
629
|
+
cli.blank();
|
|
630
|
+
cli.header('Step 2: Installation Directory');
|
|
631
|
+
cli.info('Using current pack directory');
|
|
632
|
+
cli.blank();
|
|
633
|
+
cli.dim('(In TypeScript version, we always use the current pack directory)');
|
|
634
|
+
await pressEnterToContinue();
|
|
635
|
+
}
|
|
636
|
+
|
|
637
|
+
/**
 * Step 2.5: Installation Type Selection
 *
 * Asks how the MCP server will be used and stores the answer in
 * `this.state.installType`:
 *   - 'local'  : run the MCP server in local Docker/Podman containers
 *   - 'remote' : connect to an already-running remote MCP server
 *   - 'hybrid' : both (development setup; local server managed manually)
 *
 * For 'remote' and 'hybrid' it then collects remote connection details;
 * for 'hybrid' it additionally asks which profile is the default.
 * Ends by printing mode-specific "next steps" and waiting for Enter.
 */
private async selectInstallationType(): Promise<void> {
  cli.blank();
  cli.header('Step 2.5: Installation Type');
  cli.blank();
  cli.info('How will you use the Madeinoz Knowledge System?');
  cli.blank();
  cli.info('This determines how the MCP server is configured.');
  cli.blank();

  // `value` fields are the InstallationType literals used throughout state.
  const installTypeChoices = [
    {
      name: 'Local only - Run MCP server locally via Docker/Podman',
      value: 'local',
      short: 'Local',
    },
    {
      name: 'Remote only - Connect to an existing remote MCP server',
      value: 'remote',
      short: 'Remote',
    },
    {
      name: 'Local and Remote - Development setup with both',
      value: 'hybrid',
      short: 'Hybrid',
    },
  ];

  let installType: InstallationType;

  if (isNonInteractive) {
    // Non-interactive runs keep whatever is already in state
    // (presumably loaded from config earlier — no prompting).
    installType = this.state.installType;
    cli.dim(` Using installation type: ${installType}`);
  } else {
    const result = await inquirer.prompt([
      {
        type: 'list',
        name: 'installType',
        message: 'Select installation type:',
        choices: installTypeChoices,
        default: this.state.installType,
      },
    ]);
    installType = result.installType;
  }

  this.state.installType = installType;
  // Echo the human-readable choice name; fall back to the raw id if lookup fails.
  cli.success(`Selected: ${installTypeChoices.find(c => c.value === installType)?.name || installType}`);

  // For remote or hybrid, collect remote server details
  if (installType === 'remote' || installType === 'hybrid') {
    await this.collectRemoteServerDetails();
  }

  // For hybrid, ask which is default
  if (installType === 'hybrid') {
    await this.selectDefaultProfile();
  }

  // Show explanation of what happens next
  cli.blank();
  if (installType === 'local') {
    cli.info('Next steps:');
    cli.dim(' - Configure LLM provider and API keys');
    cli.dim(' - Start local Docker/Podman containers');
    cli.dim(' - Install skill and configure connection');
  } else if (installType === 'remote') {
    cli.info('Next steps:');
    cli.dim(' - Configure LLM provider and API keys (for reference)');
    cli.dim(' - Create connection profile for remote server');
    cli.dim(' - Install skill (no local containers needed)');
    cli.blank();
    cli.info('Note: For remote-only installs, the MCP server must already be running');
    cli.dim(' on the remote host. We will configure the client to connect to it.');
  } else {
    // hybrid
    cli.info('Next steps:');
    cli.dim(' - Configure LLM provider and API keys');
    cli.dim(' - Create connection profiles for both local and remote');
    cli.dim(' - Install skill (you choose which profile is default)');
    cli.blank();
    cli.info('Note: In hybrid mode, local containers are NOT started automatically.');
    cli.dim(' Use server-cli commands to manage the local server.');
  }
  await pressEnterToContinue();
}
|
|
724
|
+
|
|
725
|
+
/**
|
|
726
|
+
* Collect remote server details for remote/hybrid installations
|
|
727
|
+
*/
|
|
728
|
+
private async collectRemoteServerDetails(): Promise<void> {
|
|
729
|
+
cli.blank();
|
|
730
|
+
cli.header('Remote Server Configuration');
|
|
731
|
+
cli.blank();
|
|
732
|
+
cli.info('Enter the connection details for your remote MCP server.');
|
|
733
|
+
cli.blank();
|
|
734
|
+
|
|
735
|
+
const result = await inquirer.prompt([
|
|
736
|
+
{
|
|
737
|
+
type: 'input',
|
|
738
|
+
name: 'host',
|
|
739
|
+
message: 'Remote server hostname or IP:',
|
|
740
|
+
default: '10.0.0.150',
|
|
741
|
+
validate: (input: string) => {
|
|
742
|
+
if (!input || input.trim().length === 0) {
|
|
743
|
+
return 'Hostname is required';
|
|
744
|
+
}
|
|
745
|
+
// Basic hostname/IP validation
|
|
746
|
+
const hostPattern = /^[a-zA-Z0-9.-]+$/;
|
|
747
|
+
if (!hostPattern.test(input.trim())) {
|
|
748
|
+
return 'Invalid hostname or IP address';
|
|
749
|
+
}
|
|
750
|
+
return true;
|
|
751
|
+
},
|
|
752
|
+
},
|
|
753
|
+
{
|
|
754
|
+
type: 'number',
|
|
755
|
+
name: 'port',
|
|
756
|
+
message: 'Remote server port:',
|
|
757
|
+
default: 8001,
|
|
758
|
+
validate: (input: number) => {
|
|
759
|
+
if (isNaN(input) || input < 1 || input > 65535) {
|
|
760
|
+
return 'Port must be between 1 and 65535';
|
|
761
|
+
}
|
|
762
|
+
return true;
|
|
763
|
+
},
|
|
764
|
+
},
|
|
765
|
+
{
|
|
766
|
+
type: 'list',
|
|
767
|
+
name: 'protocol',
|
|
768
|
+
message: 'Protocol:',
|
|
769
|
+
choices: [
|
|
770
|
+
{ name: 'HTTP (unencrypted)', value: 'http' },
|
|
771
|
+
{ name: 'HTTPS (encrypted)', value: 'https' },
|
|
772
|
+
],
|
|
773
|
+
default: 'http',
|
|
774
|
+
},
|
|
775
|
+
]);
|
|
776
|
+
|
|
777
|
+
this.state.remoteProfile = {
|
|
778
|
+
host: result.host.trim(),
|
|
779
|
+
port: result.port,
|
|
780
|
+
protocol: result.protocol,
|
|
781
|
+
};
|
|
782
|
+
|
|
783
|
+
cli.blank();
|
|
784
|
+
cli.success('Remote server configuration saved');
|
|
785
|
+
cli.dim(` Host: ${this.state.remoteProfile.host}`);
|
|
786
|
+
cli.dim(` Port: ${this.state.remoteProfile.port}`);
|
|
787
|
+
cli.dim(` Protocol: ${this.state.remoteProfile.protocol}`);
|
|
788
|
+
}
|
|
789
|
+
|
|
790
|
+
/**
|
|
791
|
+
* For hybrid installations, ask which profile should be default
|
|
792
|
+
*/
|
|
793
|
+
private async selectDefaultProfile(): Promise<void> {
|
|
794
|
+
cli.blank();
|
|
795
|
+
cli.info('Which connection should be the default?');
|
|
796
|
+
cli.blank();
|
|
797
|
+
cli.info('The default profile is used when no profile is specified.');
|
|
798
|
+
cli.blank();
|
|
799
|
+
|
|
800
|
+
const result = await inquirer.prompt([
|
|
801
|
+
{
|
|
802
|
+
type: 'list',
|
|
803
|
+
name: 'defaultProfile',
|
|
804
|
+
message: 'Select default profile:',
|
|
805
|
+
choices: [
|
|
806
|
+
{
|
|
807
|
+
name: 'Local (for development)',
|
|
808
|
+
value: 'local',
|
|
809
|
+
},
|
|
810
|
+
{
|
|
811
|
+
name: `Remote (${this.state.remoteProfile?.host || 'server'})`,
|
|
812
|
+
value: 'remote',
|
|
813
|
+
},
|
|
814
|
+
],
|
|
815
|
+
default: 'local',
|
|
816
|
+
},
|
|
817
|
+
]);
|
|
818
|
+
|
|
819
|
+
this.state.defaultProfile = result.defaultProfile;
|
|
820
|
+
cli.success(`Default profile: ${result.defaultProfile}`);
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
/**
 * Step 3: Database Backend Selection (skipped for remote-only)
 *
 * Picks the graph database backend (from DATABASE_BACKENDS, e.g. Neo4j or
 * FalkorDB) and records it in `this.state.paiConfig.DATABASE_TYPE`.
 * Remote-only installs skip the selection entirely: the remote server owns
 * the database, and a placeholder value of 'neo4j' is written for reference.
 */
private async selectDatabaseBackend(): Promise<void> {
  // Skip database backend selection for remote-only installations
  if (this.state.installType === 'remote') {
    cli.blank();
    cli.info('Step 3: Database Backend Selection (skipped for remote-only)');
    cli.blank();
    cli.dim('Remote-only installation does not require local database configuration.');
    cli.dim('The remote server handles all database operations.');
    // Set a default for reference (won't be used locally)
    this.state.paiConfig.DATABASE_TYPE = 'neo4j';
    return;
  }

  cli.blank();
  cli.header('Step 3: Database Backend Selection');

  cli.blank();
  cli.info('Choose your graph database backend:');
  cli.blank();

  // Print every available backend with its description before prompting.
  for (const backend of DATABASE_BACKENDS) {
    cli.dim(` ${backend.name}`);
    cli.dim(` ${backend.description}`);
    cli.blank();
  }

  let backend: string;

  if (isNonInteractive) {
    // Use existing backend from state (loaded from config) or default to neo4j
    backend = this.state.paiConfig.DATABASE_TYPE || 'neo4j';
    cli.dim(` Using database backend: ${backend}`);
  } else {
    const result = await inquirer.prompt([
      {
        type: 'list',
        name: 'backend',
        message: 'Select database backend:',
        choices: DATABASE_BACKENDS.map((b) => ({ name: b.name, value: b.id })),
        default: this.state.paiConfig.DATABASE_TYPE || 'neo4j',
      },
    ]);
    backend = result.backend;
  }

  // Resolve the id back to its backend definition; an unknown id (e.g. a
  // corrupt config value in non-interactive mode) is fatal.
  const selected = DATABASE_BACKENDS.find((b) => b.id === backend);
  if (!selected) {
    cli.error('Invalid backend selection');
    process.exit(1);
  }

  this.state.paiConfig.DATABASE_TYPE = selected.id;
  cli.success(`Selected: ${selected.name}`);

  // Show backend-specific information
  if (selected.id === 'neo4j') {
    cli.blank();
    cli.info('Neo4j provides:');
    cli.dim(' - Native Cypher query language');
    cli.dim(' - Neo4j Browser UI at http://localhost:7474');
    cli.dim(' - Bolt protocol at port 7687');
  } else {
    // Any non-neo4j backend is described as FalkorDB here — assumes
    // DATABASE_BACKENDS contains exactly those two entries; verify if more
    // backends are ever added.
    cli.blank();
    cli.info('FalkorDB provides:');
    cli.dim(' - Redis-based graph database');
    cli.dim(' - FalkorDB UI at http://localhost:3000');
    cli.dim(' - Lower memory footprint');
  }
}
|
|
895
|
+
|
|
896
|
+
/**
|
|
897
|
+
* Step 4: LLM Provider Selection
|
|
898
|
+
*/
|
|
899
|
+
private async selectProvider(): Promise<void> {
|
|
900
|
+
cli.blank();
|
|
901
|
+
cli.header('Step 4: LLM Provider Selection');
|
|
902
|
+
|
|
903
|
+
let provider: string;
|
|
904
|
+
|
|
905
|
+
if (isNonInteractive) {
|
|
906
|
+
// Use existing provider from state (loaded from config) or default to openai
|
|
907
|
+
provider = this.state.llmProvider || 'openai';
|
|
908
|
+
cli.dim(` Using provider: ${provider}`);
|
|
909
|
+
} else {
|
|
910
|
+
const result = await inquirer.prompt([
|
|
911
|
+
{
|
|
912
|
+
type: 'list',
|
|
913
|
+
name: 'provider',
|
|
914
|
+
message: 'Select your LLM provider:',
|
|
915
|
+
choices: PROVIDERS.map((p) => ({ name: p.name, value: p.id })),
|
|
916
|
+
default: this.state.llmProvider || 'openai',
|
|
917
|
+
},
|
|
918
|
+
]);
|
|
919
|
+
provider = result.provider;
|
|
920
|
+
}
|
|
921
|
+
|
|
922
|
+
const selected = PROVIDERS.find((p) => p.id === provider);
|
|
923
|
+
if (!selected) {
|
|
924
|
+
cli.error('Invalid provider selection');
|
|
925
|
+
process.exit(1);
|
|
926
|
+
}
|
|
927
|
+
|
|
928
|
+
// Handle OpenAI-compatible provider sub-selection
|
|
929
|
+
if (provider === 'openai-compatible') {
|
|
930
|
+
await this.selectOpenAICompatibleProvider();
|
|
931
|
+
return;
|
|
932
|
+
}
|
|
933
|
+
|
|
934
|
+
this.state.llmProvider = selected.id;
|
|
935
|
+
this.state.embedderProvider = selected.embedder;
|
|
936
|
+
|
|
937
|
+
if (selected.embedder !== selected.id) {
|
|
938
|
+
cli.warning(`Selected: ${selected.name} (requires OpenAI for embeddings)`);
|
|
939
|
+
} else {
|
|
940
|
+
cli.success(`Selected: ${selected.name}`);
|
|
941
|
+
}
|
|
942
|
+
}
|
|
943
|
+
|
|
944
|
+
/**
 * Sub-selection for OpenAI-compatible providers (OpenRouter, Together, etc.)
 *
 * Picks one entry from OPENAI_COMPATIBLE_PROVIDERS, then:
 *   - sets `state.llmProvider = 'openai'` (the generic OpenAI client type
 *     works against any OpenAI-compatible base URL);
 *   - records providerId/baseUrl in `state.openaiCompatible`;
 *   - if the provider ships embedder models, uses it for embeddings too;
 *     otherwise asks the user to choose Ollama (default) or OpenAI.
 */
private async selectOpenAICompatibleProvider(): Promise<void> {
  cli.blank();
  cli.info('OpenAI-compatible providers use the same API format as OpenAI');
  cli.info('but with different base URLs and API keys.');
  cli.blank();

  let selectedProviderId: string;

  if (isNonInteractive) {
    // Use existing or default to openrouter
    selectedProviderId = this.state.openaiCompatible?.providerId || 'openrouter';
    cli.dim(` Using OpenAI-compatible provider: ${selectedProviderId}`);
  } else {
    const result = await inquirer.prompt([
      {
        type: 'list',
        name: 'provider',
        message: 'Select OpenAI-compatible provider:',
        choices: OPENAI_COMPATIBLE_PROVIDERS.map((p) => ({ name: p.name, value: p.id })),
        default: this.state.openaiCompatible?.providerId || 'openrouter',
      },
    ]);
    selectedProviderId = result.provider;
  }

  // Unknown id (only possible via a bad state value in non-interactive
  // mode, since the prompt is constrained to valid choices) is fatal.
  const compatibleProvider = OPENAI_COMPATIBLE_PROVIDERS.find((p) => p.id === selectedProviderId);
  if (!compatibleProvider) {
    cli.error('Invalid OpenAI-compatible provider selection');
    process.exit(1);
  }

  // Store the OpenAI-compatible provider configuration
  this.state.llmProvider = 'openai'; // Use openai provider type (works with OpenAI-compatible API)
  this.state.openaiCompatible = {
    providerId: compatibleProvider.id,
    baseUrl: compatibleProvider.baseUrl,
  };

  // Handle embeddings - if provider has embeddings, use them; otherwise default to Ollama
  if (compatibleProvider.embedderModels && compatibleProvider.embedderModels.length > 0) {
    this.state.embedderProvider = 'openai'; // OpenAI-compatible for embeddings too
    this.state.openaiCompatible.embedderBaseUrl = compatibleProvider.baseUrl;
    // Default to the first listed embedder model; selectModel() may refine this.
    this.state.openaiCompatible.embedderModel = compatibleProvider.embedderModels[0].value;
    this.state.openaiCompatible.embedderDimensions = compatibleProvider.defaultEmbedderDimensions;
    cli.success(`Selected: ${compatibleProvider.name} (LLM + embeddings)`);
  } else {
    // Provider doesn't have embeddings, offer Ollama or OpenAI
    cli.blank();
    cli.warning(`${compatibleProvider.name} doesn't provide embeddings.`);
    cli.info('You need a separate embedder. Options:');
    cli.dim(' 1. Ollama (free, local) - recommended');
    cli.dim(' 2. OpenAI (paid, cloud)');
    cli.blank();

    if (isNonInteractive) {
      // Unattended installs take the free/local option.
      this.state.embedderProvider = 'ollama';
      cli.dim(' Using Ollama for embeddings (free, local)');
    } else {
      const { embedder } = await inquirer.prompt([
        {
          type: 'list',
          name: 'embedder',
          message: 'Select embedder:',
          choices: [
            { name: 'Ollama (free, local) - recommended', value: 'ollama' },
            { name: 'OpenAI (paid, cloud)', value: 'openai' },
          ],
          default: 'ollama',
        },
      ]);
      this.state.embedderProvider = embedder;
    }

    cli.success(
      `Selected: ${compatibleProvider.name} (LLM) + ${this.state.embedderProvider === 'ollama' ? 'Ollama' : 'OpenAI'} (embeddings)`
    );
  }
}
|
|
1025
|
+
|
|
1026
|
+
/**
 * Step 5: API Key Collection
 *
 * Gathers whatever credentials the chosen LLM/embedder combination needs
 * and stores them in `this.state.apiKeys`. Branches, in order:
 *   1. Ollama LLM: no API key; just (optionally) prompt for the Ollama URL.
 *   2. OpenAI-compatible provider: collect that provider's key, then
 *      configure the separate embedder (Ollama URL, or an OpenAI key).
 *   3. Otherwise: collect per-provider keys (OpenAI when flagged via
 *      `needsOpenAI`, Anthropic, Google, Groq) as applicable.
 *
 * Exits the process if `state.llmProvider` does not match any PROVIDERS
 * entry.
 */
private async collectAPIKeys(): Promise<void> {
  cli.blank();
  cli.header('Step 5: API Key Configuration');

  // Read existing PAI config first
  await this.readPAIConfig();

  const provider = PROVIDERS.find((p) => p.id === this.state.llmProvider);

  if (!provider) {
    cli.error('Invalid provider');
    process.exit(1);
  }

  // Ollama doesn't need API keys
  if (this.state.llmProvider === 'ollama') {
    cli.blank();
    cli.success('Ollama selected - no API key required!');
    cli.blank();
    cli.info('Ollama Prerequisites:');
    cli.dim(' 1. Install Ollama: https://ollama.com/download');
    cli.dim(' 2. Pull required models:');
    // NOTE(review): assumes state.modelName is already populated here
    // (e.g. from loaded config) even though model selection is Step 6 —
    // confirm against the wizard's step ordering.
    cli.dim(` ollama pull ${this.state.modelName}`);
    cli.dim(` ollama pull ${this.embedderModel}`);
    cli.dim(' 3. Ensure Ollama is running: ollama serve');
    cli.blank();

    // Configure Ollama base URL
    if (!isNonInteractive) {
      const { baseUrl } = await inquirer.prompt([
        {
          type: 'input',
          name: 'baseUrl',
          message: 'Ollama API URL (for Docker containers):',
          default: this.ollamaBaseUrl,
        },
      ]);
      this.ollamaBaseUrl = baseUrl;
    }

    cli.success(`Ollama endpoint: ${this.ollamaBaseUrl}`);
    return;
  }

  // Handle OpenAI-compatible provider (OpenRouter, Together, etc.)
  if (this.state.openaiCompatible) {
    const compatibleProvider = OPENAI_COMPATIBLE_PROVIDERS.find(
      (p) => p.id === this.state.openaiCompatible?.providerId
    );

    // If the stored providerId is unknown we fall through to the generic
    // per-provider key collection below instead of failing.
    if (compatibleProvider) {
      this.state.apiKeys.OPENAI_COMPATIBLE_KEY = await this.collectKey(
        compatibleProvider.keyName,
        compatibleProvider.keyUrl,
        this.state.apiKeys.OPENAI_COMPATIBLE_KEY
      );

      // If embedder is Ollama, configure its base URL
      if (this.state.embedderProvider === 'ollama') {
        cli.blank();
        cli.info('Configuring Ollama for embeddings...');
        cli.dim(' Ollama Prerequisites:');
        cli.dim(' 1. Install Ollama: https://ollama.com/download');
        cli.dim(` 2. Pull embedding model: ollama pull ${this.embedderModel}`);
        cli.dim(' 3. Ensure Ollama is running: ollama serve');
        cli.blank();

        if (!isNonInteractive) {
          const { baseUrl } = await inquirer.prompt([
            {
              type: 'input',
              name: 'baseUrl',
              message: 'Ollama API URL (for Docker containers):',
              default: this.ollamaBaseUrl,
            },
          ]);
          this.ollamaBaseUrl = baseUrl;
        }
        cli.success(`Ollama endpoint for embeddings: ${this.ollamaBaseUrl}`);
      }

      // If embedder is OpenAI (not Ollama), need OpenAI key for embeddings
      if (
        this.state.embedderProvider === 'openai' &&
        !this.state.openaiCompatible.embedderBaseUrl
      ) {
        cli.blank();
        cli.info('You need an OpenAI API key for embeddings.');
        this.state.apiKeys.OPENAI_API_KEY = await this.collectKey(
          'OpenAI (for embeddings)',
          'https://platform.openai.com/api-keys',
          this.state.apiKeys.OPENAI_API_KEY
        );
      }

      return;
    }
  }

  // Collect OpenAI key if needed
  if (provider.needsOpenAI) {
    this.state.apiKeys.OPENAI_API_KEY = await this.collectKey(
      'Madeinoz Knowledge System OpenAI',
      'https://platform.openai.com/api-keys',
      this.state.apiKeys.OPENAI_API_KEY
    );
  }

  // Collect Anthropic key
  if (this.state.llmProvider === 'anthropic') {
    this.state.apiKeys.ANTHROPIC_API_KEY = await this.collectKey(
      'Anthropic',
      'https://console.anthropic.com/',
      this.state.apiKeys.ANTHROPIC_API_KEY
    );
  }

  // Collect Google key
  if (this.state.llmProvider === 'gemini' || this.state.embedderProvider === 'gemini') {
    this.state.apiKeys.GOOGLE_API_KEY = await this.collectKey(
      'Google',
      'https://aistudio.google.com/app/apikey',
      this.state.apiKeys.GOOGLE_API_KEY
    );
  }

  // Collect Groq key
  if (this.state.llmProvider === 'groq') {
    this.state.apiKeys.GROQ_API_KEY = await this.collectKey(
      'Groq',
      'https://console.groq.com/',
      this.state.apiKeys.GROQ_API_KEY
    );
  }
}
|
|
1164
|
+
|
|
1165
|
+
/**
 * Step 6: Model Selection
 *
 * Chooses the LLM model (and, where applicable, the embeddings model) and
 * stores it in `this.state.modelName` / `state.openaiCompatible.embedderModel`.
 *
 * Flow:
 *   - OpenAI-compatible providers with a model list: pick an LLM model,
 *     then (if that provider also serves embeddings) an embedder model,
 *     and return.
 *   - Regular providers with a model list: prompt (or reuse state).
 *   - Providers without a model list: hard-coded per-provider defaults.
 */
private async selectModel(): Promise<void> {
  cli.blank();
  cli.header('Step 6: Model Configuration');

  // Handle OpenAI-compatible provider models
  if (this.state.openaiCompatible) {
    const compatibleProvider = OPENAI_COMPATIBLE_PROVIDERS.find(
      (p) => p.id === this.state.openaiCompatible?.providerId
    );

    // If the provider is unknown or has no model list, fall through to the
    // generic PROVIDERS handling below.
    if (compatibleProvider && compatibleProvider.models.length > 0) {
      if (isNonInteractive) {
        // Use existing model or first available
        const existingModel = compatibleProvider.models.find(
          (m) => m.value === this.state.modelName
        );
        this.state.modelName = existingModel?.value || compatibleProvider.models[0].value;
        cli.dim(` Using model: ${this.state.modelName}`);
      } else {
        const { model } = await inquirer.prompt([
          {
            type: 'list',
            name: 'model',
            message: `Select ${compatibleProvider.name} model:`,
            choices: compatibleProvider.models,
            default: this.state.modelName || compatibleProvider.models[0].value,
          },
        ]);
        this.state.modelName = model;
      }

      cli.success(`Selected model: ${this.state.modelName}`);

      // If provider has embedder models and we're using them, select embedder model too
      if (
        this.state.openaiCompatible.embedderBaseUrl &&
        compatibleProvider.embedderModels &&
        compatibleProvider.embedderModels.length > 0
      ) {
        cli.blank();
        cli.info('Configuring embeddings model...');

        if (isNonInteractive) {
          // Non-interactive always takes the first embedder model.
          this.state.openaiCompatible.embedderModel = compatibleProvider.embedderModels[0].value;
          cli.dim(` Using embedder: ${this.state.openaiCompatible.embedderModel}`);
        } else {
          const { embedderModel } = await inquirer.prompt([
            {
              type: 'list',
              name: 'embedderModel',
              message: `Select ${compatibleProvider.name} embeddings model:`,
              choices: compatibleProvider.embedderModels,
              default: compatibleProvider.embedderModels[0].value,
            },
          ]);
          this.state.openaiCompatible.embedderModel = embedderModel;
        }

        cli.success(`Selected embedder: ${this.state.openaiCompatible.embedderModel}`);
      }

      return;
    }
  }

  const provider = PROVIDERS.find((p) => p.id === this.state.llmProvider);

  if (!provider) {
    cli.error('Invalid provider');
    process.exit(1);
  }

  // If provider has specific models, prompt for selection
  if (provider.models && provider.models.length > 0) {
    if (isNonInteractive) {
      // Use existing model from state or first available
      const existingModel = provider.models.find((m) => m.value === this.state.modelName);
      this.state.modelName = existingModel?.value || provider.models[0].value;
      cli.dim(` Using model: ${this.state.modelName}`);
    } else {
      const { model } = await inquirer.prompt([
        {
          type: 'list',
          name: 'model',
          message: `Select ${provider.name} model:`,
          choices: provider.models,
          default: this.state.modelName || provider.models[0].value,
        },
      ]);
      this.state.modelName = model;
    }
  } else {
    // Use default model for provider
    switch (this.state.llmProvider) {
      case 'anthropic':
        this.state.modelName = 'claude-sonnet-4-20250514';
        cli.info('Using Claude Sonnet 4');
        break;
      case 'gemini':
        this.state.modelName = 'gemini-2.0-flash-exp';
        cli.info('Using Gemini 2.0 Flash');
        break;
      default:
        // Any other list-less provider defaults to OpenAI's small model.
        this.state.modelName = 'gpt-4o-mini';
    }
  }

  cli.success(`Selected model: ${this.state.modelName}`);
}
|
|
1277
|
+
|
|
1278
|
+
/**
|
|
1279
|
+
* Step 7: Concurrency Configuration
|
|
1280
|
+
*/
|
|
1281
|
+
private async configureConcurrency(): Promise<void> {
|
|
1282
|
+
cli.blank();
|
|
1283
|
+
cli.header('Step 7: Performance Configuration');
|
|
1284
|
+
|
|
1285
|
+
if (this.state.llmProvider === 'openai') {
|
|
1286
|
+
if (isNonInteractive) {
|
|
1287
|
+
// Use existing or default to Tier 3 (most common)
|
|
1288
|
+
const existingLimit = Number.parseInt(this.state.semaphoreLimit, 10);
|
|
1289
|
+
if (!existingLimit || existingLimit < 1) {
|
|
1290
|
+
this.state.semaphoreLimit = '10';
|
|
1291
|
+
}
|
|
1292
|
+
cli.dim(` Using concurrency limit: ${this.state.semaphoreLimit}`);
|
|
1293
|
+
} else {
|
|
1294
|
+
const { tier } = await inquirer.prompt([
|
|
1295
|
+
{
|
|
1296
|
+
type: 'list',
|
|
1297
|
+
name: 'tier',
|
|
1298
|
+
message: 'What is your OpenAI API tier?',
|
|
1299
|
+
choices: OPENAI_TIERS.map((t) => ({ name: t.name, value: t.semaphoreLimit })),
|
|
1300
|
+
default: Number.parseInt(this.state.semaphoreLimit, 10) || 10,
|
|
1301
|
+
},
|
|
1302
|
+
]);
|
|
1303
|
+
this.state.semaphoreLimit = String(tier);
|
|
1304
|
+
}
|
|
1305
|
+
} else {
|
|
1306
|
+
this.state.semaphoreLimit = '5';
|
|
1307
|
+
cli.success('Using conservative concurrency: 5');
|
|
1308
|
+
}
|
|
1309
|
+
|
|
1310
|
+
cli.success(`Concurrency limit: ${this.state.semaphoreLimit}`);
|
|
1311
|
+
}
|
|
1312
|
+
|
|
1313
|
+
/**
 * Step 8: Create Configuration
 *
 * Writes the collected wizard state to the PAI .env file (the single
 * source of truth, located via `this.configLoader.getEnvFile()`).
 *
 * Behavior:
 *   - If a .env already exists, offers to back it up (timestamped copy);
 *     declining aborts the installation.
 *   - Builds a flat key/value config from state (keys, provider, model,
 *     concurrency, database type, telemetry flag).
 *   - Layers on provider-specific overrides: Ollama base URL/model,
 *     OpenAI-compatible base URL + embedder settings, or the
 *     OpenAI-LLM-plus-Ollama-embeddings hybrid combination.
 *   - Persists via `this.configLoader.save(config)`.
 */
private async createConfiguration(): Promise<void> {
  cli.blank();
  cli.header('Step 8: Saving to PAI .env');

  // PAI .env is the ONLY source of truth
  // Location: ${PAI_DIR}/.env or ~/.claude/.env
  cli.info(`Target: ${this.configLoader.getEnvFile()}`);
  cli.blank();

  // Backup existing PAI .env
  if (await this.configLoader.envExists()) {
    cli.warning(`Found existing PAI .env: ${this.configLoader.getEnvFile()}`);

    const shouldBackup = await confirmWithDefault('Backup and replace?', true);

    if (shouldBackup) {
      // Filesystem-safe timestamp: ':'/'.' replaced, milliseconds+Z trimmed,
      // e.g. "2024-01-15T10-30-45".
      const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, -5);
      const backupPath = `${this.configLoader.getEnvFile()}.backup.${timestamp}`;
      const originalFile = Bun.file(this.configLoader.getEnvFile());
      const content = await originalFile.text();
      await Bun.write(backupPath, content);
      cli.success(`Backed up to ${backupPath}`);
    } else {
      // User declined the backup — do not overwrite their file.
      cli.error('Installation cancelled');
      process.exit(1);
    }
  }

  // Build configuration object
  const config: Record<string, string | undefined> = {
    OPENAI_API_KEY: this.state.apiKeys.OPENAI_API_KEY,
    ANTHROPIC_API_KEY: this.state.apiKeys.ANTHROPIC_API_KEY,
    GOOGLE_API_KEY: this.state.apiKeys.GOOGLE_API_KEY,
    GROQ_API_KEY: this.state.apiKeys.GROQ_API_KEY,
    LLM_PROVIDER: this.state.llmProvider,
    EMBEDDER_PROVIDER: this.state.embedderProvider,
    MODEL_NAME: this.state.modelName,
    SEMAPHORE_LIMIT: this.state.semaphoreLimit,
    GROUP_ID: this.state.paiConfig.GROUP_ID || 'main',
    DATABASE_TYPE: this.state.paiConfig.DATABASE_TYPE || 'neo4j',
    GRAPHITI_TELEMETRY_ENABLED: this.state.paiConfig.GRAPHITI_TELEMETRY_ENABLED || 'false',
  };

  // Add Ollama-specific configuration
  if (this.state.llmProvider === 'ollama') {
    config.OPENAI_BASE_URL = this.ollamaBaseUrl;
    config.EMBEDDER_MODEL = this.embedderModel;
  }

  // Add OpenAI-compatible provider configuration
  if (this.state.openaiCompatible) {
    // Use the provider's API key as OPENAI_API_KEY (works with OpenAI-compatible clients)
    config.OPENAI_API_KEY = this.state.apiKeys.OPENAI_COMPATIBLE_KEY;
    config.OPENAI_BASE_URL = this.state.openaiCompatible.baseUrl;

    // Note which provider we're using (for reference)
    config.OPENAI_COMPATIBLE_PROVIDER = this.state.openaiCompatible.providerId;

    // Configure embedder based on what's selected
    if (this.state.openaiCompatible.embedderBaseUrl) {
      // Using the same OpenAI-compatible provider for embeddings
      config.EMBEDDER_BASE_URL = this.state.openaiCompatible.embedderBaseUrl;
      config.EMBEDDER_MODEL = this.state.openaiCompatible.embedderModel;
      if (this.state.openaiCompatible.embedderDimensions) {
        config.EMBEDDER_DIMENSIONS = String(this.state.openaiCompatible.embedderDimensions);
      }
    } else if (this.state.embedderProvider === 'ollama') {
      // Using Ollama for embeddings
      config.EMBEDDER_BASE_URL = this.ollamaBaseUrl;
      config.EMBEDDER_MODEL = this.embedderModel;
      config.EMBEDDER_DIMENSIONS = '1024'; // mxbai-embed-large dimensions
    }
    // If embedderProvider is "openai" without embedderBaseUrl, we use standard OpenAI embeddings
    // In that case, OPENAI_API_KEY should be set separately for embeddings
  }

  // Add Hybrid configuration (OpenAI LLM + Ollama embeddings)
  // NOTE(review): llmProvider === 'hybrid' looks like a legacy value —
  // other steps only compare against concrete provider ids; confirm
  // whether PROVIDERS still contains a 'hybrid' entry.
  if (
    this.state.llmProvider === 'hybrid' ||
    (this.state.llmProvider === 'openai' &&
      this.state.embedderProvider === 'ollama' &&
      !this.state.openaiCompatible)
  ) {
    config.EMBEDDER_BASE_URL = this.ollamaBaseUrl;
    config.EMBEDDER_MODEL = this.embedderModel;
    config.EMBEDDER_DIMENSIONS = '1024'; // mxbai-embed-large dimensions
  }

  // Save configuration
  await this.configLoader.save(config);

  cli.success(`Configuration saved to PAI .env: ${this.configLoader.getEnvFile()}`);
}
|
|
1409
|
+
|
|
1410
|
+
/**
|
|
1411
|
+
* Step 9: Start Services (skipped for remote-only)
|
|
1412
|
+
*/
|
|
1413
|
+
private async startServices(): Promise<void> {
|
|
1414
|
+
// Skip starting services for remote-only installations
|
|
1415
|
+
if (this.state.installType === 'remote') {
|
|
1416
|
+
cli.blank();
|
|
1417
|
+
cli.header('Step 9: Starting Services (skipped for remote-only)');
|
|
1418
|
+
cli.blank();
|
|
1419
|
+
cli.info('Remote-only installation does not start local services.');
|
|
1420
|
+
cli.dim('The MCP server on the remote host should already be running.');
|
|
1421
|
+
return;
|
|
1422
|
+
}
|
|
1423
|
+
|
|
1424
|
+
// For hybrid mode, also skip auto-start (user manages local server manually)
|
|
1425
|
+
if (this.state.installType === 'hybrid') {
|
|
1426
|
+
cli.blank();
|
|
1427
|
+
cli.header('Step 9: Starting Services (skipped for hybrid mode)');
|
|
1428
|
+
cli.blank();
|
|
1429
|
+
cli.info('Hybrid mode does not auto-start local services.');
|
|
1430
|
+
cli.dim('Use server-cli commands to manage the local server:');
|
|
1431
|
+
cli.dim(' bun run server-cli start # Start local server');
|
|
1432
|
+
cli.dim(' bun run server-cli stop # Stop local server');
|
|
1433
|
+
cli.dim(' bun run server-cli status # Check status');
|
|
1434
|
+
cli.blank();
|
|
1435
|
+
cli.info('To use the local server, set the profile:');
|
|
1436
|
+
cli.dim(` export MADEINOZ_KNOWLEDGE_PROFILE=local`);
|
|
1437
|
+
cli.dim(' # Or use --profile local flag with commands');
|
|
1438
|
+
return;
|
|
1439
|
+
}
|
|
1440
|
+
|
|
1441
|
+
cli.blank();
|
|
1442
|
+
cli.header('Step 9: Starting Services');
|
|
1443
|
+
|
|
1444
|
+
cli.info('Starting Graphiti MCP server...');
|
|
1445
|
+
cli.blank();
|
|
1446
|
+
|
|
1447
|
+
// Import and run the run.ts script
|
|
1448
|
+
const _startTime = Date.now();
|
|
1449
|
+
|
|
1450
|
+
// Use spawn to run run.ts from the pack directory
|
|
1451
|
+
const packDir = import.meta.dir.replace(/\/src\/server$/, '');
|
|
1452
|
+
const proc = Bun.spawn(['bun', 'run', 'src/server/run.ts'], {
|
|
1453
|
+
stdout: 'inherit',
|
|
1454
|
+
stderr: 'inherit',
|
|
1455
|
+
cwd: packDir,
|
|
1456
|
+
});
|
|
1457
|
+
|
|
1458
|
+
const exitCode = await proc.exited;
|
|
1459
|
+
|
|
1460
|
+
if (exitCode !== 0) {
|
|
1461
|
+
cli.error('Failed to start services');
|
|
1462
|
+
process.exit(1);
|
|
1463
|
+
}
|
|
1464
|
+
|
|
1465
|
+
cli.blank();
|
|
1466
|
+
cli.info('Waiting for server to start...');
|
|
1467
|
+
|
|
1468
|
+
// Wait for server to be ready
|
|
1469
|
+
await new Promise((resolve) => setTimeout(resolve, 15000));
|
|
1470
|
+
|
|
1471
|
+
// Check server health
|
|
1472
|
+
cli.blank();
|
|
1473
|
+
cli.info('Verifying server health...');
|
|
1474
|
+
|
|
1475
|
+
try {
|
|
1476
|
+
const response = await fetch('http://localhost:8000/health', {
|
|
1477
|
+
signal: AbortSignal.timeout(5000),
|
|
1478
|
+
});
|
|
1479
|
+
|
|
1480
|
+
if (response.ok) {
|
|
1481
|
+
const data = await response.json();
|
|
1482
|
+
if (data.status === 'healthy' || data.status === 'ok') {
|
|
1483
|
+
cli.success('Server is running!');
|
|
1484
|
+
} else {
|
|
1485
|
+
cli.warning('Server health check inconclusive');
|
|
1486
|
+
}
|
|
1487
|
+
} else {
|
|
1488
|
+
cli.warning('Server health check inconclusive');
|
|
1489
|
+
}
|
|
1490
|
+
} catch {
|
|
1491
|
+
cli.warning('Server health check inconclusive');
|
|
1492
|
+
cli.dim('Check logs with: bun run server-cli logs');
|
|
1493
|
+
}
|
|
1494
|
+
}
|
|
1495
|
+
|
|
1496
|
+
/**
|
|
1497
|
+
* Step 8.5: Create Connection Profiles (for remote and hybrid installations)
|
|
1498
|
+
*/
|
|
1499
|
+
private async createConnectionProfiles(): Promise<void> {
|
|
1500
|
+
// Only create profiles for remote or hybrid installations
|
|
1501
|
+
if (this.state.installType === 'local') {
|
|
1502
|
+
return;
|
|
1503
|
+
}
|
|
1504
|
+
|
|
1505
|
+
cli.blank();
|
|
1506
|
+
cli.header('Creating Connection Profiles');
|
|
1507
|
+
cli.blank();
|
|
1508
|
+
|
|
1509
|
+
const profiles = [];
|
|
1510
|
+
|
|
1511
|
+
// Add local profile for hybrid installations
|
|
1512
|
+
if (this.state.installType === 'hybrid') {
|
|
1513
|
+
profiles.push({
|
|
1514
|
+
name: 'local',
|
|
1515
|
+
host: 'localhost',
|
|
1516
|
+
port: 8000,
|
|
1517
|
+
protocol: 'http' as const,
|
|
1518
|
+
basePath: '/mcp',
|
|
1519
|
+
});
|
|
1520
|
+
cli.info('Adding local profile...');
|
|
1521
|
+
}
|
|
1522
|
+
|
|
1523
|
+
// Add remote profile (always present for remote/hybrid)
|
|
1524
|
+
if (this.state.remoteProfile) {
|
|
1525
|
+
profiles.push({
|
|
1526
|
+
name: 'remote',
|
|
1527
|
+
host: this.state.remoteProfile.host,
|
|
1528
|
+
port: this.state.remoteProfile.port,
|
|
1529
|
+
protocol: this.state.remoteProfile.protocol,
|
|
1530
|
+
basePath: '/mcp',
|
|
1531
|
+
});
|
|
1532
|
+
cli.info(`Adding remote profile (${this.state.remoteProfile.host})...`);
|
|
1533
|
+
}
|
|
1534
|
+
|
|
1535
|
+
// Determine default profile name
|
|
1536
|
+
const defaultProfileName = this.state.defaultProfile === 'local' ? 'local' : 'remote';
|
|
1537
|
+
|
|
1538
|
+
try {
|
|
1539
|
+
const configPath = profileManager.saveProfiles(profiles, defaultProfileName);
|
|
1540
|
+
cli.blank();
|
|
1541
|
+
cli.success(`Connection profiles created: ${configPath}`);
|
|
1542
|
+
cli.blank();
|
|
1543
|
+
cli.dim(`Profiles: ${profiles.map(p => p.name).join(', ')}`);
|
|
1544
|
+
cli.dim(`Default: ${defaultProfileName}`);
|
|
1545
|
+
cli.blank();
|
|
1546
|
+
|
|
1547
|
+
// Show usage examples
|
|
1548
|
+
cli.info('Using profiles:');
|
|
1549
|
+
cli.dim(` # Use default profile (${defaultProfileName})`);
|
|
1550
|
+
cli.dim(` bun run knowledge-cli.ts status`);
|
|
1551
|
+
cli.blank();
|
|
1552
|
+
cli.dim(` # Use specific profile`);
|
|
1553
|
+
cli.dim(` bun run knowledge-cli.ts status --profile local`);
|
|
1554
|
+
cli.dim(` bun run knowledge-cli.ts status --profile remote`);
|
|
1555
|
+
cli.blank();
|
|
1556
|
+
|
|
1557
|
+
if (this.state.installType === 'remote') {
|
|
1558
|
+
cli.info('For remote-only installation:');
|
|
1559
|
+
cli.dim(' The default profile is set to your remote server.');
|
|
1560
|
+
cli.dim(' All commands will connect to the remote server by default.');
|
|
1561
|
+
} else {
|
|
1562
|
+
cli.info('For hybrid installation:');
|
|
1563
|
+
if (defaultProfileName === 'local') {
|
|
1564
|
+
cli.dim(' The default profile is set to LOCAL (for development).');
|
|
1565
|
+
cli.dim(' To use the remote server, specify --profile remote');
|
|
1566
|
+
} else {
|
|
1567
|
+
cli.dim(` The default profile is set to REMOTE (${this.state.remoteProfile?.host}).`);
|
|
1568
|
+
cli.dim(' To use the local server, specify --profile local');
|
|
1569
|
+
}
|
|
1570
|
+
}
|
|
1571
|
+
} catch (error) {
|
|
1572
|
+
cli.error(`Failed to create connection profiles: ${error}`);
|
|
1573
|
+
cli.dim('You can create them manually in ~/.claude/config/knowledge-profiles.yaml');
|
|
1574
|
+
}
|
|
1575
|
+
}
|
|
1576
|
+
|
|
1577
|
+
/**
|
|
1578
|
+
* Step 10: Install PAI Skill
|
|
1579
|
+
* Path priority: PAI_DIR > ~/.claude (PAI v2.1.0 standard)
|
|
1580
|
+
*/
|
|
1581
|
+
private async installPAISkill(): Promise<void> {
|
|
1582
|
+
cli.blank();
|
|
1583
|
+
cli.header('Step 10: Installing PAI Skill');
|
|
1584
|
+
|
|
1585
|
+
// Determine PAI directory - prefer PAI_DIR, then ~/.claude
|
|
1586
|
+
const possiblePaths = [
|
|
1587
|
+
process.env.PAI_DIR ? `${process.env.PAI_DIR}/skills` : '',
|
|
1588
|
+
`${process.env.HOME}/.claude/skills`,
|
|
1589
|
+
].filter(Boolean);
|
|
1590
|
+
|
|
1591
|
+
let paiSkillsDir: string | undefined;
|
|
1592
|
+
|
|
1593
|
+
for (const path of possiblePaths) {
|
|
1594
|
+
try {
|
|
1595
|
+
const dir = Bun.file(path);
|
|
1596
|
+
if (await dir.exists()) {
|
|
1597
|
+
paiSkillsDir = path;
|
|
1598
|
+
break;
|
|
1599
|
+
}
|
|
1600
|
+
} catch {
|
|
1601
|
+
// Continue
|
|
1602
|
+
}
|
|
1603
|
+
}
|
|
1604
|
+
|
|
1605
|
+
if (!paiSkillsDir) {
|
|
1606
|
+
cli.blank();
|
|
1607
|
+
cli.warning('PAI skills directory not found.');
|
|
1608
|
+
cli.info('Common locations:');
|
|
1609
|
+
cli.dim(' - ~/.claude/skills');
|
|
1610
|
+
cli.dim(' - $PAI_DIR/skills');
|
|
1611
|
+
cli.blank();
|
|
1612
|
+
|
|
1613
|
+
if (isNonInteractive) {
|
|
1614
|
+
// Create default skills directory in non-interactive mode
|
|
1615
|
+
paiSkillsDir = `${process.env.HOME}/.claude/skills`;
|
|
1616
|
+
cli.dim(` Creating default: ${paiSkillsDir}`);
|
|
1617
|
+
const { mkdirSync } = await import('node:fs');
|
|
1618
|
+
mkdirSync(paiSkillsDir, { recursive: true });
|
|
1619
|
+
} else {
|
|
1620
|
+
const { customDir } = await inquirer.prompt([
|
|
1621
|
+
{
|
|
1622
|
+
type: 'input',
|
|
1623
|
+
name: 'customDir',
|
|
1624
|
+
message: 'Enter PAI skills directory (or press Enter to skip):',
|
|
1625
|
+
},
|
|
1626
|
+
]);
|
|
1627
|
+
|
|
1628
|
+
if (customDir && customDir.trim().length > 0) {
|
|
1629
|
+
paiSkillsDir = customDir.trim();
|
|
1630
|
+
}
|
|
1631
|
+
}
|
|
1632
|
+
}
|
|
1633
|
+
|
|
1634
|
+
if (paiSkillsDir) {
|
|
1635
|
+
cli.blank();
|
|
1636
|
+
cli.info(`Installing to: ${paiSkillsDir}/madeinoz-knowledge-system`);
|
|
1637
|
+
|
|
1638
|
+
// Remove existing installation
|
|
1639
|
+
const existingPath = `${paiSkillsDir}/madeinoz-knowledge-system`;
|
|
1640
|
+
try {
|
|
1641
|
+
const existing = Bun.file(existingPath);
|
|
1642
|
+
if (await existing.exists()) {
|
|
1643
|
+
cli.warning('Removing existing installation');
|
|
1644
|
+
await this.containerManager.exec(['rm', '-rf', existingPath], { silent: true });
|
|
1645
|
+
}
|
|
1646
|
+
} catch {
|
|
1647
|
+
// Continue
|
|
1648
|
+
}
|
|
1649
|
+
|
|
1650
|
+
// Copy skill directory
|
|
1651
|
+
const packDir = `${import.meta.dir}/../../../`;
|
|
1652
|
+
const skillSource = `${packDir}/Knowledge`;
|
|
1653
|
+
|
|
1654
|
+
try {
|
|
1655
|
+
await this.containerManager.exec(
|
|
1656
|
+
['cp', '-r', skillSource, `${paiSkillsDir}/madeinoz-knowledge-system`],
|
|
1657
|
+
{
|
|
1658
|
+
silent: false,
|
|
1659
|
+
}
|
|
1660
|
+
);
|
|
1661
|
+
cli.success('Madeinoz Knowledge System skill installed');
|
|
1662
|
+
} catch (error) {
|
|
1663
|
+
cli.error(`Failed to install PAI skill: ${String(error)}`);
|
|
1664
|
+
}
|
|
1665
|
+
} else {
|
|
1666
|
+
cli.warning('Skipping PAI skill installation. You can install it manually later.');
|
|
1667
|
+
}
|
|
1668
|
+
}
|
|
1669
|
+
|
|
1670
|
+
/**
|
|
1671
|
+
* Step 11: Install Memory Sync Hook
|
|
1672
|
+
* Hooks install to ~/.claude/hooks/ where Claude Code reads them (PAI v2.1.0)
|
|
1673
|
+
* Updated for Memory System v7.0 (2026-01-12)
|
|
1674
|
+
*/
|
|
1675
|
+
private async installMemorySyncHook(): Promise<void> {
|
|
1676
|
+
cli.blank();
|
|
1677
|
+
cli.header('Step 11: Installing Memory Sync Hook');
|
|
1678
|
+
|
|
1679
|
+
cli.blank();
|
|
1680
|
+
cli.info('The Memory Sync Hook automatically syncs learnings and research');
|
|
1681
|
+
cli.info('from the PAI Memory System to your knowledge graph.');
|
|
1682
|
+
cli.blank();
|
|
1683
|
+
|
|
1684
|
+
const installHook = await confirmWithDefault('Install the Memory Sync Hook?', true);
|
|
1685
|
+
|
|
1686
|
+
if (!installHook) {
|
|
1687
|
+
cli.warning('Skipping hook installation. You can install it manually later.');
|
|
1688
|
+
return;
|
|
1689
|
+
}
|
|
1690
|
+
|
|
1691
|
+
// Determine PAI hooks directory - ~/.claude is where Claude Code reads hooks
|
|
1692
|
+
const paiDir = process.env.PAI_DIR || `${process.env.HOME}/.claude`;
|
|
1693
|
+
const hooksDir = `${paiDir}/hooks`;
|
|
1694
|
+
const settingsPath = `${paiDir}/settings.json`;
|
|
1695
|
+
|
|
1696
|
+
cli.blank();
|
|
1697
|
+
cli.info(`Installing hooks to: ${hooksDir}`);
|
|
1698
|
+
|
|
1699
|
+
try {
|
|
1700
|
+
// Create hooks directory
|
|
1701
|
+
const { mkdirSync, existsSync, copyFileSync, readFileSync, writeFileSync } = await import(
|
|
1702
|
+
'node:fs'
|
|
1703
|
+
);
|
|
1704
|
+
const { join, dirname } = await import('node:path');
|
|
1705
|
+
|
|
1706
|
+
if (!existsSync(hooksDir)) {
|
|
1707
|
+
mkdirSync(hooksDir, { recursive: true });
|
|
1708
|
+
}
|
|
1709
|
+
|
|
1710
|
+
// Create lib subdirectory
|
|
1711
|
+
const libDir = join(hooksDir, 'lib');
|
|
1712
|
+
if (!existsSync(libDir)) {
|
|
1713
|
+
mkdirSync(libDir, { recursive: true });
|
|
1714
|
+
}
|
|
1715
|
+
|
|
1716
|
+
// Copy hook files from pack
|
|
1717
|
+
const packDir = dirname(dirname(dirname(import.meta.dir)));
|
|
1718
|
+
const sourceHooksDir = join(packDir, 'src', 'hooks');
|
|
1719
|
+
|
|
1720
|
+
const filesToCopy = [
|
|
1721
|
+
{ src: 'sync-memory-to-knowledge.ts', dest: 'sync-memory-to-knowledge.ts' },
|
|
1722
|
+
{ src: 'lib/frontmatter-parser.ts', dest: 'lib/frontmatter-parser.ts' },
|
|
1723
|
+
{ src: 'lib/sync-state.ts', dest: 'lib/sync-state.ts' },
|
|
1724
|
+
{ src: 'lib/knowledge-client.ts', dest: 'lib/knowledge-client.ts' },
|
|
1725
|
+
{ src: 'lib/sync-config.ts', dest: 'lib/sync-config.ts' },
|
|
1726
|
+
{ src: 'lib/anti-loop-patterns.ts', dest: 'lib/anti-loop-patterns.ts' },
|
|
1727
|
+
];
|
|
1728
|
+
|
|
1729
|
+
for (const file of filesToCopy) {
|
|
1730
|
+
const srcPath = join(sourceHooksDir, file.src);
|
|
1731
|
+
const destPath = join(hooksDir, file.dest);
|
|
1732
|
+
|
|
1733
|
+
if (existsSync(srcPath)) {
|
|
1734
|
+
copyFileSync(srcPath, destPath);
|
|
1735
|
+
cli.success(`Installed: ${file.dest}`);
|
|
1736
|
+
} else {
|
|
1737
|
+
cli.warning(`Source not found: ${file.src}`);
|
|
1738
|
+
}
|
|
1739
|
+
}
|
|
1740
|
+
|
|
1741
|
+
// Copy config file to ~/.claude/config/
|
|
1742
|
+
const configDir = join(paiDir, 'config');
|
|
1743
|
+
if (!existsSync(configDir)) {
|
|
1744
|
+
mkdirSync(configDir, { recursive: true });
|
|
1745
|
+
}
|
|
1746
|
+
|
|
1747
|
+
const sourceConfigPath = join(packDir, 'config', 'sync-sources.json');
|
|
1748
|
+
const destConfigPath = join(configDir, 'sync-sources.json');
|
|
1749
|
+
|
|
1750
|
+
if (existsSync(sourceConfigPath)) {
|
|
1751
|
+
// Only copy if destination doesn't exist (preserve user customizations)
|
|
1752
|
+
if (!existsSync(destConfigPath)) {
|
|
1753
|
+
copyFileSync(sourceConfigPath, destConfigPath);
|
|
1754
|
+
cli.success(`Installed: config/sync-sources.json`);
|
|
1755
|
+
} else {
|
|
1756
|
+
cli.dim('Config file exists, preserving user customizations');
|
|
1757
|
+
}
|
|
1758
|
+
}
|
|
1759
|
+
|
|
1760
|
+
// Update settings.json to register the hook
|
|
1761
|
+
cli.blank();
|
|
1762
|
+
cli.info('Registering hook in settings.json...');
|
|
1763
|
+
|
|
1764
|
+
let settings: any = {};
|
|
1765
|
+
if (existsSync(settingsPath)) {
|
|
1766
|
+
try {
|
|
1767
|
+
const content = readFileSync(settingsPath, 'utf-8');
|
|
1768
|
+
settings = JSON.parse(content);
|
|
1769
|
+
} catch {
|
|
1770
|
+
cli.warning('Could not parse existing settings.json, creating new');
|
|
1771
|
+
}
|
|
1772
|
+
}
|
|
1773
|
+
|
|
1774
|
+
// Ensure hooks structure exists
|
|
1775
|
+
if (!settings.hooks) {
|
|
1776
|
+
settings.hooks = {};
|
|
1777
|
+
}
|
|
1778
|
+
|
|
1779
|
+
// Add SessionStart hook if not already present
|
|
1780
|
+
const hookCommand = `bun run ${hooksDir}/sync-memory-to-knowledge.ts`;
|
|
1781
|
+
|
|
1782
|
+
if (!settings.hooks.SessionStart) {
|
|
1783
|
+
settings.hooks.SessionStart = [];
|
|
1784
|
+
}
|
|
1785
|
+
|
|
1786
|
+
// Check if hook already registered (check for both old and new names)
|
|
1787
|
+
const hookExists = settings.hooks.SessionStart.some((h: any) =>
|
|
1788
|
+
h.hooks?.some(
|
|
1789
|
+
(inner: any) =>
|
|
1790
|
+
inner.command?.includes('sync-memory-to-knowledge') ||
|
|
1791
|
+
inner.command?.includes('sync-history-to-knowledge')
|
|
1792
|
+
)
|
|
1793
|
+
);
|
|
1794
|
+
|
|
1795
|
+
if (!hookExists) {
|
|
1796
|
+
settings.hooks.SessionStart.push({
|
|
1797
|
+
matcher: '*',
|
|
1798
|
+
hooks: [
|
|
1799
|
+
{
|
|
1800
|
+
type: 'command',
|
|
1801
|
+
command: hookCommand,
|
|
1802
|
+
timeout: 30000,
|
|
1803
|
+
},
|
|
1804
|
+
],
|
|
1805
|
+
});
|
|
1806
|
+
|
|
1807
|
+
writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
|
|
1808
|
+
cli.success('Hook registered in settings.json');
|
|
1809
|
+
} else {
|
|
1810
|
+
cli.info('Hook already registered in settings.json');
|
|
1811
|
+
}
|
|
1812
|
+
|
|
1813
|
+
cli.blank();
|
|
1814
|
+
cli.success('Memory Sync Hook installed!');
|
|
1815
|
+
cli.blank();
|
|
1816
|
+
cli.dim('The hook will:');
|
|
1817
|
+
cli.dim(' - Run on SessionStart');
|
|
1818
|
+
cli.dim(' - Sync LEARNING/ALGORITHM/, LEARNING/SYSTEM/, and RESEARCH/ to knowledge graph');
|
|
1819
|
+
cli.dim(' - Skip already-synced files');
|
|
1820
|
+
cli.dim(' - Gracefully handle MCP server being offline');
|
|
1821
|
+
} catch (error) {
|
|
1822
|
+
cli.error(`Failed to install hook: ${String(error)}`);
|
|
1823
|
+
cli.dim('You can install the hook manually later.');
|
|
1824
|
+
}
|
|
1825
|
+
}
|
|
1826
|
+
|
|
1827
|
+
/**
|
|
1828
|
+
* Step 12: Installation Summary
|
|
1829
|
+
*/
|
|
1830
|
+
private printSummary(): void {
|
|
1831
|
+
cli.blank();
|
|
1832
|
+
cli.header('Installation Complete!');
|
|
1833
|
+
|
|
1834
|
+
cli.blank();
|
|
1835
|
+
cli.info('📦 Configuration Summary:');
|
|
1836
|
+
cli.blank();
|
|
1837
|
+
cli.dim(`Installation Type: ${this.state.installType}`);
|
|
1838
|
+
cli.dim(`Database Backend: ${this.state.paiConfig.DATABASE_TYPE}`);
|
|
1839
|
+
cli.dim(`LLM Provider: ${this.state.llmProvider}`);
|
|
1840
|
+
cli.dim(`Model: ${this.state.modelName}`);
|
|
1841
|
+
cli.dim(`Concurrency: ${this.state.semaphoreLimit}`);
|
|
1842
|
+
cli.blank();
|
|
1843
|
+
|
|
1844
|
+
// Show different information based on installation type
|
|
1845
|
+
if (this.state.installType === 'local') {
|
|
1846
|
+
cli.info('Services (local):');
|
|
1847
|
+
cli.url(' MCP Server', 'http://localhost:8000/mcp/');
|
|
1848
|
+
if (this.state.paiConfig.DATABASE_TYPE === 'neo4j') {
|
|
1849
|
+
cli.url(' Neo4j Browser', 'http://localhost:7474');
|
|
1850
|
+
cli.dim(' Bolt URI: bolt://localhost:7687');
|
|
1851
|
+
} else {
|
|
1852
|
+
cli.url(' FalkorDB UI', 'http://localhost:3000');
|
|
1853
|
+
}
|
|
1854
|
+
cli.url(' Health Check', 'http://localhost:8000/health');
|
|
1855
|
+
cli.blank();
|
|
1856
|
+
} else if (this.state.installType === 'remote') {
|
|
1857
|
+
cli.info('Remote Server Configuration:');
|
|
1858
|
+
cli.dim(` Host: ${this.state.remoteProfile?.host}`);
|
|
1859
|
+
cli.dim(` Port: ${this.state.remoteProfile?.port}`);
|
|
1860
|
+
cli.dim(` Protocol: ${this.state.remoteProfile?.protocol}`);
|
|
1861
|
+
cli.dim(` Default Profile: remote`);
|
|
1862
|
+
cli.blank();
|
|
1863
|
+
cli.info('Connection Profile:');
|
|
1864
|
+
cli.dim(' Config file: ~/.claude/config/knowledge-profiles.yaml');
|
|
1865
|
+
cli.dim(' Use --profile flag to switch profiles');
|
|
1866
|
+
cli.blank();
|
|
1867
|
+
} else {
|
|
1868
|
+
// Hybrid
|
|
1869
|
+
cli.info('Connection Profiles:');
|
|
1870
|
+
cli.blank();
|
|
1871
|
+
cli.dim(' local: http://localhost:8000/mcp');
|
|
1872
|
+
cli.dim(` remote: ${this.state.remoteProfile?.protocol}://${this.state.remoteProfile?.host}:${this.state.remoteProfile?.port}/mcp`);
|
|
1873
|
+
cli.blank();
|
|
1874
|
+
cli.dim(` Default: ${this.state.defaultProfile}`);
|
|
1875
|
+
cli.dim(' Config file: ~/.claude/config/knowledge-profiles.yaml');
|
|
1876
|
+
cli.blank();
|
|
1877
|
+
}
|
|
1878
|
+
|
|
1879
|
+
cli.info('🎉 Next Steps:');
|
|
1880
|
+
cli.blank();
|
|
1881
|
+
cli.dim('1. Test the installation:');
|
|
1882
|
+
cli.dim(" Remember that I'm testing the Madeinoz Knowledge System.");
|
|
1883
|
+
cli.blank();
|
|
1884
|
+
cli.dim('2. Search your knowledge:');
|
|
1885
|
+
cli.dim(' What do I know about PAI?');
|
|
1886
|
+
cli.blank();
|
|
1887
|
+
cli.dim('3. Check system status:');
|
|
1888
|
+
cli.dim(' Show the knowledge graph status');
|
|
1889
|
+
cli.blank();
|
|
1890
|
+
|
|
1891
|
+
// Show different management commands based on installation type
|
|
1892
|
+
if (this.state.installType === 'local') {
|
|
1893
|
+
cli.dim('4. Memory sync (automatic):');
|
|
1894
|
+
cli.dim(' Learnings and research from PAI Memory System');
|
|
1895
|
+
cli.dim(' are automatically synced on session start.');
|
|
1896
|
+
cli.blank();
|
|
1897
|
+
|
|
1898
|
+
cli.info('Management Commands:');
|
|
1899
|
+
cli.dim(' View logs: bun run server-cli logs');
|
|
1900
|
+
cli.dim(' Restart: bun run server-cli restart');
|
|
1901
|
+
cli.dim(' Stop: bun run server-cli stop');
|
|
1902
|
+
cli.dim(' Start: bun run server-cli start');
|
|
1903
|
+
cli.dim(' Status: bun run server-cli status');
|
|
1904
|
+
} else if (this.state.installType === 'remote') {
|
|
1905
|
+
cli.dim('4. Memory sync (automatic):');
|
|
1906
|
+
cli.dim(' Learnings and research from PAI Memory System');
|
|
1907
|
+
cli.dim(' are automatically synced on session start.');
|
|
1908
|
+
cli.blank();
|
|
1909
|
+
|
|
1910
|
+
cli.info('Management Commands (remote):');
|
|
1911
|
+
cli.dim(' Status: bun run knowledge-cli.ts status');
|
|
1912
|
+
cli.dim(' Health: bun run knowledge-cli.ts health');
|
|
1913
|
+
cli.dim(' List profiles: bun run knowledge-cli.ts list_profiles');
|
|
1914
|
+
cli.blank();
|
|
1915
|
+
cli.info('Note: Remote server must be running on the remote host.');
|
|
1916
|
+
cli.dim(' Use server management commands on the remote host.');
|
|
1917
|
+
} else {
|
|
1918
|
+
// Hybrid
|
|
1919
|
+
cli.dim('4. Memory sync (automatic):');
|
|
1920
|
+
cli.dim(' Learnings and research from PAI Memory System');
|
|
1921
|
+
cli.dim(' are automatically synced on session start.');
|
|
1922
|
+
cli.blank();
|
|
1923
|
+
|
|
1924
|
+
cli.info('Local Server Management (manual):');
|
|
1925
|
+
cli.dim(' Start: bun run server-cli start');
|
|
1926
|
+
cli.dim(' Stop: bun run server-cli stop');
|
|
1927
|
+
cli.dim(' Status: bun run server-cli status');
|
|
1928
|
+
cli.blank();
|
|
1929
|
+
|
|
1930
|
+
cli.info('Knowledge CLI (works with any profile):');
|
|
1931
|
+
cli.dim(' Status (default): bun run knowledge-cli.ts status');
|
|
1932
|
+
cli.dim(` Status (local): bun run knowledge-cli.ts status --profile local`);
|
|
1933
|
+
cli.dim(` Status (remote): bun run knowledge-cli.ts status --profile remote`);
|
|
1934
|
+
cli.blank();
|
|
1935
|
+
cli.info(`Current default profile: ${this.state.defaultProfile}`);
|
|
1936
|
+
cli.dim('Switch profiles using --profile flag or MADEINOZ_KNOWLEDGE_PROFILE env var');
|
|
1937
|
+
}
|
|
1938
|
+
cli.blank();
|
|
1939
|
+
|
|
1940
|
+
cli.success('Installation complete!');
|
|
1941
|
+
}
|
|
1942
|
+
|
|
1943
|
+
/**
|
|
1944
|
+
* Run the full installation
|
|
1945
|
+
*/
|
|
1946
|
+
async run(): Promise<void> {
|
|
1947
|
+
cli.clear();
|
|
1948
|
+
|
|
1949
|
+
const modeLabel = isUpdateMode ? 'UPDATE' : isNonInteractive ? 'NON-INTERACTIVE' : 'v2.1.0';
|
|
1950
|
+
cli.header(`Madeinoz Knowledge System Installation (${modeLabel})`);
|
|
1951
|
+
cli.blank();
|
|
1952
|
+
|
|
1953
|
+
// In non-interactive mode, always read existing config first
|
|
1954
|
+
if (isNonInteractive) {
|
|
1955
|
+
cli.info('Non-interactive mode: Using defaults and existing configuration.');
|
|
1956
|
+
cli.blank();
|
|
1957
|
+
await this.readPAIConfig();
|
|
1958
|
+
|
|
1959
|
+
if (this.state.llmProvider) cli.dim(` LLM Provider: ${this.state.llmProvider}`);
|
|
1960
|
+
if (this.state.modelName) cli.dim(` Model: ${this.state.modelName}`);
|
|
1961
|
+
if (this.state.apiKeys.OPENAI_API_KEY)
|
|
1962
|
+
cli.dim(` OpenAI API key: ****${this.state.apiKeys.OPENAI_API_KEY.slice(-4)}`);
|
|
1963
|
+
cli.blank();
|
|
1964
|
+
}
|
|
1965
|
+
|
|
1966
|
+
if (isUpdateMode) {
|
|
1967
|
+
cli.info('Update mode: Preserving existing configuration where possible.');
|
|
1968
|
+
cli.blank();
|
|
1969
|
+
cli.dim(' - Will use existing API keys as defaults');
|
|
1970
|
+
cli.dim(' - Will preserve LLM provider settings');
|
|
1971
|
+
cli.dim(' - Only updates infrastructure files');
|
|
1972
|
+
cli.blank();
|
|
1973
|
+
|
|
1974
|
+
// Check for existing installation
|
|
1975
|
+
if (!this.configLoader.envExists()) {
|
|
1976
|
+
cli.error('Update mode requires an existing installation.');
|
|
1977
|
+
cli.dim('Run without --update for a fresh install.');
|
|
1978
|
+
process.exit(1);
|
|
1979
|
+
}
|
|
1980
|
+
|
|
1981
|
+
// Read existing config first (if not already read in non-interactive mode)
|
|
1982
|
+
if (!isNonInteractive) {
|
|
1983
|
+
await this.readPAIConfig();
|
|
1984
|
+
|
|
1985
|
+
cli.info('Found existing configuration:');
|
|
1986
|
+
if (this.state.llmProvider) cli.dim(` LLM Provider: ${this.state.llmProvider}`);
|
|
1987
|
+
if (this.state.modelName) cli.dim(` Model: ${this.state.modelName}`);
|
|
1988
|
+
if (this.state.apiKeys.OPENAI_API_KEY)
|
|
1989
|
+
cli.dim(` OpenAI API key: ****${this.state.apiKeys.OPENAI_API_KEY.slice(-4)}`);
|
|
1990
|
+
cli.blank();
|
|
1991
|
+
}
|
|
1992
|
+
|
|
1993
|
+
const proceed = await confirmWithDefault('Proceed with update?', true);
|
|
1994
|
+
|
|
1995
|
+
if (!proceed) {
|
|
1996
|
+
cli.info('Update cancelled.');
|
|
1997
|
+
process.exit(0);
|
|
1998
|
+
}
|
|
1999
|
+
} else if (!isNonInteractive) {
|
|
2000
|
+
cli.info('This script will install and configure the Madeinoz Knowledge System.');
|
|
2001
|
+
cli.blank();
|
|
2002
|
+
cli.info('Prerequisites:');
|
|
2003
|
+
cli.dim(' - Bun runtime (required for all installations)');
|
|
2004
|
+
cli.dim(' - At least one LLM provider API key');
|
|
2005
|
+
cli.dim(' - Podman/Docker (for local installations only)');
|
|
2006
|
+
cli.blank();
|
|
2007
|
+
cli.info('Installation will prompt you for:');
|
|
2008
|
+
cli.dim(' - Installation type (local/remote/hybrid)');
|
|
2009
|
+
cli.dim(' - Remote server details (if applicable)');
|
|
2010
|
+
cli.blank();
|
|
2011
|
+
|
|
2012
|
+
await pressEnterToContinue();
|
|
2013
|
+
}
|
|
2014
|
+
|
|
2015
|
+
await this.verifyPrerequisites();
|
|
2016
|
+
|
|
2017
|
+
if (!isUpdateMode && !isNonInteractive) {
|
|
2018
|
+
await this.confirmDirectory();
|
|
2019
|
+
}
|
|
2020
|
+
|
|
2021
|
+
// New: Installation type selection (Step 2.5)
|
|
2022
|
+
await this.selectInstallationType();
|
|
2023
|
+
|
|
2024
|
+
await this.selectDatabaseBackend();
|
|
2025
|
+
await this.selectProvider();
|
|
2026
|
+
await this.collectAPIKeys();
|
|
2027
|
+
await this.selectModel();
|
|
2028
|
+
await this.configureConcurrency();
|
|
2029
|
+
await this.createConfiguration();
|
|
2030
|
+
|
|
2031
|
+
// New: Create connection profiles for remote/hybrid installations
|
|
2032
|
+
await this.createConnectionProfiles();
|
|
2033
|
+
|
|
2034
|
+
await this.startServices();
|
|
2035
|
+
await this.installPAISkill();
|
|
2036
|
+
await this.installMemorySyncHook();
|
|
2037
|
+
this.printSummary();
|
|
2038
|
+
}
|
|
2039
|
+
}
|
|
2040
|
+
|
|
2041
|
+
/**
|
|
2042
|
+
* Main function
|
|
2043
|
+
*/
|
|
2044
|
+
async function main() {
|
|
2045
|
+
const installer = new Installer();
|
|
2046
|
+
await installer.run();
|
|
2047
|
+
}
|
|
2048
|
+
|
|
2049
|
+
// Run main function
|
|
2050
|
+
main().catch((error) => {
|
|
2051
|
+
cli.error('Unexpected error:');
|
|
2052
|
+
console.error(error);
|
|
2053
|
+
process.exit(1);
|
|
2054
|
+
});
|