kg-mcp 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kg_mcp/cli/setup.py ADDED
@@ -0,0 +1,1100 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ MCP-KG-Memory Setup Wizard — GEMINI ONLY (Direct + LiteLLM), numeric menus
4
+
5
+ What it does:
6
+ - Guides setup with numeric choices (no strings to type for menus)
7
+ - Supports Gemini Direct (AI Studio / Gemini API) and Gemini via LiteLLM Gateway/Proxy
8
+ - Can configure either one, or both (and pick a primary)
9
+ - Generates a .env file (safe + backward compatible: LLM_MODEL still present)
10
+ - Optional: Neo4j local via Docker Compose or remote credentials
11
+ - Optional: apply Neo4j schema (if module exists)
12
+ - Optional: configure Antigravity MCP config (~/.gemini/antigravity/mcp_config.json)
13
+ - Optional: connectivity tests
14
+
15
+ Run:
16
+ kg-mcp-setup   (or: python3 -m kg_mcp.cli.setup)
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import json
22
+ import os
23
+ import re
24
+ import secrets
25
+ import shutil
26
+ import subprocess
27
+ import sys
28
+ import time
29
+ from pathlib import Path
30
+ from typing import Any, Dict, List, Optional, Tuple
31
+
32
+ # -------------------------
33
+ # Dependency bootstrap
34
+ # -------------------------
35
+ def ensure(import_name: str, pip_name: Optional[str] = None) -> None:
36
+ try:
37
+ __import__(import_name)
38
+ except ImportError:
39
+ pkg = pip_name or import_name
40
+ print(f"Installing dependency: {pkg} ...")
41
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", pkg])
42
+
43
+
44
+ ensure("rich")
45
+ ensure("requests")
46
+
47
+ from rich.console import Console
48
+ from rich.panel import Panel
49
+ from rich.prompt import Prompt, Confirm
50
+ from rich.table import Table
51
+ from rich.markdown import Markdown
52
+ from rich.progress import Progress, SpinnerColumn, TextColumn
53
+
54
+ import requests
55
+
56
+ console = Console()
57
+
58
+
59
+ # -------------------------
60
+ # Constants (curated)
61
+ # -------------------------
62
+ # Main Gemini text models for KG/RAG workloads (from official Gemini API model list)
63
+ GEMINI_DIRECT_MODELS = [
64
+ "gemini-2.5-flash", # best price/perf default
65
+ "gemini-2.5-pro", # heavier reasoning
66
+ "gemini-2.5-flash-lite", # fastest/cost-efficient
67
+ "gemini-2.5-flash-preview-09-2025", # preview
68
+ "gemini-2.5-flash-lite-preview-09-2025",
69
+ # Specialized (you can still pick them if needed)
70
+ "gemini-2.5-flash-image",
71
+ "gemini-2.5-flash-native-audio-preview-12-2025",
72
+ ]
73
+
74
+ # If you want to allow legacy / soon-to-retire models, keep them separated and warn.
75
+ GEMINI_LEGACY_OR_RISKY = [
76
+ "gemini-2.0-flash", # example: retirement warnings exist in some Google services
77
+ "gemini-2.0-flash-lite",
78
+ ]
79
+
80
+ # For LiteLLM, Gemini models must be sent as gemini/<model>
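+ # e.g. litellm_wrap("gemini-2.5-flash") -> "gemini/gemini-2.5-flash";
+ # an already-prefixed id such as "gemini/gemini-2.5-pro" is returned unchanged.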
81
+ def litellm_wrap(model: str) -> str:
82
+ if model.startswith("gemini/"):
83
+ return model
84
+ return f"gemini/{model}"
85
+
86
+
87
+ # -------------------------
88
+ # Small helpers
89
+ # -------------------------
90
+ def normalize_url(url: str) -> str:
91
+ url = url.strip()
92
+ if not url:
93
+ return url
94
+ if not re.match(r"^https?://", url):
95
+ url = "https://" + url
96
+ url = url.rstrip("/") + "/"
97
+ return url
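+ # e.g. normalize_url("litellm.mycompany.com") -> "https://litellm.mycompany.com/" (scheme added, single trailing slash)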
98
+
99
+
100
+ def mask(s: str, keep: int = 4) -> str:
101
+ if not s:
102
+ return ""
103
+ if len(s) <= keep:
104
+ return "*" * len(s)
105
+ return s[:keep] + "*" * (len(s) - keep)
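+ # e.g. mask("abcd1234", keep=4) -> "abcd****"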
106
+
107
+
108
+ def choose_numeric(title: str, options: List[str], default_index: int = 1) -> int:
109
+ if default_index < 1 or default_index > len(options):
110
+ default_index = 1
111
+
112
+ table = Table(title=title, show_header=True, header_style="bold cyan")
113
+ table.add_column("#", style="bold yellow", width=4)
114
+ table.add_column("Opzione", style="green")
115
+
116
+ for i, opt in enumerate(options, start=1):
117
+ suffix = " [dim](default)[/]" if i == default_index else ""
118
+ table.add_row(str(i), opt + suffix)
119
+
120
+ console.print(table)
121
+
122
+ while True:
123
+ raw = Prompt.ask("Seleziona un numero", default=str(default_index)).strip()
124
+ if raw.isdigit():
125
+ idx = int(raw)
126
+ if 1 <= idx <= len(options):
127
+ return idx
128
+ console.print("[red]Valore non valido. Inserisci un numero presente in lista.[/]")
129
+
130
+
131
+ def prompt_required(label: str, default: Optional[str] = None) -> str:
132
+ while True:
133
+ v = Prompt.ask(label, default=default or "").strip()
134
+ if v:
135
+ return v
136
+ console.print("[red]Campo obbligatorio.[/]")
137
+
138
+
139
+ def prompt_secret_required(label: str, allow_empty: bool = False) -> str:
140
+ while True:
141
+ v = Prompt.ask(label, password=True, default="").strip()
142
+ if v or allow_empty:
143
+ return v
144
+ console.print("[red]Campo obbligatorio.[/]")
145
+
146
+
147
+ def backup_file(path: Path) -> Optional[Path]:
148
+ if not path.exists():
149
+ return None
150
+ ts = time.strftime("%Y%m%d-%H%M%S")
151
+ bak = path.with_suffix(path.suffix + f".bak-{ts}")
152
+ shutil.copy2(path, bak)
153
+ return bak
154
+
155
+
156
+ def safe_write_text(path: Path, content: str) -> None:
157
+ path.parent.mkdir(parents=True, exist_ok=True)
158
+ path.write_text(content, encoding="utf-8")
159
+
160
+
161
+ def run_cmd(cmd: List[str], cwd: Optional[Path] = None, env: Optional[Dict[str, str]] = None, timeout: int = 30) -> subprocess.CompletedProcess:
162
+ return subprocess.run(
163
+ cmd,
164
+ cwd=str(cwd) if cwd else None,
165
+ env=env,
166
+ capture_output=True,
167
+ text=True,
168
+ timeout=timeout,
169
+ )
170
+
171
+
172
+ def docker_available() -> Tuple[bool, bool]:
173
+ """Returns (installed, daemon_running)."""
174
+ try:
175
+ r = run_cmd(["docker", "--version"], timeout=5)
176
+ installed = (r.returncode == 0)
177
+ except Exception:
178
+ installed = False
179
+
180
+ if not installed:
181
+ return False, False
182
+
183
+ try:
184
+ r = run_cmd(["docker", "info"], timeout=10)
185
+ running = (r.returncode == 0)
186
+ except Exception:
187
+ running = False
188
+
189
+ return True, running
190
+
191
+
192
+ def http_get(url: str, headers: Dict[str, str], timeout: int = 20) -> Tuple[bool, Any, str]:
193
+ try:
194
+ r = requests.get(url, headers=headers, timeout=timeout)
195
+ if 200 <= r.status_code < 300:
196
+ try:
197
+ return True, r.json(), ""
198
+ except Exception:
199
+ return True, r.text, ""
200
+ return False, None, f"HTTP {r.status_code}: {r.text[:250]}"
201
+ except Exception as e:
202
+ return False, None, str(e)
203
+
204
+
205
+ def http_post(url: str, headers: Dict[str, str], payload: Dict[str, Any], timeout: int = 30) -> Tuple[bool, Any, str]:
206
+ try:
207
+ r = requests.post(url, headers=headers, json=payload, timeout=timeout)
208
+ if 200 <= r.status_code < 300:
209
+ try:
210
+ return True, r.json(), ""
211
+ except Exception:
212
+ return True, r.text, ""
213
+ return False, None, f"HTTP {r.status_code}: {r.text[:250]}"
214
+ except Exception as e:
215
+ return False, None, str(e)
216
+
217
+
218
+ def find_project_root() -> Path:
219
+ """
220
+ Heuristics:
221
+ - If we see server/pyproject.toml -> current is root
222
+ - If current is server/ with pyproject.toml -> parent is root
223
+ - Else walk parents; else fallback ~/.kg-mcp
224
+ """
225
+ cur = Path.cwd()
226
+
227
+ if (cur / "server" / "pyproject.toml").exists():
228
+ return cur
229
+ if (cur / "pyproject.toml").exists() and cur.name == "server":
230
+ return cur.parent
231
+
232
+ for p in cur.parents:
233
+ if (p / "server" / "pyproject.toml").exists():
234
+ return p
235
+ if (p / "docker-compose.yml").exists() and (p / "server").exists():
236
+ return p
237
+
238
+ home = Path.home() / ".kg-mcp"
239
+ home.mkdir(parents=True, exist_ok=True)
240
+ return home
241
+
242
+
243
+ # -------------------------
244
+ # Wizard
245
+ # -------------------------
246
+ class SetupWizard:
247
+ def __init__(self) -> None:
248
+ self.project_root = find_project_root()
249
+ self.env_path = self.project_root / ".env"
250
+ self.config: Dict[str, str] = {}
251
+
252
+ def run(self) -> None:
253
+ self._welcome()
254
+ self._step_llm()
255
+ self._step_neo4j()
256
+ self._step_security()
257
+ self._write_env()
258
+ self._optional_start_neo4j()
259
+ self._optional_apply_schema()
260
+ self._optional_antigravity()
261
+ self._summary()
262
+
263
+ # -------------------------
264
+ # Step 0: Welcome
265
+ # -------------------------
266
+ def _welcome(self) -> None:
267
+ msg = f"""
268
+ # 🧠 MCP-KG-Memory Setup Wizard — Gemini only (Direct + LiteLLM)
269
+
270
+ This wizard creates an easy-to-use configuration.
271
+
272
+ **Supports:**
273
+ - ✅ Gemini Direct (AI Studio key)
274
+ - ✅ Gemini via LiteLLM Gateway/Proxy (automatic model prefix)
275
+
276
+ **Output:**
277
+ - `.env` in: `{self.env_path}`
278
+
279
+ Let's get started.
280
+ """.strip()
281
+ console.print(Panel(Markdown(msg), border_style="green"))
282
+ console.print()
283
+
284
+ # -------------------------
285
+ # Step 1: LLM
286
+ # -------------------------
287
+ def _step_llm(self) -> None:
288
+ console.print(Panel("[bold]Step 1: LLM (Gemini)[/]", border_style="blue"))
289
+
290
+ mode_opts = [
291
+ "Solo Gemini Direct (AI Studio)",
292
+ "Solo LiteLLM Gateway/Proxy (Gemini routed)",
293
+ "Configura ENTRAMBI (e scegli primario)",
294
+ ]
295
+ mode = choose_numeric("Che modalità vuoi configurare?", mode_opts, default_index=1)
296
+
297
+ if mode == 1:
298
+ self._setup_gemini_direct()
299
+ self.config["LLM_MODE"] = "gemini_direct"
300
+ self.config["LLM_PROVIDER"] = "gemini"
301
+ self.config["LLM_MODEL"] = self.config["GEMINI_MODEL"]
302
+ elif mode == 2:
303
+ self._setup_litellm_gemini()
304
+ self.config["LLM_MODE"] = "litellm"
305
+ self.config["LLM_PROVIDER"] = "litellm"
306
+ self.config["LLM_MODEL"] = self.config["LITELLM_MODEL"]
307
+ else:
308
+ # both
309
+ self._setup_gemini_direct()
310
+ self._setup_litellm_gemini()
311
+
312
+ primary_opts = [
313
+ "Primario: Gemini Direct",
314
+ "Primario: LiteLLM",
315
+ ]
316
+ primary = choose_numeric("Quale vuoi usare come default primario?", primary_opts, default_index=2)
317
+ if primary == 1:
318
+ self.config["LLM_MODE"] = "both"
319
+ self.config["LLM_PRIMARY"] = "gemini_direct"
320
+ self.config["LLM_PROVIDER"] = "gemini"
321
+ self.config["LLM_MODEL"] = self.config["GEMINI_MODEL"]
322
+ else:
323
+ self.config["LLM_MODE"] = "both"
324
+ self.config["LLM_PRIMARY"] = "litellm"
325
+ self.config["LLM_PROVIDER"] = "litellm"
326
+ self.config["LLM_MODEL"] = self.config["LITELLM_MODEL"]
327
+
328
+ # Optional role-based models (useful for KG workloads)
329
+ if Confirm.ask("Vuoi configurare modelli diversi per RUOLO (default/fast/reason)?", default=False):
330
+ self._setup_role_models()
331
+
332
+ console.print()
333
+
334
+ def _setup_role_models(self) -> None:
335
+ # Pick from direct list (text oriented) and keep consistent with chosen backend.
336
+ # If primary is litellm, we store litellm-wrapped names; else direct names.
337
+ primary = self.config.get("LLM_PROVIDER", "litellm")
338
+ use_litellm_names = (primary == "litellm")
339
+
340
+ def pick(title: str, default_model: str) -> str:
341
+ base = GEMINI_DIRECT_MODELS[:3] + ["(legacy/risky) " + m for m in GEMINI_LEGACY_OR_RISKY] + ["Custom..."]
342
+ idx = choose_numeric(title, base, default_index=(base.index(default_model) + 1 if default_model in base else 1))
343
+ choice = base[idx - 1]
344
+ if choice == "Custom...":
345
+ m = prompt_required("Inserisci model id (es: gemini-2.5-flash)")
346
+ else:
347
+ if choice.startswith("(legacy/risky) "):
348
+ m = choice.replace("(legacy/risky) ", "")
349
+ console.print("[yellow]![/] Nota: questo modello potrebbe avere retirement/deprecation in alcuni servizi. Usalo solo se necessario.")
350
+ else:
351
+ m = choice
352
+
353
+ if use_litellm_names:
354
+ return litellm_wrap(m)
355
+ return m
356
+
357
+ # Default recommendation: flash; fast: flash-lite; reason: pro
358
+ default_m = "gemini-2.5-flash"
359
+ fast_m = "gemini-2.5-flash-lite"
360
+ reason_m = "gemini-2.5-pro"
361
+
362
+ self.config["KG_MODEL_DEFAULT"] = pick("Scegli KG_MODEL_DEFAULT", default_m)
363
+ self.config["KG_MODEL_FAST"] = pick("Scegli KG_MODEL_FAST (operazioni veloci/high throughput)", fast_m)
364
+ self.config["KG_MODEL_REASON"] = pick("Scegli KG_MODEL_REASON (reasoning/diagnostica)", reason_m)
365
+
366
+ def _setup_gemini_direct(self) -> None:
367
+ console.print(Panel("[bold]Gemini Direct[/]", border_style="cyan"))
368
+
369
+ api_key = prompt_secret_required("GEMINI_API_KEY (da AI Studio)", allow_empty=False)
370
+ self.config["GEMINI_API_KEY"] = api_key
371
+ self.config["GEMINI_BASE_URL"] = "https://generativelanguage.googleapis.com/"
372
+
373
+ model_opts = [
374
+ "Scegli da lista consigliata (2.5 Flash/Pro/Flash-Lite)",
375
+ "Mostrami la lista LIVE dei modelli disponibili (richiede key)",
376
+ "Inserisci model id manualmente",
377
+ ]
378
+ pick_mode = choose_numeric("Come vuoi scegliere il modello?", model_opts, default_index=1)
379
+
380
+ if pick_mode == 1:
381
+ self.config["GEMINI_MODEL"] = self._pick_from_curated_direct()
382
+ elif pick_mode == 2:
383
+ live = self._gemini_list_models(api_key)
384
+ if live:
385
+ self.config["GEMINI_MODEL"] = self._pick_from_list("Scegli un modello (LIVE)", live, default="gemini-2.5-flash")
386
+ else:
387
+ console.print("[yellow]![/] Non sono riuscito a ottenere la lista LIVE. Uso lista consigliata.")
388
+ self.config["GEMINI_MODEL"] = self._pick_from_curated_direct()
389
+ else:
390
+ self.config["GEMINI_MODEL"] = prompt_required("Model id (es: gemini-2.5-flash)")
391
+
392
+ # Optional connectivity test
393
+ if Confirm.ask("Vuoi testare ora Gemini Direct (generateContent)?", default=True):
394
+ self._test_gemini_direct(api_key, self.config["GEMINI_MODEL"])
395
+
396
+ def _pick_from_curated_direct(self) -> str:
397
+ options = (
398
+ ["gemini-2.5-flash (default consigliato)"]
399
+ + ["gemini-2.5-pro"]
400
+ + ["gemini-2.5-flash-lite"]
401
+ + ["Altri (preview/specializzati)..."]
402
+ + ["Legacy/risky (2.0)..."]
403
+ + ["Custom..."]
404
+ )
405
+ idx = choose_numeric("Seleziona categoria modello", options, default_index=1)
406
+
407
+ if idx == 1:
408
+ return "gemini-2.5-flash"
409
+ if idx == 2:
410
+ return "gemini-2.5-pro"
411
+ if idx == 3:
412
+ return "gemini-2.5-flash-lite"
413
+ if idx == 4:
414
+ return self._pick_from_list("Scegli (preview/specializzati)", GEMINI_DIRECT_MODELS[3:], default=GEMINI_DIRECT_MODELS[3])
415
+ if idx == 5:
416
+ console.print("[yellow]![/] Warning: alcuni servizi indicano retirement per 2.0 Flash/Flash-Lite entro il 2026. Usa 2.5 Flash-Lite come sostituto se puoi.")
417
+ return self._pick_from_list("Scegli (legacy/risky)", GEMINI_LEGACY_OR_RISKY, default=GEMINI_LEGACY_OR_RISKY[0])
418
+ return prompt_required("Model id custom (es: gemini-2.5-flash)")
419
+
420
+ def _gemini_list_models(self, api_key: str) -> List[str]:
421
+ # Gemini REST: GET v1beta/models?key=...
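+ # Expected response shape: {"models": [{"name": "models/gemini-2.5-flash", ...}, ...]}.
+ # The key could also be sent in the "x-goog-api-key" header instead of the query string,
+ # which keeps it out of URLs and access logs.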
422
+ url = f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}"
423
+ ok, data, err = http_get(url, headers={"Content-Type": "application/json"})
424
+ if not ok:
425
+ console.print(f"[yellow]![/] GET models fallito: {err}")
426
+ return []
427
+ try:
428
+ models = data.get("models", [])
429
+ names: List[str] = []
430
+ for m in models:
431
+ name = m.get("name", "") # "models/gemini-2.5-flash"
432
+ if name.startswith("models/"):
433
+ name = name[len("models/") :]
434
+ if name:
435
+ names.append(name)
436
+ # De-dup + stable ordering
437
+ names = sorted(set(names))
438
+ # prefer gemini-* first
439
+ names = sorted(names, key=lambda s: (0 if s.startswith("gemini-") else 1, s))
440
+ return names
441
+ except Exception:
442
+ console.print("[yellow]![/] Risposta ricevuta ma non parsabile come lista modelli.")
443
+ return []
444
+
445
+ def _test_gemini_direct(self, api_key: str, model: str) -> None:
446
+ # POST generateContent
447
+ url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"
448
+ payload = {"contents": [{"parts": [{"text": "ping (rispondi solo con 'pong')"}]}]}
449
+ with Progress(SpinnerColumn(), TextColumn("{task.description}"), console=console) as p:
450
+ p.add_task(description="Test Gemini Direct...", total=None)
451
+ ok, data, err = http_post(url, headers={"Content-Type": "application/json"}, payload=payload, timeout=30)
452
+
453
+ if ok:
454
+ console.print("[green]✓[/] Gemini Direct OK.")
455
+ try:
456
+ txt = data["candidates"][0]["content"]["parts"][0]["text"]
457
+ console.print(f" Reply: {txt!r}")
458
+ except Exception:
459
+ console.print(" (OK ma risposta inattesa)")
460
+ else:
461
+ console.print(f"[yellow]![/] Test fallito: {err}")
462
+
463
+ def _setup_litellm_gemini(self) -> None:
464
+ console.print(Panel("[bold]LiteLLM Gateway/Proxy (Gemini)[/]", border_style="cyan"))
465
+
466
+ base_url = prompt_required("LITELLM_BASE_URL (es: https://litellm.mycompany.com/ oppure http://localhost:4000/)")
467
+ base_url = normalize_url(base_url)
468
+ api_key = prompt_secret_required("LITELLM_API_KEY", allow_empty=False)
469
+
470
+ self.config["LITELLM_BASE_URL"] = base_url
471
+ self.config["LITELLM_API_KEY"] = api_key
472
+
473
+ # Model selection
474
+ model_opts = [
475
+ "Scegli da lista consigliata (Gemini 2.5) [prefix automatico gemini/...]",
476
+ "Prova a leggere la lista modelli dal Gateway (GET /v1/models) [se supportato]",
477
+ "Inserisci model id manualmente",
478
+ ]
479
+ pick_mode = choose_numeric("Come vuoi scegliere il modello (LiteLLM)?", model_opts, default_index=1)
480
+
481
+ if pick_mode == 1:
482
+ m = self._pick_from_curated_direct()
483
+ self.config["LITELLM_MODEL"] = litellm_wrap(m)
484
+ elif pick_mode == 2:
485
+ live = self._litellm_list_models(base_url, api_key)
486
+ if live:
487
+ picked = self._pick_from_list("Scegli un modello (Gateway)", live, default=self._best_default_from_gateway(live))
488
+ self.config["LITELLM_MODEL"] = picked
489
+ if not picked.startswith("gemini/"):
490
+ console.print("[yellow]![/] Nota: il gateway ha restituito un id senza prefisso 'gemini/'. Lo userò così com'è.")
491
+ else:
492
+ console.print("[yellow]![/] Non sono riuscito a ottenere la lista dal gateway. Uso lista consigliata.")
493
+ m = self._pick_from_curated_direct()
494
+ self.config["LITELLM_MODEL"] = litellm_wrap(m)
495
+ else:
496
+ raw = prompt_required("Model id (LiteLLM). Esempio: gemini/gemini-2.5-flash")
497
+ # auto-fix if user pasted direct model
498
+ self.config["LITELLM_MODEL"] = litellm_wrap(raw) if raw.startswith("gemini-") else raw
499
+
500
+ # Optional connectivity test
501
+ if Confirm.ask("Vuoi testare ora LiteLLM (POST /v1/chat/completions)?", default=True):
502
+ self._test_litellm(base_url, api_key, self.config["LITELLM_MODEL"])
503
+
504
+ def _litellm_list_models(self, base_url: str, api_key: str) -> List[str]:
505
+ # common: /v1/models (OpenAI compatible)
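+ # Expected OpenAI-style shape: {"data": [{"id": "<model id>", ...}, ...]}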
506
+ url = base_url.rstrip("/") + "/v1/models"
507
+ ok, data, err = http_get(url, headers={"Authorization": f"Bearer {api_key}"}, timeout=20)
508
+ if not ok:
509
+ console.print(f"[yellow]![/] GET /v1/models fallito: {err}")
510
+ return []
511
+ try:
512
+ items = data.get("data", [])
513
+ ids = [it.get("id", "") for it in items if isinstance(it, dict)]
514
+ ids = [x for x in ids if x]
515
+ ids = sorted(set(ids))
516
+ return ids
517
+ except Exception:
518
+ console.print("[yellow]![/] Risposta /v1/models ricevuta ma non parsabile.")
519
+ return []
520
+
521
+ def _best_default_from_gateway(self, ids: List[str]) -> str:
522
+ # prefer gemini/gemini-2.5-flash if present
523
+ preferred = [
524
+ "gemini/gemini-2.5-flash",
525
+ "gemini/gemini-2.5-flash-lite",
526
+ "gemini/gemini-2.5-pro",
527
+ ]
528
+ for p in preferred:
529
+ if p in ids:
530
+ return p
531
+ return ids[0] if ids else "gemini/gemini-2.5-flash"
532
+
533
+ def _test_litellm(self, base_url: str, api_key: str, model: str) -> None:
534
+ url = base_url.rstrip("/") + "/v1/chat/completions"
535
+ payload = {
536
+ "model": model,
537
+ "messages": [{"role": "user", "content": "ping (rispondi solo con 'pong')"}],
538
+ "max_tokens": 16,
539
+ }
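+ # An OpenAI-compatible reply carries the text at choices[0].message.content,
+ # which is what the success branch below reads.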
540
+ with Progress(SpinnerColumn(), TextColumn("{task.description}"), console=console) as p:
541
+ p.add_task(description="Test LiteLLM...", total=None)
542
+ ok, data, err = http_post(
543
+ url,
544
+ headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
545
+ payload=payload,
546
+ timeout=30,
547
+ )
548
+
549
+ if ok:
550
+ console.print("[green]✓[/] LiteLLM OK.")
551
+ try:
552
+ txt = data["choices"][0]["message"]["content"]
553
+ console.print(f" Reply: {txt!r}")
554
+ except Exception:
555
+ console.print(" (OK ma risposta inattesa)")
556
+ else:
557
+ console.print(f"[yellow]![/] Test fallito: {err}")
558
+
559
+ def _pick_from_list(self, title: str, items: List[str], default: str) -> str:
560
+ # build numeric menu for up to N entries, else provide pagination-ish
561
+ if not items:
562
+ return default
563
+
564
+ # Put default first if exists
565
+ items_sorted = items[:]
566
+ if default in items_sorted:
567
+ items_sorted.remove(default)
568
+ items_sorted.insert(0, default)
569
+
570
+ # If huge list, show first 40 + custom
571
+ max_show = 40
572
+ shown = items_sorted[:max_show]
573
+ options = shown + ["Custom..."]
574
+ idx = choose_numeric(title, options, default_index=1)
575
+ if idx == len(options):
576
+ return prompt_required("Model id custom")
577
+ return options[idx - 1]
578
+
579
+ # -------------------------
580
+ # Step 2: Neo4j
581
+ # -------------------------
582
+ def _step_neo4j(self) -> None:
583
+ console.print(Panel("[bold]Step 2: Neo4j[/]", border_style="blue"))
584
+
585
+ opts = [
586
+ "Docker locale (compose auto se manca)",
587
+ "Neo4j remoto (inserisco URI/user/pass)",
588
+ "Skip Neo4j (lo configuro dopo)",
589
+ ]
590
+ c = choose_numeric("Come vuoi configurare Neo4j?", opts, default_index=1)
591
+
592
+ if c == 3:
593
+ self.config["NEO4J_CONFIGURED"] = "0"
594
+ console.print("[yellow]Neo4j skipped.[/]\n")
595
+ return
596
+
597
+ self.config["NEO4J_CONFIGURED"] = "1"
598
+
599
+ if c == 1:
600
+ self.config["NEO4J_URI"] = "bolt://localhost:7687"
601
+ self.config["NEO4J_USER"] = "neo4j"
602
+ pw = Prompt.ask("Password Neo4j (invio = genera)", password=True, default="").strip()
603
+ if not pw:
604
+ pw = secrets.token_urlsafe(18)
605
+ console.print(f"[green]✓[/] Password Neo4j generata: [yellow]{pw}[/]")
606
+ self.config["NEO4J_PASSWORD"] = pw
607
+
608
+ installed, running = docker_available()
609
+ self.config["NEO4J_DOCKER_ENABLED"] = "1" if installed else "0"
610
+ self.config["NEO4J_DOCKER_AUTOSTART"] = "1" if (installed and running and Confirm.ask("Avvio Neo4j ora?", default=True)) else "0"
611
+ else:
612
+ self.config["NEO4J_URI"] = prompt_required("NEO4J_URI (bolt://host:7687)", default="bolt://your-neo4j-host:7687")
613
+ self.config["NEO4J_USER"] = Prompt.ask("NEO4J_USER", default="neo4j").strip() or "neo4j"
614
+ self.config["NEO4J_PASSWORD"] = prompt_secret_required("NEO4J_PASSWORD", allow_empty=False)
615
+ self.config["NEO4J_DOCKER_ENABLED"] = "0"
616
+ self.config["NEO4J_DOCKER_AUTOSTART"] = "0"
617
+
618
+ # Always apply schema (best practice)
619
+ self.config["NEO4J_APPLY_SCHEMA"] = "1"
620
+ console.print("[dim]Schema (constraints/indexes) will be applied automatically.[/]")
621
+ console.print()
622
+
623
+ # -------------------------
624
+ # Step 3: Security
625
+ # -------------------------
626
+ def _step_security(self) -> None:
627
+ console.print(Panel("[bold]Step 3: Security[/]", border_style="blue"))
628
+
629
+ token = Prompt.ask("MCP auth token (invio = genera)", password=True, default="").strip()
630
+ if not token:
631
+ token = f"kg-mcp-{secrets.token_urlsafe(24)}"
632
+ console.print("[green]✓[/] Token generato.")
633
+ self.config["KG_MCP_TOKEN"] = token
634
+
635
+ # safe defaults
636
+ self.config["MCP_HOST"] = "127.0.0.1"
637
+ self.config["MCP_PORT"] = "8000"
638
+ self.config["LOG_LEVEL"] = "INFO"
639
+ self.config["KG_ALLOWED_ORIGINS"] = "localhost,127.0.0.1"
640
+ console.print()
641
+
642
+ # -------------------------
643
+ # Write .env
644
+ # -------------------------
645
+ def _write_env(self) -> None:
646
+ console.print(Panel("[bold]Step 4: Genero .env[/]", border_style="blue"))
647
+
648
+ if self.env_path.exists():
649
+ bak = backup_file(self.env_path)
650
+ console.print(f"[yellow]![/] Esiste già .env → backup creato: {bak}")
651
+
652
+ # Stable ordering (grouped)
653
+ groups: List[Tuple[str, List[str]]] = [
654
+ ("# --- MCP ---", ["MCP_HOST", "MCP_PORT", "LOG_LEVEL", "KG_MCP_TOKEN", "KG_ALLOWED_ORIGINS"]),
655
+ ("# --- LLM (primary) ---", ["LLM_MODE", "LLM_PRIMARY", "LLM_PROVIDER", "LLM_MODEL"]),
656
+ ("# --- Gemini Direct ---", ["GEMINI_API_KEY", "GEMINI_MODEL", "GEMINI_BASE_URL"]),
657
+ ("# --- LiteLLM (Gemini) ---", ["LITELLM_BASE_URL", "LITELLM_API_KEY", "LITELLM_MODEL"]),
658
+ ("# --- KG Role Models (optional) ---", ["KG_MODEL_DEFAULT", "KG_MODEL_FAST", "KG_MODEL_REASON"]),
659
+ ("# --- Neo4j ---", ["NEO4J_CONFIGURED", "NEO4J_URI", "NEO4J_USER", "NEO4J_PASSWORD", "NEO4J_DOCKER_ENABLED", "NEO4J_DOCKER_AUTOSTART", "NEO4J_APPLY_SCHEMA"]),
660
+ ]
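+ # The generated file ends up looking roughly like:
+ #   # --- MCP ---
+ #   MCP_HOST=127.0.0.1
+ #   MCP_PORT=8000
+ #   ...
+ # one "# --- ... ---" header per group, KEY=value lines below it, empty values skipped.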
661
+
662
+ lines: List[str] = ["# Generated by MCP-KG-Memory setup wizard", ""]
663
+ for header, keys in groups:
664
+ present = [k for k in keys if k in self.config and self.config[k] != ""]
665
+ if not present:
666
+ continue
667
+ lines.append(header)
668
+ for k in keys:
669
+ if k in self.config and self.config[k] != "":
670
+ lines.append(f"{k}={self.config[k]}")
671
+ lines.append("")
672
+
673
+ safe_write_text(self.env_path, "\n".join(lines).rstrip() + "\n")
674
+ console.print(f"[green]✓[/] Scritto: {self.env_path}")
675
+ console.print()
676
+
677
+ # -------------------------
678
+ # Optional: start Neo4j via Docker
679
+ # -------------------------
680
+ def _optional_start_neo4j(self) -> None:
681
+ if self.config.get("NEO4J_DOCKER_AUTOSTART") != "1":
682
+ return
683
+
684
+ console.print(Panel("[bold]Step 5: Avvio Neo4j (Docker)[/]", border_style="blue"))
685
+
686
+ installed, running = docker_available()
687
+ if not installed:
688
+ console.print("[red]✗ Docker non installato.[/]")
689
+ return
690
+ if not running:
691
+ console.print("[red]✗ Docker daemon non in esecuzione. Avvia Docker Desktop e rilancia.[/]")
692
+ return
693
+
694
+ # Check for port conflicts and offer to clean up
695
+ if self._check_port_conflict(7687) or self._check_port_conflict(7474):
696
+ console.print("[yellow]![/] Le porte Neo4j (7474/7687) sono già in uso.")
697
+ # Try to find and stop conflicting containers
698
+ conflicting = self._find_neo4j_containers()
699
+ if conflicting:
700
+ console.print(f"[dim]Container esistenti: {', '.join(conflicting)}[/]")
701
+ console.print("[dim]Nota: verranno rimossi anche i volumi per evitare conflitti password.[/]")
702
+ if Confirm.ask("Fermo e rimuovo i container esistenti (e i volumi)?", default=True):
703
+ for c in conflicting:
704
+ run_cmd(["docker", "stop", c], timeout=30)
705
+ run_cmd(["docker", "rm", "-v", c], timeout=10) # -v removes volumes too
706
+ # Also try to remove named volumes from our compose
707
+ run_cmd(["docker", "volume", "rm", "kg-mcp_neo4j_data", "kg-mcp_neo4j_logs"], timeout=10)
708
+ console.print("[green]✓[/] Container e volumi rimossi.")
709
+ time.sleep(2)
710
+ else:
711
+ console.print("[yellow]Skipping Neo4j start - risolvere conflitto manualmente.[/]")
712
+ return
713
+
714
+ compose_path = self.project_root / "docker-compose.yml"
715
+ if not compose_path.exists():
716
+ self._write_minimal_compose(compose_path)
717
+
718
+ # Ensure password is set in compose or via env
719
+ env = os.environ.copy()
720
+ env["NEO4J_AUTH"] = f"neo4j/{self.config.get('NEO4J_PASSWORD','')}"
721
+ cmd = ["docker", "compose", "up", "-d", "neo4j"]
722
+
723
+ with Progress(SpinnerColumn(), TextColumn("{task.description}"), console=console) as p:
724
+ p.add_task(description="Avvio container neo4j...", total=None)
725
+ r = run_cmd(cmd, cwd=self.project_root, env=env, timeout=120)
726
+
727
+ if r.returncode == 0:
728
+ console.print("[green]✓[/] Neo4j avviato.")
729
+ console.print("[dim]Attendo 10s per startup...[/]")
730
+ time.sleep(10)
731
+ console.print("[dim]Neo4j Browser: http://localhost:7474[/]")
732
+ else:
733
+ console.print("[red]✗ Avvio fallito[/]")
734
+ console.print(r.stderr[:400])
735
+
736
+ console.print()
737
+
738
+ def _check_port_conflict(self, port: int) -> bool:
739
+ """Check if a port is already in use."""
740
+ import socket
741
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
742
+ sock.settimeout(1)
743
+ result = sock.connect_ex(('localhost', port))
744
+ sock.close()
745
+ return result == 0
746
+
747
+ def _find_neo4j_containers(self) -> List[str]:
748
+ """Find any running Neo4j-related containers."""
749
+ try:
750
+ r = run_cmd(["docker", "ps", "--format", "{{.Names}}"], timeout=10)
751
+ if r.returncode == 0 and r.stdout:
752
+ containers = r.stdout.strip().split('\n')
753
+ return [c for c in containers if 'neo4j' in c.lower() or 'kg-' in c.lower()]
754
+ except Exception:
755
+ pass
756
+ return []
757
+
758
+ def _check_neo4j_volumes(self) -> List[str]:
759
+ """Check if Neo4j volumes already exist."""
760
+ try:
761
+ r = run_cmd(["docker", "volume", "ls", "--format", "{{.Name}}"], timeout=10)
762
+ if r.returncode == 0 and r.stdout:
763
+ volumes = r.stdout.strip().split('\n')
764
+ return [v for v in volumes if 'neo4j' in v.lower() or 'kg-mcp' in v.lower()]
765
+ except Exception:
766
+ pass
767
+ return []
768
+
769
+ def _write_minimal_compose(self, path: Path) -> None:
770
+ pw = self.config.get("NEO4J_PASSWORD", "neo4j")
771
+ # Note: removed 'version' attribute as it's obsolete in modern docker compose
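+ # NEO4J_AUTH uses the "<user>/<password>" form expected by the official neo4j image.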
772
+ content = f"""services:
773
+ neo4j:
774
+ image: neo4j:5
775
+ container_name: kg-neo4j
776
+ restart: always
777
+ environment:
778
+ - NEO4J_AUTH=neo4j/{pw}
779
+ - NEO4J_server_memory_pagecache_size=512M
780
+ - NEO4J_server_memory_heap_initial__size=512M
781
+ - NEO4J_server_memory_heap_max__size=1024M
782
+ ports:
783
+ - "7474:7474"
784
+ - "7687:7687"
785
+ volumes:
786
+ - neo4j_data:/data
787
+ - neo4j_logs:/logs
788
+ volumes:
789
+ neo4j_data:
790
+ neo4j_logs:
791
+ """
792
+ safe_write_text(path, content)
793
+ console.print(f"[green]✓[/] Creato docker-compose.yml minimale: {path}")
794
+
795
+ def _get_container_status(self, container_name: str) -> str:
796
+ """Get the status of a Docker container."""
797
+ try:
798
+ r = run_cmd(["docker", "inspect", "--format", "{{.State.Status}}", container_name], timeout=10)
799
+ if r.returncode == 0 and r.stdout:
800
+ return r.stdout.strip()
801
+ return "not_found"
802
+ except Exception:
803
+ return "unknown"
804
+
805
+ def _get_container_health(self, container_name: str) -> str:
806
+ """Get the health status of a Docker container."""
807
+ try:
808
+ r = run_cmd(["docker", "inspect", "--format", "{{if .State.Health}}{{.State.Health.Status}}{{else}}none{{end}}", container_name], timeout=10)
809
+ if r.returncode == 0 and r.stdout:
810
+ return r.stdout.strip()
811
+ return "unknown"
812
+ except Exception:
813
+ return "unknown"
814
+
815
+ def _show_docker_logs(self, container_name: str) -> None:
816
+ """Show last few lines of Docker container logs."""
817
+ console.print()
818
+ console.print(Panel(
819
+ "[bold]📋 Docker Logs (ultime 20 righe)[/]",
820
+ border_style="red"
821
+ ))
822
+ try:
823
+ r = run_cmd(["docker", "logs", "--tail", "20", container_name], timeout=10)
824
+ if r.stdout:
825
+ console.print(f"[dim]{r.stdout[:1000]}[/]")
826
+ if r.stderr:
827
+ console.print(f"[red]{r.stderr[:500]}[/]")
828
+ except Exception as e:
829
+ console.print(f"[red]Impossibile ottenere i log: {e}[/]")
830
+ console.print()
831
+
832
+ def _show_docker_troubleshooting(self) -> None:
833
+ """Show troubleshooting guide for Docker issues."""
834
+ console.print()
835
+ console.print(Panel(
836
+ "[bold red]⚠️ Troubleshooting Docker[/]\n\n"
837
+ "[bold]1. Verifica che Docker sia in esecuzione:[/]\n"
838
+ " [cyan]docker info[/]\n\n"
839
+ "[bold]2. Prova ad avviare manualmente:[/]\n"
840
+ f" [cyan]cd {self.project_root} && docker compose up -d neo4j[/]\n\n"
841
+ "[bold]3. Controlla i container:[/]\n"
842
+ " [cyan]docker ps -a | grep neo4j[/]\n\n"
843
+ "[bold]4. Vedi i log:[/]\n"
844
+ " [cyan]docker logs kg-neo4j[/]\n\n"
845
+ "[dim]Se il problema persiste, prova a riavviare Docker Desktop.[/]",
846
+ title="🔧 Come risolvere",
847
+ border_style="yellow"
848
+ ))
849
+ console.print()
850
+
851
+ # -------------------------
852
+ # Optional: apply schema
853
+ # -------------------------
854
+ def _optional_apply_schema(self) -> None:
855
+ if self.config.get("NEO4J_APPLY_SCHEMA") != "1":
856
+ return
857
+
858
+ console.print(Panel("[bold]Step 6: Apply Neo4j schema (opzionale)[/]", border_style="blue"))
859
+
860
+ # Wait for Neo4j to be ready if we just started Docker
861
+ if self.config.get("NEO4J_DOCKER_AUTOSTART") == "1":
862
+ console.print("[dim]Attendo che Neo4j sia pronto (può richiedere fino a 90s)...[/]")
863
+ neo4j_ready = False
864
+ container_status = "unknown"
865
+
866
+ for attempt in range(45): # Max 90 seconds (45 * 2s)
867
+ # Check container status AND health
868
+ container_status = self._get_container_status("kg-neo4j")
869
+ health_status = self._get_container_health("kg-neo4j")
870
+
871
+ if container_status == "not_found":
872
+ console.print("[red]✗[/] Container kg-neo4j non trovato!")
873
+ self._show_docker_troubleshooting()
874
+ break
875
+ elif container_status == "exited":
876
+ console.print("[red]✗[/] Container kg-neo4j è crashato!")
877
+ self._show_docker_logs("kg-neo4j")
878
+ break
879
+ elif container_status == "running":
880
+ # Check if healthy (Docker health check)
881
+ if health_status == "healthy":
882
+ neo4j_ready = True
883
+ console.print("[green]✓[/] Neo4j è healthy e pronto.")
884
+ break
885
+ elif health_status == "unhealthy":
886
+ console.print("[red]✗[/] Neo4j è unhealthy!")
887
+ self._show_docker_logs("kg-neo4j")
888
+ break
889
+ # If no health check or starting, try port
890
+ elif health_status in ("none", "starting"):
891
+ try:
892
+ import socket
893
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
894
+ sock.settimeout(2)
895
+ result = sock.connect_ex(('localhost', 7687))
896
+ sock.close()
897
+ if result == 0:
898
+ time.sleep(5) # Extra time for Neo4j to fully initialize
899
+ neo4j_ready = True
900
+ console.print("[green]✓[/] Neo4j è pronto.")
901
+ break
902
+ except Exception:
903
+ pass
904
+
905
+ time.sleep(2)
906
+ if attempt % 5 == 0 and attempt > 0:
907
+ status_str = f"container: {container_status}"
908
+ if health_status not in ("none", "unknown"):
909
+ status_str += f", health: {health_status}"
910
+ console.print(f"[dim] ...ancora in attesa ({attempt * 2}s) - {status_str}[/]")
911
+
912
+ if not neo4j_ready and container_status == "running":
913
+ console.print("[yellow]![/] Neo4j non risponde dopo 90s.")
914
+ self._show_docker_logs("kg-neo4j")
915
+ console.print("[yellow]Provo comunque ad applicare lo schema...[/]")
916
+
917
+ # Try running module if present
918
+ server_dir = self.project_root / "server"
919
+ cwd = server_dir if server_dir.exists() else self.project_root
920
+
921
+ env = os.environ.copy()
922
+ for k in ["NEO4J_URI", "NEO4J_USER", "NEO4J_PASSWORD"]:
923
+ if k in self.config:
924
+ env[k] = self.config[k]
925
+
926
+ # Add server/src to PYTHONPATH so kg_mcp is found
927
+ src_dir = server_dir / "src"
928
+ if src_dir.exists():
929
+ current_path = env.get("PYTHONPATH", "")
930
+ env["PYTHONPATH"] = f"{src_dir.absolute()}{os.pathsep}{current_path}"
931
+
932
+ cmd = [sys.executable, "-m", "kg_mcp.kg.apply_schema"]
933
+
934
+ with Progress(SpinnerColumn(), TextColumn("{task.description}"), console=console) as p:
935
+ p.add_task(description="Applico schema...", total=None)
936
+ try:
937
+ r = run_cmd(cmd, cwd=cwd, env=env, timeout=120)
938
+ if r.returncode == 0:
939
+ console.print("[green]✓[/] Schema applicato.")
940
+ else:
941
+ console.print("[yellow]![/] apply_schema ha restituito errori (potrebbe non essere presente nel progetto):")
942
+ console.print((r.stderr or r.stdout)[:500])
943
+ except Exception as e:
944
+ console.print("[yellow]![/] Impossibile eseguire apply_schema (modulo mancante o errore runtime).")
945
+ console.print(str(e)[:300])
946
+
947
+ console.print()
948
+
949
+ # -------------------------
950
+ # Optional: Antigravity config
951
+ # -------------------------
952
+ def _optional_antigravity(self) -> None:
953
+ if not Confirm.ask("Vuoi aggiornare Antigravity MCP config?", default=True):
954
+ return
955
+
956
+ console.print(Panel("[bold]Step 7: Antigravity MCP config[/]", border_style="blue"))
957
+
958
+ cfg_path = Path.home() / ".gemini" / "antigravity" / "mcp_config.json"
959
+ cfg_path.parent.mkdir(parents=True, exist_ok=True)
960
+
961
+ existing: Dict[str, Any] = {}
962
+ if cfg_path.exists():
963
+ try:
964
+ existing = json.loads(cfg_path.read_text(encoding="utf-8"))
965
+ except Exception:
966
+ existing = {}
967
+
968
+ # Determine python command to run kg_mcp (prefer venv if exists)
969
+ venv_python = self.project_root / "server" / ".venv" / ("Scripts" if sys.platform.startswith("win") else "bin") / ("python.exe" if sys.platform.startswith("win") else "python")
970
+ python_cmd = str(venv_python) if venv_python.exists() else sys.executable
971
+
972
+ # Build env for server execution via stdio
973
+ env = {
974
+ "LOG_LEVEL": self.config.get("LOG_LEVEL", "INFO"),
975
+ "KG_MCP_TOKEN": self.config.get("KG_MCP_TOKEN", ""),
976
+ "LLM_MODE": self.config.get("LLM_MODE", ""),
977
+ "LLM_PRIMARY": self.config.get("LLM_PRIMARY", ""),
978
+ "LLM_PROVIDER": self.config.get("LLM_PROVIDER", ""),
979
+ "LLM_MODEL": self.config.get("LLM_MODEL", ""),
980
+ }
981
+
982
+ # LLM vars
983
+ for k in [
984
+ "GEMINI_API_KEY",
985
+ "GEMINI_MODEL",
986
+ "GEMINI_BASE_URL",
987
+ "LITELLM_BASE_URL",
988
+ "LITELLM_API_KEY",
989
+ "LITELLM_MODEL",
990
+ "KG_MODEL_DEFAULT",
991
+ "KG_MODEL_FAST",
992
+ "KG_MODEL_REASON",
993
+ ]:
994
+ if k in self.config and self.config[k] != "":
995
+ env[k] = self.config[k]
996
+
997
+ # Neo4j vars
998
+ for k in ["NEO4J_URI", "NEO4J_USER", "NEO4J_PASSWORD"]:
999
+ if k in self.config and self.config[k] != "":
1000
+ env[k] = self.config[k]
1001
+
1002
+ server_cfg = {
1003
+ "command": python_cmd,
1004
+ "args": ["-m", "kg_mcp", "--transport", "stdio"],
1005
+ "env": env,
1006
+ }
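+ # Resulting mcp_config.json entry (roughly):
+ # "mcpServers": {"kg-memory": {"command": "<python>", "args": ["-m", "kg_mcp", "--transport", "stdio"], "env": {...}}}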
1007
+
1008
+ if "mcpServers" not in existing or not isinstance(existing["mcpServers"], dict):
1009
+ existing["mcpServers"] = {}
1010
+ existing["mcpServers"]["kg-memory"] = server_cfg
1011
+
1012
+ backup_file(cfg_path)
1013
+ safe_write_text(cfg_path, json.dumps(existing, indent=2))
1014
+ console.print(f"[green]✓[/] Aggiornato: {cfg_path}")
1015
+
1016
+ console.print("\n[bold]In Antigravity:[/]")
1017
+ console.print("1) Apri sidebar Agent → MCP Servers")
1018
+ console.print("2) Manage/Refresh")
1019
+ console.print("3) Dovresti vedere 'kg-memory'\n")
1020
+
1021
+ # -------------------------
1022
+ # Summary
1023
+ # -------------------------
1024
+ def _summary(self) -> None:
1025
+ console.print(Panel("[bold green]✓ Setup completato[/]", border_style="green"))
1026
+
1027
+ table = Table(title="Riepilogo", show_header=True, header_style="bold cyan")
1028
+ table.add_column("Chiave", style="cyan")
1029
+ table.add_column("Valore", style="green")
1030
+
1031
+ # show key info
1032
+ show_keys = [
1033
+ "LLM_MODE",
1034
+ "LLM_PRIMARY",
1035
+ "LLM_PROVIDER",
1036
+ "LLM_MODEL",
1037
+ "GEMINI_MODEL",
1038
+ "LITELLM_MODEL",
1039
+ "NEO4J_URI",
1040
+ "MCP_HOST",
1041
+ "MCP_PORT",
1042
+ "LOG_LEVEL",
1043
+ "KG_MCP_TOKEN",
1044
+ "ENV_PATH",
1045
+ ]
1046
+
1047
+ temp = dict(self.config)
1048
+ temp["ENV_PATH"] = str(self.env_path)
1049
+
1050
+ for k in show_keys:
1051
+ if k not in temp or temp[k] == "":
1052
+ continue
1053
+ v = temp[k]
1054
+ if "KEY" in k or "PASSWORD" in k or "TOKEN" in k:
1055
+ v = mask(v, keep=6)
1056
+ table.add_row(k, v)
1057
+
1058
+ console.print(table)
1059
+
1060
+ # Neo4j Quick Start Tutorial
1061
+ if self.config.get("NEO4J_CONFIGURED") == "1":
1062
+ neo4j_pass = self.config.get("NEO4J_PASSWORD", "")
1063
+ console.print()
1064
+ console.print(Panel(
1065
+ "[bold cyan]📊 Neo4j Browser - Visualizza il Knowledge Graph[/]\n\n"
1066
+ f"[bold]URL:[/] [link=http://localhost:7474]http://localhost:7474[/link]\n"
1067
+ f"[bold]User:[/] neo4j\n"
1068
+ f"[bold]Password:[/] {neo4j_pass}\n\n"
1069
+ "[bold]Query per vedere il grafo:[/]\n"
1070
+ "[cyan]MATCH (n)-[r]->(m) RETURN n, r, m LIMIT 100[/]\n\n"
1071
+ "[dim]Copia la query sopra nel Neo4j Browser per visualizzare tutti i nodi e relazioni![/]",
1072
+ title="🔗 Quick Start",
1073
+ border_style="cyan"
1074
+ ))
1075
+
1076
+ # Antigravity restart reminder
1077
+ console.print()
1078
+ console.print(Panel(
1079
+ "[bold yellow]⚠️ Se Antigravity era già aperto:[/]\n\n"
1080
+ "1. Chiudi completamente Antigravity\n"
1081
+ "2. Riaprilo per caricare la nuova configurazione MCP\n\n"
1082
+ "[dim]Oppure: Agent sidebar → MCP Servers → Manage → Refresh[/]",
1083
+ title="🔄 Attiva KG-Memory",
1084
+ border_style="yellow"
1085
+ ))
1086
+
1087
+ console.print(f"\n[bold]File .env:[/] {self.env_path}\n")
1088
+
1089
+
1090
+ def main():
1091
+ """Entry point for kg-mcp-setup command."""
1092
+ try:
1093
+ SetupWizard().run()
1094
+ except KeyboardInterrupt:
1095
+ console.print("\n[yellow]Interrotto dall'utente.[/]")
1096
+ sys.exit(1)
1097
+
1098
+
1099
+ if __name__ == "__main__":
1100
+ main()