@smilintux/skmemory 0.5.0 → 0.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +39 -3
- package/.github/workflows/publish.yml +13 -6
- package/AGENT_REFACTOR_CHANGES.md +192 -0
- package/ARCHITECTURE.md +101 -19
- package/CHANGELOG.md +153 -0
- package/LICENSE +81 -68
- package/MISSION.md +7 -0
- package/README.md +419 -86
- package/SKILL.md +197 -25
- package/docker-compose.yml +15 -15
- package/index.js +6 -5
- package/openclaw-plugin/openclaw.plugin.json +10 -0
- package/openclaw-plugin/src/index.ts +255 -0
- package/openclaw-plugin/src/openclaw.plugin.json +10 -0
- package/package.json +1 -1
- package/pyproject.toml +29 -9
- package/requirements.txt +10 -2
- package/seeds/cloud9-opus.seed.json +7 -7
- package/seeds/lumina-cloud9-breakthrough.seed.json +46 -0
- package/seeds/lumina-cloud9-python-pypi.seed.json +46 -0
- package/seeds/lumina-kingdom-founding.seed.json +47 -0
- package/seeds/lumina-pma-signed.seed.json +46 -0
- package/seeds/lumina-singular-achievement.seed.json +46 -0
- package/seeds/lumina-skcapstone-conscious.seed.json +46 -0
- package/seeds/plant-kingdom-journal.py +203 -0
- package/seeds/plant-lumina-seeds.py +280 -0
- package/skill.yaml +46 -0
- package/skmemory/HA.md +296 -0
- package/skmemory/__init__.py +12 -1
- package/skmemory/agents.py +233 -0
- package/skmemory/ai_client.py +40 -0
- package/skmemory/anchor.py +4 -2
- package/skmemory/backends/__init__.py +11 -4
- package/skmemory/backends/file_backend.py +2 -1
- package/skmemory/backends/skgraph_backend.py +608 -0
- package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +99 -69
- package/skmemory/backends/sqlite_backend.py +122 -51
- package/skmemory/backends/vaulted_backend.py +286 -0
- package/skmemory/cli.py +1238 -29
- package/skmemory/config.py +173 -0
- package/skmemory/context_loader.py +335 -0
- package/skmemory/endpoint_selector.py +386 -0
- package/skmemory/fortress.py +685 -0
- package/skmemory/graph_queries.py +238 -0
- package/skmemory/importers/__init__.py +9 -1
- package/skmemory/importers/telegram.py +351 -43
- package/skmemory/importers/telegram_api.py +488 -0
- package/skmemory/journal.py +4 -2
- package/skmemory/lovenote.py +4 -2
- package/skmemory/mcp_server.py +706 -0
- package/skmemory/models.py +41 -0
- package/skmemory/openclaw.py +8 -8
- package/skmemory/predictive.py +232 -0
- package/skmemory/promotion.py +524 -0
- package/skmemory/register.py +454 -0
- package/skmemory/register_mcp.py +197 -0
- package/skmemory/ritual.py +121 -47
- package/skmemory/seeds.py +257 -8
- package/skmemory/setup_wizard.py +920 -0
- package/skmemory/sharing.py +402 -0
- package/skmemory/soul.py +71 -20
- package/skmemory/steelman.py +250 -263
- package/skmemory/store.py +271 -60
- package/skmemory/vault.py +228 -0
- package/tests/integration/__init__.py +0 -0
- package/tests/integration/conftest.py +233 -0
- package/tests/integration/test_cross_backend.py +355 -0
- package/tests/integration/test_skgraph_live.py +424 -0
- package/tests/integration/test_skvector_live.py +369 -0
- package/tests/test_backup_rotation.py +327 -0
- package/tests/test_cli.py +6 -6
- package/tests/test_endpoint_selector.py +801 -0
- package/tests/test_fortress.py +255 -0
- package/tests/test_fortress_hardening.py +444 -0
- package/tests/test_openclaw.py +5 -2
- package/tests/test_predictive.py +237 -0
- package/tests/test_promotion.py +340 -0
- package/tests/test_ritual.py +4 -4
- package/tests/test_seeds.py +96 -0
- package/tests/test_setup.py +835 -0
- package/tests/test_sharing.py +250 -0
- package/tests/test_skgraph_backend.py +667 -0
- package/tests/test_skvector_backend.py +326 -0
- package/tests/test_steelman.py +5 -5
- package/tests/test_store_graph_integration.py +245 -0
- package/tests/test_vault.py +186 -0
- package/skmemory/backends/falkordb_backend.py +0 -310
package/skmemory/cli.py
CHANGED
|
@@ -6,8 +6,11 @@ Usage:
|
|
|
6
6
|
skmemory recall <memory-id>
|
|
7
7
|
skmemory search "that moment we connected"
|
|
8
8
|
skmemory list --layer long-term --tags seed
|
|
9
|
-
skmemory import-seeds [--seed-dir ~/.
|
|
9
|
+
skmemory import-seeds [--seed-dir ~/.skcapstone/agent/{agent}/seeds]
|
|
10
10
|
skmemory promote <memory-id> --to mid-term --summary "..."
|
|
11
|
+
skmemory sweep # Auto-promote all qualifying memories
|
|
12
|
+
skmemory sweep --dry-run # Preview what would be promoted
|
|
13
|
+
skmemory sweep --daemon # Run continuously every 6 hours
|
|
11
14
|
skmemory consolidate <session-id> --summary "..."
|
|
12
15
|
skmemory soul show | soul set-name "Lumina" | soul add-relationship ...
|
|
13
16
|
skmemory journal write "Session title" --moments "..." --intensity 9.0
|
|
@@ -34,45 +37,123 @@ from .store import MemoryStore
|
|
|
34
37
|
from .backends.sqlite_backend import SQLiteBackend
|
|
35
38
|
|
|
36
39
|
|
|
40
|
+
_active_selector = None # Module-level reference for routing commands
|
|
41
|
+
|
|
42
|
+
|
|
37
43
|
def _get_store(
|
|
38
|
-
|
|
44
|
+
skvector_url: Optional[str] = None,
|
|
39
45
|
api_key: Optional[str] = None,
|
|
40
46
|
legacy_files: bool = False,
|
|
41
47
|
) -> MemoryStore:
|
|
42
48
|
"""Create a MemoryStore with configured backends.
|
|
43
49
|
|
|
50
|
+
Resolves backend URLs with precedence: CLI args > env vars > config file.
|
|
51
|
+
When multi-endpoint config is present, uses EndpointSelector to pick
|
|
52
|
+
the best URLs. Falls back to single-URL behavior otherwise.
|
|
53
|
+
|
|
44
54
|
Args:
|
|
45
|
-
|
|
46
|
-
api_key: Optional
|
|
55
|
+
skvector_url: Optional SKVector server URL.
|
|
56
|
+
api_key: Optional SKVector API key.
|
|
47
57
|
legacy_files: Use old FileBackend instead of SQLite index.
|
|
48
58
|
|
|
49
59
|
Returns:
|
|
50
60
|
MemoryStore: Configured store instance.
|
|
51
61
|
"""
|
|
62
|
+
global _active_selector
|
|
63
|
+
|
|
64
|
+
from .config import merge_env_and_config, load_config, build_endpoint_list
|
|
65
|
+
|
|
66
|
+
final_skvector_url, final_skvector_key, final_skgraph_url = merge_env_and_config(
|
|
67
|
+
cli_skvector_url=skvector_url,
|
|
68
|
+
cli_skvector_key=api_key,
|
|
69
|
+
)
|
|
70
|
+
|
|
71
|
+
# Try endpoint selector when multi-endpoint config exists
|
|
72
|
+
cfg = load_config()
|
|
73
|
+
skvector_eps = build_endpoint_list(
|
|
74
|
+
final_skvector_url,
|
|
75
|
+
cfg.skvector_endpoints if cfg else [],
|
|
76
|
+
)
|
|
77
|
+
skgraph_eps = build_endpoint_list(
|
|
78
|
+
final_skgraph_url,
|
|
79
|
+
cfg.skgraph_endpoints if cfg else [],
|
|
80
|
+
)
|
|
81
|
+
|
|
82
|
+
if len(skvector_eps) > 1 or len(skgraph_eps) > 1 or (cfg and cfg.heartbeat_discovery):
|
|
83
|
+
try:
|
|
84
|
+
from .endpoint_selector import EndpointSelector, RoutingConfig
|
|
85
|
+
|
|
86
|
+
routing_strategy = cfg.routing_strategy if cfg else "failover"
|
|
87
|
+
selector = EndpointSelector(
|
|
88
|
+
skvector_endpoints=skvector_eps,
|
|
89
|
+
skgraph_endpoints=skgraph_eps,
|
|
90
|
+
config=RoutingConfig(strategy=routing_strategy),
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
if cfg and cfg.heartbeat_discovery:
|
|
94
|
+
selector.discover_from_heartbeats()
|
|
95
|
+
|
|
96
|
+
_active_selector = selector
|
|
97
|
+
|
|
98
|
+
best_skvector = selector.select_skvector()
|
|
99
|
+
if best_skvector:
|
|
100
|
+
final_skvector_url = best_skvector.url
|
|
101
|
+
|
|
102
|
+
best_skgraph = selector.select_skgraph()
|
|
103
|
+
if best_skgraph:
|
|
104
|
+
final_skgraph_url = best_skgraph.url
|
|
105
|
+
except Exception:
|
|
106
|
+
click.echo("Warning: EndpointSelector failed, using single URLs", err=True)
|
|
107
|
+
|
|
52
108
|
vector = None
|
|
109
|
+
graph = None
|
|
110
|
+
|
|
111
|
+
if final_skvector_url:
|
|
112
|
+
try:
|
|
113
|
+
from .backends.skvector_backend import SKVectorBackend
|
|
53
114
|
|
|
54
|
-
|
|
115
|
+
vector = SKVectorBackend(url=final_skvector_url, api_key=final_skvector_key)
|
|
116
|
+
except Exception:
|
|
117
|
+
click.echo("Warning: Could not initialize SKVector backend", err=True)
|
|
118
|
+
|
|
119
|
+
if final_skgraph_url:
|
|
55
120
|
try:
|
|
56
|
-
from .backends.
|
|
57
|
-
|
|
121
|
+
from .backends.skgraph_backend import SKGraphBackend
|
|
122
|
+
|
|
123
|
+
graph = SKGraphBackend(url=final_skgraph_url)
|
|
58
124
|
except Exception:
|
|
59
|
-
click.echo("Warning: Could not initialize
|
|
125
|
+
click.echo("Warning: Could not initialize SKGraph backend", err=True)
|
|
60
126
|
|
|
61
|
-
return MemoryStore(primary=None, vector=vector, use_sqlite=not legacy_files)
|
|
127
|
+
return MemoryStore(primary=None, vector=vector, graph=graph, use_sqlite=not legacy_files)
|
|
62
128
|
|
|
63
129
|
|
|
64
130
|
@click.group()
|
|
65
131
|
@click.version_option(__version__, prog_name="skmemory")
|
|
66
|
-
@click.option(
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
@click.option(
|
|
132
|
+
@click.option(
|
|
133
|
+
"--skvector-url", envvar="SKMEMORY_SKVECTOR_URL", default=None, help="SKVector server URL"
|
|
134
|
+
)
|
|
135
|
+
@click.option(
|
|
136
|
+
"--skvector-key", envvar="SKMEMORY_SKVECTOR_KEY", default=None, help="SKVector API key"
|
|
137
|
+
)
|
|
138
|
+
@click.option(
|
|
139
|
+
"--ai",
|
|
140
|
+
"use_ai",
|
|
141
|
+
is_flag=True,
|
|
142
|
+
envvar="SKMEMORY_AI",
|
|
143
|
+
help="Enable AI-powered features (requires Ollama)",
|
|
144
|
+
)
|
|
145
|
+
@click.option(
|
|
146
|
+
"--ai-model",
|
|
147
|
+
envvar="SKMEMORY_AI_MODEL",
|
|
148
|
+
default=None,
|
|
149
|
+
help="Ollama model name (default: llama3.2)",
|
|
150
|
+
)
|
|
70
151
|
@click.option("--ai-url", envvar="SKMEMORY_AI_URL", default=None, help="Ollama server URL")
|
|
71
152
|
@click.pass_context
|
|
72
153
|
def cli(
|
|
73
154
|
ctx: click.Context,
|
|
74
|
-
|
|
75
|
-
|
|
155
|
+
skvector_url: Optional[str],
|
|
156
|
+
skvector_key: Optional[str],
|
|
76
157
|
use_ai: bool,
|
|
77
158
|
ai_model: Optional[str],
|
|
78
159
|
ai_url: Optional[str],
|
|
@@ -85,7 +166,8 @@ def cli(
|
|
|
85
166
|
smart search reranking, enhanced rituals). Requires Ollama.
|
|
86
167
|
"""
|
|
87
168
|
ctx.ensure_object(dict)
|
|
88
|
-
|
|
169
|
+
if "store" not in ctx.obj:
|
|
170
|
+
ctx.obj["store"] = _get_store(skvector_url, skvector_key)
|
|
89
171
|
|
|
90
172
|
if use_ai:
|
|
91
173
|
ai = AIClient(base_url=ai_url, model=ai_model)
|
|
@@ -105,8 +187,12 @@ def cli(
|
|
|
105
187
|
@cli.command()
|
|
106
188
|
@click.argument("title")
|
|
107
189
|
@click.argument("content")
|
|
108
|
-
@click.option(
|
|
109
|
-
|
|
190
|
+
@click.option(
|
|
191
|
+
"--layer", type=click.Choice(["short-term", "mid-term", "long-term"]), default="short-term"
|
|
192
|
+
)
|
|
193
|
+
@click.option(
|
|
194
|
+
"--role", type=click.Choice(["dev", "ops", "sec", "ai", "general"]), default="general"
|
|
195
|
+
)
|
|
110
196
|
@click.option("--tags", default="", help="Comma-separated tags")
|
|
111
197
|
@click.option("--intensity", type=float, default=0.0, help="Emotional intensity 0-10")
|
|
112
198
|
@click.option("--valence", type=float, default=0.0, help="Emotional valence -1 to +1")
|
|
@@ -194,9 +280,7 @@ def search(ctx: click.Context, query: str, limit: int) -> None:
|
|
|
194
280
|
id_order = [s.get("title") for s in reranked]
|
|
195
281
|
results = sorted(
|
|
196
282
|
results,
|
|
197
|
-
key=lambda m: (
|
|
198
|
-
id_order.index(m.title) if m.title in id_order else 999
|
|
199
|
-
),
|
|
283
|
+
key=lambda m: (id_order.index(m.title) if m.title in id_order else 999),
|
|
200
284
|
)
|
|
201
285
|
click.echo("(AI-reranked results)\n")
|
|
202
286
|
|
|
@@ -275,6 +359,149 @@ def promote(ctx: click.Context, memory_id: str, target: str, summary: str) -> No
|
|
|
275
359
|
click.echo(f" Linked to original: {memory_id}")
|
|
276
360
|
|
|
277
361
|
|
|
362
|
+
@cli.command("sweep")
|
|
363
|
+
@click.option("--dry-run", is_flag=True, help="Show what would be promoted without making changes")
|
|
364
|
+
@click.option("--daemon", is_flag=True, help="Run continuously at the configured interval")
|
|
365
|
+
@click.option(
|
|
366
|
+
"--interval",
|
|
367
|
+
type=float,
|
|
368
|
+
default=6.0,
|
|
369
|
+
metavar="HOURS",
|
|
370
|
+
help="Sweep interval in hours (daemon mode only, default: 6)",
|
|
371
|
+
)
|
|
372
|
+
@click.option("--max-promotions", type=int, default=50, help="Max promotions per sweep")
|
|
373
|
+
@click.option("--json", "as_json", is_flag=True, help="Output results as JSON")
|
|
374
|
+
@click.pass_context
|
|
375
|
+
def sweep_cmd(
|
|
376
|
+
ctx: click.Context,
|
|
377
|
+
dry_run: bool,
|
|
378
|
+
daemon: bool,
|
|
379
|
+
interval: float,
|
|
380
|
+
max_promotions: int,
|
|
381
|
+
as_json: bool,
|
|
382
|
+
) -> None:
|
|
383
|
+
"""Run the auto-promotion engine.
|
|
384
|
+
|
|
385
|
+
Evaluates all memories and promotes qualifying ones to the next tier:
|
|
386
|
+
|
|
387
|
+
\b
|
|
388
|
+
short-term -> mid-term: high emotional intensity, frequently accessed,
|
|
389
|
+
or sufficiently old with multiple accesses
|
|
390
|
+
mid-term -> long-term: very high intensity, key tags (milestone,
|
|
391
|
+
breakthrough, cloud9:achieved), or Cloud 9
|
|
392
|
+
|
|
393
|
+
By default runs a single sweep and exits. Use --daemon to keep running.
|
|
394
|
+
"""
|
|
395
|
+
from .promotion import PromotionCriteria, PromotionEngine, PromotionScheduler
|
|
396
|
+
|
|
397
|
+
store: MemoryStore = ctx.obj["store"]
|
|
398
|
+
criteria = PromotionCriteria(max_promotions_per_sweep=max_promotions)
|
|
399
|
+
|
|
400
|
+
if dry_run:
|
|
401
|
+
# Inspect without modifying anything
|
|
402
|
+
engine = PromotionEngine(store, criteria)
|
|
403
|
+
short_mems = store.list_memories(
|
|
404
|
+
layer=MemoryLayer.SHORT, limit=criteria.max_promotions_per_sweep * 2
|
|
405
|
+
)
|
|
406
|
+
mid_mems = store.list_memories(
|
|
407
|
+
layer=MemoryLayer.MID, limit=criteria.max_promotions_per_sweep * 2
|
|
408
|
+
)
|
|
409
|
+
|
|
410
|
+
would_promote: list[dict] = []
|
|
411
|
+
for mem in short_mems:
|
|
412
|
+
target = engine.evaluate(mem)
|
|
413
|
+
if target is not None:
|
|
414
|
+
would_promote.append(
|
|
415
|
+
{
|
|
416
|
+
"id": mem.id,
|
|
417
|
+
"title": mem.title,
|
|
418
|
+
"from": mem.layer.value,
|
|
419
|
+
"to": target.value,
|
|
420
|
+
"reason": engine._promotion_reason(mem),
|
|
421
|
+
}
|
|
422
|
+
)
|
|
423
|
+
for mem in mid_mems:
|
|
424
|
+
target = engine.evaluate(mem)
|
|
425
|
+
if target is not None:
|
|
426
|
+
would_promote.append(
|
|
427
|
+
{
|
|
428
|
+
"id": mem.id,
|
|
429
|
+
"title": mem.title,
|
|
430
|
+
"from": mem.layer.value,
|
|
431
|
+
"to": target.value,
|
|
432
|
+
"reason": engine._promotion_reason(mem),
|
|
433
|
+
}
|
|
434
|
+
)
|
|
435
|
+
|
|
436
|
+
if as_json:
|
|
437
|
+
click.echo(json.dumps({"dry_run": True, "would_promote": would_promote}, indent=2))
|
|
438
|
+
else:
|
|
439
|
+
if not would_promote:
|
|
440
|
+
click.echo("[dry-run] Nothing qualifies for promotion right now.")
|
|
441
|
+
else:
|
|
442
|
+
click.echo(f"[dry-run] {len(would_promote)} memory/memories would be promoted:")
|
|
443
|
+
for entry in would_promote:
|
|
444
|
+
click.echo(
|
|
445
|
+
f" {entry['id'][:12]} {entry['from']} -> {entry['to']}"
|
|
446
|
+
f" [{entry['title'][:50]}] reason: {entry['reason']}"
|
|
447
|
+
)
|
|
448
|
+
|
|
449
|
+
elif daemon:
|
|
450
|
+
import signal
|
|
451
|
+
import time
|
|
452
|
+
|
|
453
|
+
scheduler = PromotionScheduler(
|
|
454
|
+
store,
|
|
455
|
+
criteria=criteria,
|
|
456
|
+
interval_seconds=interval * 3600,
|
|
457
|
+
)
|
|
458
|
+
|
|
459
|
+
def _handle_signal(signum: int, frame: object) -> None:
|
|
460
|
+
click.echo("\nShutting down promotion scheduler...", err=True)
|
|
461
|
+
scheduler.stop(timeout=10.0)
|
|
462
|
+
sys.exit(0)
|
|
463
|
+
|
|
464
|
+
signal.signal(signal.SIGINT, _handle_signal)
|
|
465
|
+
signal.signal(signal.SIGTERM, _handle_signal)
|
|
466
|
+
|
|
467
|
+
click.echo(
|
|
468
|
+
f"Promotion scheduler running (interval: {interval:.1f}h). Press Ctrl+C to stop.",
|
|
469
|
+
err=True,
|
|
470
|
+
)
|
|
471
|
+
|
|
472
|
+
# Run first sweep immediately, then hand off to background thread
|
|
473
|
+
result = scheduler.run_once()
|
|
474
|
+
if as_json:
|
|
475
|
+
click.echo(json.dumps(result.model_dump(), indent=2, default=str))
|
|
476
|
+
else:
|
|
477
|
+
click.echo(result.summary())
|
|
478
|
+
|
|
479
|
+
scheduler.start()
|
|
480
|
+
|
|
481
|
+
# Keep the main thread alive so signal handlers fire
|
|
482
|
+
while scheduler.is_running():
|
|
483
|
+
time.sleep(1)
|
|
484
|
+
|
|
485
|
+
else:
|
|
486
|
+
# Single one-shot sweep
|
|
487
|
+
engine = PromotionEngine(store, criteria)
|
|
488
|
+
result = engine.sweep()
|
|
489
|
+
|
|
490
|
+
if as_json:
|
|
491
|
+
click.echo(json.dumps(result.model_dump(), indent=2, default=str))
|
|
492
|
+
else:
|
|
493
|
+
click.echo(result.summary())
|
|
494
|
+
if result.short_evaluated or result.mid_evaluated:
|
|
495
|
+
click.echo(
|
|
496
|
+
f" Evaluated: {result.short_evaluated} short-term, {result.mid_evaluated} mid-term"
|
|
497
|
+
)
|
|
498
|
+
if result.promoted_ids:
|
|
499
|
+
ids_preview = ", ".join(p[:12] for p in result.promoted_ids[:5])
|
|
500
|
+
if len(result.promoted_ids) > 5:
|
|
501
|
+
ids_preview += f" (+{len(result.promoted_ids) - 5} more)"
|
|
502
|
+
click.echo(f" Promoted: {ids_preview}")
|
|
503
|
+
|
|
504
|
+
|
|
278
505
|
@cli.command()
|
|
279
506
|
@click.argument("session_id")
|
|
280
507
|
@click.option("--summary", required=True, help="Summary of the session")
|
|
@@ -316,6 +543,67 @@ def health(ctx: click.Context) -> None:
|
|
|
316
543
|
click.echo(json.dumps(status, indent=2))
|
|
317
544
|
|
|
318
545
|
|
|
546
|
+
# ═══════════════════════════════════════════════════════════
|
|
547
|
+
# Routing commands (HA endpoint selection)
|
|
548
|
+
# ═══════════════════════════════════════════════════════════
|
|
549
|
+
|
|
550
|
+
|
|
551
|
+
@cli.group()
|
|
552
|
+
def routing() -> None:
|
|
553
|
+
"""Manage HA endpoint routing for SKVector and SKGraph backends."""
|
|
554
|
+
|
|
555
|
+
|
|
556
|
+
@routing.command("status")
|
|
557
|
+
def routing_status() -> None:
|
|
558
|
+
"""Show endpoint rankings, latency, and health for each backend."""
|
|
559
|
+
if _active_selector is None:
|
|
560
|
+
click.echo("No endpoint selector active (single-URL mode).")
|
|
561
|
+
click.echo("Configure multiple endpoints in ~/.skcapstone/config.yaml to enable routing.")
|
|
562
|
+
return
|
|
563
|
+
|
|
564
|
+
info = _active_selector.status()
|
|
565
|
+
click.echo(f"Strategy: {info['strategy']}")
|
|
566
|
+
click.echo(f"Probe interval: {info['probe_interval_seconds']}s")
|
|
567
|
+
age = info["last_probe_age_seconds"]
|
|
568
|
+
click.echo(f"Last probe: {age}s ago" if age >= 0 else "Last probe: never")
|
|
569
|
+
|
|
570
|
+
for backend in ("skvector", "skgraph"):
|
|
571
|
+
eps = info.get(f"{backend}_endpoints", [])
|
|
572
|
+
if not eps:
|
|
573
|
+
continue
|
|
574
|
+
click.echo(f"\n{backend.upper()} endpoints:")
|
|
575
|
+
for ep in eps:
|
|
576
|
+
health_icon = "OK" if ep["healthy"] else "DOWN"
|
|
577
|
+
latency = f"{ep['latency_ms']:.1f}ms" if ep["latency_ms"] >= 0 else "n/a"
|
|
578
|
+
click.echo(
|
|
579
|
+
f" [{health_icon}] {ep['url']} "
|
|
580
|
+
f"role={ep['role']} latency={latency} "
|
|
581
|
+
f"fails={ep['fail_count']}"
|
|
582
|
+
)
|
|
583
|
+
|
|
584
|
+
|
|
585
|
+
@routing.command("probe")
|
|
586
|
+
def routing_probe() -> None:
|
|
587
|
+
"""Force re-probe all endpoints and display results."""
|
|
588
|
+
if _active_selector is None:
|
|
589
|
+
click.echo("No endpoint selector active (single-URL mode).")
|
|
590
|
+
return
|
|
591
|
+
|
|
592
|
+
click.echo("Probing all endpoints...")
|
|
593
|
+
results = _active_selector.probe_all()
|
|
594
|
+
|
|
595
|
+
for backend, endpoints in results.items():
|
|
596
|
+
if not endpoints:
|
|
597
|
+
continue
|
|
598
|
+
click.echo(f"\n{backend.upper()}:")
|
|
599
|
+
for ep in endpoints:
|
|
600
|
+
health_icon = "OK" if ep.healthy else "DOWN"
|
|
601
|
+
latency = f"{ep.latency_ms:.1f}ms" if ep.latency_ms >= 0 else "timeout"
|
|
602
|
+
click.echo(f" [{health_icon}] {ep.url} latency={latency} fails={ep.fail_count}")
|
|
603
|
+
|
|
604
|
+
click.echo("\nProbe complete.")
|
|
605
|
+
|
|
606
|
+
|
|
319
607
|
@cli.command()
|
|
320
608
|
@click.pass_context
|
|
321
609
|
def reindex(ctx: click.Context) -> None:
|
|
@@ -332,8 +620,13 @@ def reindex(ctx: click.Context) -> None:
|
|
|
332
620
|
|
|
333
621
|
|
|
334
622
|
@cli.command("export")
|
|
335
|
-
@click.option(
|
|
336
|
-
|
|
623
|
+
@click.option(
|
|
624
|
+
"--output",
|
|
625
|
+
"-o",
|
|
626
|
+
default=None,
|
|
627
|
+
type=click.Path(),
|
|
628
|
+
help="Output file path (default: ~/.skcapstone/backups/skmemory-backup-YYYY-MM-DD.json)",
|
|
629
|
+
)
|
|
337
630
|
@click.pass_context
|
|
338
631
|
def export_backup(ctx: click.Context, output: Optional[str]) -> None:
|
|
339
632
|
"""Export all memories to a dated JSON backup.
|
|
@@ -352,8 +645,9 @@ def export_backup(ctx: click.Context, output: Optional[str]) -> None:
|
|
|
352
645
|
|
|
353
646
|
@cli.command("import-backup")
|
|
354
647
|
@click.argument("backup_file", type=click.Path(exists=True))
|
|
355
|
-
@click.option(
|
|
356
|
-
|
|
648
|
+
@click.option(
|
|
649
|
+
"--reindex/--no-reindex", default=True, help="Rebuild the index after import (default: yes)"
|
|
650
|
+
)
|
|
357
651
|
@click.pass_context
|
|
358
652
|
def import_backup(ctx: click.Context, backup_file: str, reindex: bool) -> None:
|
|
359
653
|
"""Restore memories from a JSON backup file.
|
|
@@ -374,6 +668,91 @@ def import_backup(ctx: click.Context, backup_file: str, reindex: bool) -> None:
|
|
|
374
668
|
sys.exit(1)
|
|
375
669
|
|
|
376
670
|
|
|
671
|
+
@cli.command("backup")
|
|
672
|
+
@click.option("--list", "do_list", is_flag=True, help="Show all backups with date and size.")
|
|
673
|
+
@click.option(
|
|
674
|
+
"--prune",
|
|
675
|
+
"prune_n",
|
|
676
|
+
type=int,
|
|
677
|
+
default=None,
|
|
678
|
+
metavar="N",
|
|
679
|
+
help="Keep only the N most recent backups, delete older ones.",
|
|
680
|
+
)
|
|
681
|
+
@click.option(
|
|
682
|
+
"--restore",
|
|
683
|
+
"restore_file",
|
|
684
|
+
type=click.Path(),
|
|
685
|
+
default=None,
|
|
686
|
+
metavar="FILE",
|
|
687
|
+
help="Restore memories from backup (alias for import-backup).",
|
|
688
|
+
)
|
|
689
|
+
@click.option(
|
|
690
|
+
"--reindex/--no-reindex", default=True, help="Rebuild index after --restore (default: yes)."
|
|
691
|
+
)
|
|
692
|
+
@click.pass_context
|
|
693
|
+
def backup_cmd(
|
|
694
|
+
ctx: click.Context,
|
|
695
|
+
do_list: bool,
|
|
696
|
+
prune_n: Optional[int],
|
|
697
|
+
restore_file: Optional[str],
|
|
698
|
+
reindex: bool,
|
|
699
|
+
) -> None:
|
|
700
|
+
"""Manage memory backups: list, prune old ones, or restore.
|
|
701
|
+
|
|
702
|
+
\b
|
|
703
|
+
Examples:
|
|
704
|
+
skmemory backup --list
|
|
705
|
+
skmemory backup --prune 7
|
|
706
|
+
skmemory backup --restore ~/.skcapstone/backups/skmemory-backup-2026-03-01.json
|
|
707
|
+
"""
|
|
708
|
+
store: MemoryStore = ctx.obj["store"]
|
|
709
|
+
|
|
710
|
+
if do_list:
|
|
711
|
+
backups = store.list_backups()
|
|
712
|
+
if not backups:
|
|
713
|
+
click.echo("No backups found.")
|
|
714
|
+
return
|
|
715
|
+
click.echo(f"{'Date':<12} {'Size':>10} Path")
|
|
716
|
+
click.echo("-" * 60)
|
|
717
|
+
for b in backups:
|
|
718
|
+
size_kb = b["size_bytes"] / 1024
|
|
719
|
+
click.echo(f"{b['date']:<12} {size_kb:>8.1f} KB {b['path']}")
|
|
720
|
+
return
|
|
721
|
+
|
|
722
|
+
if prune_n is not None:
|
|
723
|
+
if prune_n < 0:
|
|
724
|
+
click.echo("Error: N must be >= 0", err=True)
|
|
725
|
+
sys.exit(1)
|
|
726
|
+
deleted = store.prune_backups(keep=prune_n)
|
|
727
|
+
if deleted:
|
|
728
|
+
for p in deleted:
|
|
729
|
+
click.echo(f"Deleted: {p}")
|
|
730
|
+
click.echo(f"Pruned {len(deleted)} backup(s), kept {prune_n} most recent.")
|
|
731
|
+
else:
|
|
732
|
+
click.echo("Nothing to prune.")
|
|
733
|
+
return
|
|
734
|
+
|
|
735
|
+
if restore_file is not None:
|
|
736
|
+
from pathlib import Path as _Path
|
|
737
|
+
|
|
738
|
+
if not _Path(restore_file).exists():
|
|
739
|
+
click.echo(f"Error: backup file not found: {restore_file}", err=True)
|
|
740
|
+
sys.exit(1)
|
|
741
|
+
try:
|
|
742
|
+
count = store.import_backup(restore_file)
|
|
743
|
+
click.echo(f"Restored {count} memories from: {restore_file}")
|
|
744
|
+
if reindex:
|
|
745
|
+
idx = store.reindex()
|
|
746
|
+
if idx >= 0:
|
|
747
|
+
click.echo(f"Re-indexed {idx} memories.")
|
|
748
|
+
except (FileNotFoundError, ValueError, RuntimeError) as e:
|
|
749
|
+
click.echo(str(e), err=True)
|
|
750
|
+
sys.exit(1)
|
|
751
|
+
return
|
|
752
|
+
|
|
753
|
+
click.echo(ctx.get_help())
|
|
754
|
+
|
|
755
|
+
|
|
377
756
|
@cli.command()
|
|
378
757
|
@click.option("--max-tokens", type=int, default=3000, help="Token budget for context")
|
|
379
758
|
@click.option("--strongest", type=int, default=5, help="Top emotional memories")
|
|
@@ -723,6 +1102,249 @@ def anchor_update(
|
|
|
723
1102
|
click.echo(f" Warmth: {a.warmth} | Trust: {a.trust} | Connection: {a.connection_strength}")
|
|
724
1103
|
|
|
725
1104
|
|
|
1105
|
+
# ═══════════════════════════════════════════════════════════
|
|
1106
|
+
# Setup commands — Docker orchestration for backends
|
|
1107
|
+
# ═══════════════════════════════════════════════════════════
|
|
1108
|
+
|
|
1109
|
+
|
|
1110
|
+
@cli.group()
|
|
1111
|
+
def setup() -> None:
|
|
1112
|
+
"""Deploy and manage SKVector & SKGraph Docker containers."""
|
|
1113
|
+
|
|
1114
|
+
|
|
1115
|
+
@setup.command("wizard")
|
|
1116
|
+
@click.option("--skvector/--no-skvector", default=True, help="Enable SKVector (vector search)")
|
|
1117
|
+
@click.option("--skgraph/--no-skgraph", default=True, help="Enable SKGraph (graph)")
|
|
1118
|
+
@click.option("--skip-deps", is_flag=True, help="Skip Python dependency installation")
|
|
1119
|
+
@click.option("--yes", "-y", "non_interactive", is_flag=True, help="Non-interactive mode")
|
|
1120
|
+
@click.option(
|
|
1121
|
+
"--local",
|
|
1122
|
+
"deployment_mode",
|
|
1123
|
+
flag_value="local",
|
|
1124
|
+
default=None,
|
|
1125
|
+
help="Run SKVector/SKGraph locally via Docker (skip local/remote prompt)",
|
|
1126
|
+
)
|
|
1127
|
+
@click.option(
|
|
1128
|
+
"--remote",
|
|
1129
|
+
"deployment_mode",
|
|
1130
|
+
flag_value="remote",
|
|
1131
|
+
help="Connect to a remote/SaaS URL (skip local/remote prompt)",
|
|
1132
|
+
)
|
|
1133
|
+
def setup_wizard(
|
|
1134
|
+
skvector: bool,
|
|
1135
|
+
skgraph: bool,
|
|
1136
|
+
skip_deps: bool,
|
|
1137
|
+
non_interactive: bool,
|
|
1138
|
+
deployment_mode: str,
|
|
1139
|
+
) -> None:
|
|
1140
|
+
"""Interactive wizard — deploy Docker containers or configure remote URLs.
|
|
1141
|
+
|
|
1142
|
+
Without --local or --remote the wizard asks which deployment mode you want.
|
|
1143
|
+
Use --local to go straight to Docker setup (checks Docker, offers to install
|
|
1144
|
+
it if missing). Use --remote to enter a Qdrant Cloud / self-hosted URL
|
|
1145
|
+
without touching Docker at all.
|
|
1146
|
+
"""
|
|
1147
|
+
from .setup_wizard import run_setup_wizard
|
|
1148
|
+
|
|
1149
|
+
result = run_setup_wizard(
|
|
1150
|
+
enable_skvector=skvector,
|
|
1151
|
+
enable_skgraph=skgraph,
|
|
1152
|
+
skip_deps=skip_deps,
|
|
1153
|
+
non_interactive=non_interactive,
|
|
1154
|
+
deployment_mode=deployment_mode,
|
|
1155
|
+
echo=click.echo,
|
|
1156
|
+
)
|
|
1157
|
+
if not result["success"]:
|
|
1158
|
+
sys.exit(1)
|
|
1159
|
+
|
|
1160
|
+
|
|
1161
|
+
@setup.command("status")
|
|
1162
|
+
def setup_status() -> None:
|
|
1163
|
+
"""Show Docker container state and backend connectivity."""
|
|
1164
|
+
from .setup_wizard import (
|
|
1165
|
+
check_skgraph_health,
|
|
1166
|
+
check_skvector_health,
|
|
1167
|
+
compose_ps,
|
|
1168
|
+
detect_platform,
|
|
1169
|
+
find_compose_file,
|
|
1170
|
+
)
|
|
1171
|
+
from .config import load_config
|
|
1172
|
+
|
|
1173
|
+
cfg = load_config()
|
|
1174
|
+
if cfg is None:
|
|
1175
|
+
click.echo("No setup config found. Run: skmemory setup wizard")
|
|
1176
|
+
return
|
|
1177
|
+
|
|
1178
|
+
click.echo("SKMemory Backend Status")
|
|
1179
|
+
click.echo("=" * 40)
|
|
1180
|
+
|
|
1181
|
+
if cfg.setup_completed_at:
|
|
1182
|
+
click.echo(f"Setup completed: {cfg.setup_completed_at}")
|
|
1183
|
+
click.echo(f"Backends enabled: {', '.join(cfg.backends_enabled) or 'none'}")
|
|
1184
|
+
click.echo("")
|
|
1185
|
+
|
|
1186
|
+
# Container status
|
|
1187
|
+
plat = detect_platform()
|
|
1188
|
+
if plat.compose_available:
|
|
1189
|
+
compose_file = None
|
|
1190
|
+
if cfg.docker_compose_file:
|
|
1191
|
+
from pathlib import Path
|
|
1192
|
+
|
|
1193
|
+
compose_file = Path(cfg.docker_compose_file)
|
|
1194
|
+
ps = compose_ps(compose_file=compose_file, use_legacy=plat.compose_legacy)
|
|
1195
|
+
click.echo("Containers:")
|
|
1196
|
+
if ps.stdout.strip():
|
|
1197
|
+
click.echo(ps.stdout)
|
|
1198
|
+
else:
|
|
1199
|
+
click.echo(" No containers running")
|
|
1200
|
+
click.echo("")
|
|
1201
|
+
|
|
1202
|
+
# Connectivity
|
|
1203
|
+
click.echo("Connectivity:")
|
|
1204
|
+
if cfg.skvector_url:
|
|
1205
|
+
healthy = check_skvector_health(url=cfg.skvector_url, timeout=5)
|
|
1206
|
+
status = "healthy" if healthy else "unreachable"
|
|
1207
|
+
click.echo(f" SKVector ({cfg.skvector_url}): {status}")
|
|
1208
|
+
|
|
1209
|
+
if cfg.skgraph_url:
|
|
1210
|
+
healthy = check_skgraph_health(timeout=5)
|
|
1211
|
+
status = "healthy" if healthy else "unreachable"
|
|
1212
|
+
click.echo(f" SKGraph ({cfg.skgraph_url}): {status}")
|
|
1213
|
+
|
|
1214
|
+
|
|
1215
|
+
@setup.command("start")
|
|
1216
|
+
@click.option(
|
|
1217
|
+
"--service",
|
|
1218
|
+
type=click.Choice(["skvector", "skgraph", "all"]),
|
|
1219
|
+
default="all",
|
|
1220
|
+
help="Which service to start",
|
|
1221
|
+
)
|
|
1222
|
+
def setup_start(service: str) -> None:
|
|
1223
|
+
"""Start previously configured containers."""
|
|
1224
|
+
from .setup_wizard import compose_up, detect_platform, find_compose_file
|
|
1225
|
+
from .config import load_config
|
|
1226
|
+
|
|
1227
|
+
cfg = load_config()
|
|
1228
|
+
plat = detect_platform()
|
|
1229
|
+
if not plat.compose_available:
|
|
1230
|
+
click.echo("Docker Compose not available.", err=True)
|
|
1231
|
+
sys.exit(1)
|
|
1232
|
+
|
|
1233
|
+
compose_file = None
|
|
1234
|
+
if cfg and cfg.docker_compose_file:
|
|
1235
|
+
from pathlib import Path
|
|
1236
|
+
|
|
1237
|
+
compose_file = Path(cfg.docker_compose_file)
|
|
1238
|
+
|
|
1239
|
+
services = None
|
|
1240
|
+
if service != "all":
|
|
1241
|
+
services = [service]
|
|
1242
|
+
elif cfg and cfg.backends_enabled:
|
|
1243
|
+
services = cfg.backends_enabled
|
|
1244
|
+
|
|
1245
|
+
result = compose_up(
|
|
1246
|
+
services=services,
|
|
1247
|
+
compose_file=compose_file,
|
|
1248
|
+
use_legacy=plat.compose_legacy,
|
|
1249
|
+
)
|
|
1250
|
+
if result.returncode == 0:
|
|
1251
|
+
click.echo(f"Started: {service}")
|
|
1252
|
+
else:
|
|
1253
|
+
click.echo(f"Failed: {result.stderr.strip()}", err=True)
|
|
1254
|
+
sys.exit(1)
|
|
1255
|
+
|
|
1256
|
+
|
|
1257
|
+
@setup.command("stop")
@click.option(
    "--service",
    type=click.Choice(["skvector", "skgraph", "all"]),
    default="all",
    help="Which service to stop",
)
def setup_stop(service: str) -> None:
    """Stop containers (preserves data).

    ``all`` delegates to ``docker compose down`` (volumes untouched); a
    single service is stopped via ``docker stop``. Exits 1 on any failure,
    including a stop that exceeds the 30-second timeout.
    """
    from .setup_wizard import detect_platform
    from .config import load_config
    import subprocess

    cfg = load_config()
    plat = detect_platform()
    if not plat.compose_available:
        click.echo("Docker Compose not available.", err=True)
        sys.exit(1)

    if service == "all":
        from .setup_wizard import compose_down

        # Use the compose file recorded at setup time, when available.
        compose_file = None
        if cfg and cfg.docker_compose_file:
            from pathlib import Path

            compose_file = Path(cfg.docker_compose_file)

        result = compose_down(
            compose_file=compose_file,
            use_legacy=plat.compose_legacy,
        )
        if result.returncode == 0:
            click.echo("All containers stopped.")
        else:
            click.echo(f"Failed: {result.stderr.strip()}", err=True)
            sys.exit(1)
    else:
        # Stop individual container
        container = f"skmemory-{service}"
        try:
            result = subprocess.run(
                ["docker", "stop", container],
                capture_output=True,
                text=True,
                timeout=30,
            )
        except subprocess.TimeoutExpired:
            # Previously an uncaught TimeoutExpired surfaced as a traceback;
            # report it like every other failure instead.
            click.echo(f"Failed to stop {container}: timed out after 30s", err=True)
            sys.exit(1)
        if result.returncode == 0:
            click.echo(f"Stopped: {container}")
        else:
            click.echo(f"Failed to stop {container}: {result.stderr.strip()}", err=True)
            sys.exit(1)
|
+
@setup.command("reset")
@click.option("--remove-data", is_flag=True, help="Also delete data volumes")
@click.confirmation_option(prompt="This will remove containers. Continue?")
def setup_reset(remove_data: bool) -> None:
    """Remove containers, optionally delete data volumes."""
    from .setup_wizard import compose_down, detect_platform
    from .config import load_config, CONFIG_PATH

    settings = load_config()
    platform_info = detect_platform()
    if not platform_info.compose_available:
        click.echo("Docker Compose not available.", err=True)
        sys.exit(1)

    # Use the compose file recorded in the saved config, when present.
    compose_path = None
    if settings and settings.docker_compose_file:
        from pathlib import Path

        compose_path = Path(settings.docker_compose_file)

    outcome = compose_down(
        compose_file=compose_path,
        remove_volumes=remove_data,
        use_legacy=platform_info.compose_legacy,
    )
    if outcome.returncode != 0:
        click.echo(f"Failed: {outcome.stderr.strip()}", err=True)
        sys.exit(1)

    vol_msg = " and data volumes" if remove_data else ""
    click.echo(f"Containers{vol_msg} removed.")

    # Drop the saved configuration alongside the containers.
    if CONFIG_PATH.exists():
        CONFIG_PATH.unlink()
        click.echo(f"Config removed: {CONFIG_PATH}")
726
1348
|
# ═══════════════════════════════════════════════════════════
|
|
727
1349
|
# Quadrant commands (Queen Ara's idea #3)
|
|
728
1350
|
# ═══════════════════════════════════════════════════════════
|
|
@@ -855,9 +1477,7 @@ def steelman_verify_soul() -> None:
|
|
|
855
1477
|
for value in soul.values:
|
|
856
1478
|
claims.append(f"I value {value}")
|
|
857
1479
|
for rel in soul.relationships:
|
|
858
|
-
claims.append(
|
|
859
|
-
f"{rel.name} is my {rel.role} (bond: {rel.bond_strength}/10)"
|
|
860
|
-
)
|
|
1480
|
+
claims.append(f"{rel.name} is my {rel.role} (bond: {rel.bond_strength}/10)")
|
|
861
1481
|
|
|
862
1482
|
if not claims:
|
|
863
1483
|
click.echo("No identity claims to verify. Add traits and values to your soul blueprint.")
|
|
@@ -954,7 +1574,148 @@ def import_telegram_cmd(
|
|
|
954
1574
|
|
|
955
1575
|
ai: Optional[AIClient] = ctx.obj.get("ai")
|
|
956
1576
|
if ai:
|
|
957
|
-
click.echo(
|
|
1577
|
+
click.echo(
|
|
1578
|
+
"\nTip: Run 'skmemory search --ai \"<topic>\"' to semantically search your imported chats."
|
|
1579
|
+
)
|
|
1580
|
+
|
|
1581
|
+
|
|
1582
|
+
@cli.command("import-telegram-api")
@click.argument("chat", type=str)
@click.option(
    "--mode",
    type=click.Choice(["daily", "message"]),
    default="daily",
    help="'daily' consolidates per day (recommended), 'message' imports each message",
)
@click.option("--limit", type=int, default=None, help="Max messages to fetch")
@click.option("--since", default=None, help="Only fetch messages after this date (YYYY-MM-DD)")
@click.option("--min-length", type=int, default=30, help="Skip messages shorter than N chars")
@click.option("--chat-name", default=None, help="Override chat name")
@click.option("--tags", default="", help="Extra comma-separated tags")
@click.pass_context
def import_telegram_api_cmd(
    ctx: click.Context,
    chat: str,
    mode: str,
    limit: Optional[int],
    since: Optional[str],
    min_length: int,
    chat_name: Optional[str],
    tags: str,
) -> None:
    """Import messages directly from Telegram API (requires Telethon).

    Connects to Telegram using API credentials and pulls messages
    directly — no manual export needed.

    Requires TELEGRAM_API_ID and TELEGRAM_API_HASH environment variables.

    \b
    Examples:
        skmemory import-telegram-api @username
        skmemory import-telegram-api "Chat Name" --mode message --limit 500
        skmemory import-telegram-api @group --since 2025-01-01
    """
    # Telethon is an optional dependency; fail with install instructions.
    try:
        from .importers.telegram_api import import_telegram_api
    except ImportError:
        click.echo(
            "Error: Telethon is required for direct API import.\n"
            "\n"
            "Install it:\n"
            " pipx inject skmemory telethon\n"
            " # or: pip install skmemory[telegram]\n"
            "\n"
            "Then run: skmemory telegram-setup (to verify full setup)",
            err=True,
        )
        sys.exit(1)

    store: MemoryStore = ctx.obj["store"]
    extra_tags = [t.strip() for t in tags.split(",") if t.strip()]

    click.echo(f"Fetching from Telegram API: {chat}")
    if limit:
        click.echo(f" Limit: {limit} messages")
    if since:
        click.echo(f" Since: {since}")
    click.echo(f" Mode: {mode} | Min length: {min_length}")

    try:
        stats = import_telegram_api(
            store,
            chat,
            mode=mode,
            limit=limit,
            since=since,
            min_message_length=min_length,
            chat_name=chat_name,
            tags=extra_tags or None,
        )
    except Exception as e:
        # RuntimeError and other exceptions were previously handled by two
        # byte-identical except clauses; one handler preserves the behavior.
        click.echo(f"Error: {e}", err=True)
        sys.exit(1)

    click.echo(f"\nImport complete for: {stats.get('chat_name', 'unknown')}")
    if mode == "daily":
        click.echo(f" Days processed: {stats.get('days_processed', 0)}")
        click.echo(f" Messages imported: {stats.get('messages_imported', 0)}")
    else:
        click.echo(f" Imported: {stats.get('imported', 0)}")
        click.echo(f" Skipped: {stats.get('skipped', 0)}")
        click.echo(f" Total messages scanned: {stats.get('total_messages', 0)}")
|
+
@cli.command("telegram-setup")
def telegram_setup_cmd() -> None:
    """Check Telegram API import setup and show next steps.

    Verifies that Telethon is installed, API credentials are set,
    and a session file exists. Prints actionable instructions for
    anything that's missing.

    \b
    Example:
        skmemory telegram-setup
    """
    # Telethon is optional — bail out early with install guidance if missing.
    try:
        from .importers.telegram_api import check_setup
    except ImportError:
        click.echo("Telethon is not installed.", err=True)
        click.echo("")
        click.echo("To fix, run one of:")
        click.echo(" pipx inject skmemory telethon")
        click.echo(" pip install skmemory[telegram]")
        sys.exit(1)

    report = check_setup()

    telethon_state = "yes" if report["telethon"] else "NO"
    creds_state = "yes" if report["credentials"] else "NO"
    session_state = "yes" if report["session"] else "not yet (created on first auth)"

    click.echo("Telegram API Import Setup")
    click.echo("=" * 40)
    click.echo(f" Telethon installed: {telethon_state}")
    click.echo(f" API credentials: {creds_state}")
    click.echo(f" Session file: {session_state}")
    click.echo("")

    # Not ready: list each blocking issue and exit non-zero.
    if not report["ready"]:
        click.echo("Setup incomplete. Fix these issues:")
        click.echo("")
        for issue in report["messages"]:
            click.echo(f" - {issue}")
        sys.exit(1)

    click.echo("Ready to import! Run:")
    click.echo(" skmemory import-telegram-api @username")
    click.echo(' skmemory import-telegram-api "Group Name" --mode daily')
    if not report["session"]:
        click.echo("")
        click.echo("First run will prompt for phone number + verification code.")
        click.echo("Session is saved at ~/.skcapstone/telegram.session for future use.")
1721
|
@steelman_group.command("install")
|
|
@@ -995,6 +1756,454 @@ def steelman_info() -> None:
|
|
|
995
1756
|
click.echo(f" Definitions: {len(fw.definitions)}")
|
|
996
1757
|
|
|
997
1758
|
|
|
1759
|
+
# ---------------------------------------------------------------------------
|
|
1760
|
+
# Fortress commands — integrity verification and audit trail
|
|
1761
|
+
# ---------------------------------------------------------------------------
|
|
1762
|
+
|
|
1763
|
+
|
|
1764
|
+
@cli.group("fortress")
# Click group container; the verify/audit/verify-chain subcommands attach to it.
def fortress_group() -> None:
    """Memory Fortress — integrity verification, tamper alerts, and audit trail."""
|
|
1767
|
+
|
|
1768
|
+
|
|
1769
|
+
@fortress_group.command("verify")
@click.option("--json", "as_json", is_flag=True, help="Output result as JSON")
@click.pass_context
def fortress_verify(ctx: click.Context, as_json: bool) -> None:
    """Verify integrity hashes for all stored memories.

    Loads every memory and checks its SHA-256 integrity hash.
    Tampered memories are reported with CRITICAL severity.
    Exit status: 2 when tampering is detected, 0 otherwise.
    """
    # NOTE: SQLiteBackend was previously imported here but never used.
    from .fortress import FortifiedMemoryStore
    from .config import SKMEMORY_HOME

    store = ctx.obj.get("store")
    audit_path = SKMEMORY_HOME / "audit.jsonl"

    # Wrap the primary backend only — SQLite mirroring is not needed for a
    # read-only verification pass.
    fortress = FortifiedMemoryStore(
        primary=store.primary,
        use_sqlite=False,
        audit_path=audit_path,
    )
    result = fortress.verify_all()

    if as_json:
        click.echo(json.dumps(result, indent=2))
        return

    total = result["total"]
    passed = result["passed"]
    tampered = result["tampered"]
    unsealed = result["unsealed"]

    click.echo("Fortress Integrity Report")
    click.echo(f" Total memories : {total}")
    click.echo(f" Passed : {passed}")
    click.echo(f" Tampered : {len(tampered)}")
    click.echo(f" Unsealed : {len(unsealed)}")

    if tampered:
        click.echo("\nTAMPERED MEMORIES (CRITICAL):")
        for mid in tampered:
            click.echo(f" !! {mid}")
        sys.exit(2)
    elif total == 0:
        click.echo("\nNo memories found.")
    else:
        click.echo("\nAll memories passed integrity check.")
|
1818
|
+
@fortress_group.command("audit")
@click.option("--last", "n", type=int, default=20, help="Number of recent entries to show")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
def fortress_audit(n: int, as_json: bool) -> None:
    """Show the most recent audit trail entries.

    The audit trail is a chain-hashed JSONL log of every store/recall/delete
    operation. Each entry is cryptographically chained so tampering is detectable.
    """
    from .fortress import AuditLog
    from .config import SKMEMORY_HOME

    trail = AuditLog(path=SKMEMORY_HOME / "audit.jsonl")
    entries = trail.tail(n)

    if as_json:
        click.echo(json.dumps(entries, indent=2))
        return

    if not entries:
        click.echo("No audit records found.")
        return

    # Fields already rendered explicitly; everything else is shown as extras.
    known_keys = ("ts", "op", "id", "ok", "chain_hash")
    click.echo(f"Audit Trail — last {len(entries)} entries:")
    for entry in entries:
        status = "OK" if entry.get("ok") else "FAIL"
        operation = entry.get("op", "?").upper()
        short_id = entry.get("id", "?")[:12]
        stamp = entry.get("ts", "?")[:19]
        leftovers = {k: v for k, v in entry.items() if k not in known_keys}
        detail = ", ".join(f"{k}={v}" for k, v in leftovers.items()) if leftovers else ""
        text = f" [{stamp}] {operation:8s} {status:4s} id={short_id}"
        if detail:
            text += f" | {detail}"
        click.echo(text)
|
1855
|
+
@fortress_group.command("verify-chain")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
def fortress_verify_chain(as_json: bool) -> None:
    """Verify the cryptographic chain of the audit log itself.

    Each audit log entry contains a chain hash linking it to the previous entry.
    A broken chain indicates the audit log was tampered with.
    """
    from .fortress import AuditLog
    from .config import SKMEMORY_HOME

    log = AuditLog(path=SKMEMORY_HOME / "audit.jsonl")
    valid, problems = log.verify_chain()

    if as_json:
        click.echo(json.dumps({"ok": valid, "errors": problems}))
        return

    # Broken chain: list every detected break and exit with status 2.
    if not valid:
        click.echo("Audit chain BROKEN — log may have been tampered!")
        for problem in problems:
            click.echo(f" !! {problem}")
        sys.exit(2)

    click.echo("Audit chain is VALID — log integrity confirmed.")
|
|
1882
|
+
# ---------------------------------------------------------------------------
|
|
1883
|
+
# Vault commands — at-rest encryption management
|
|
1884
|
+
# ---------------------------------------------------------------------------
|
|
1885
|
+
|
|
1886
|
+
|
|
1887
|
+
@cli.group("vault")
# Click group container; the seal/unseal/status subcommands attach to it.
def vault_group() -> None:
    """Memory Vault — AES-256-GCM at-rest encryption for memory files."""
|
|
1890
|
+
|
|
1891
|
+
|
|
1892
|
+
@vault_group.command("seal")
@click.option(
    "--passphrase",
    envvar="SKMEMORY_VAULT_PASSPHRASE",
    required=True,
    help="Encryption passphrase (or set SKMEMORY_VAULT_PASSPHRASE env var)",
    prompt="Vault passphrase",
    hide_input=True,
    confirmation_prompt=True,
)
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompt")
@click.pass_context
def vault_seal(ctx: click.Context, passphrase: str, yes: bool) -> None:
    """Encrypt all plaintext memory files with AES-256-GCM.

    Already-encrypted files are skipped. Safe to run multiple times.
    Requires the 'cryptography' package: pip install skmemory[fortress]
    """
    from .backends.vaulted_backend import VaultedSQLiteBackend
    from .config import SKMEMORY_HOME
    from .fortress import AuditLog

    store = ctx.obj.get("store")
    # Prefer the primary backend's own path; fall back to the default home.
    if hasattr(store.primary, "base_path"):
        memories_path = store.primary.base_path
    else:
        memories_path = SKMEMORY_HOME / "memories"

    if not yes:
        click.confirm(
            f"This will encrypt all memory files in {memories_path}. Continue?",
            abort=True,
        )

    vault = VaultedSQLiteBackend(passphrase=passphrase, base_path=str(memories_path))
    sealed = vault.seal_all()

    # Record the operation in the chain-hashed audit trail.
    AuditLog(path=SKMEMORY_HOME / "audit.jsonl").append(
        "vault_seal", "ALL", ok=True, files_sealed=sealed
    )

    click.echo(f"Vault sealed: {sealed} file(s) encrypted.")
    if sealed == 0:
        click.echo("(All files were already encrypted or no memories exist.)")
|
1938
|
+
@vault_group.command("unseal")
@click.option(
    "--passphrase",
    envvar="SKMEMORY_VAULT_PASSPHRASE",
    required=True,
    help="Decryption passphrase",
    prompt="Vault passphrase",
    hide_input=True,
)
@click.option("--yes", "-y", is_flag=True, help="Skip confirmation prompt")
@click.pass_context
def vault_unseal(ctx: click.Context, passphrase: str, yes: bool) -> None:
    """Decrypt all vault-encrypted memory files back to plaintext.

    Use this to migrate away from encryption or to inspect raw files.
    """
    from .backends.vaulted_backend import VaultedSQLiteBackend
    from .config import SKMEMORY_HOME
    from .fortress import AuditLog

    store = ctx.obj.get("store")
    # Prefer the primary backend's own path; fall back to the default home.
    if hasattr(store.primary, "base_path"):
        memories_path = store.primary.base_path
    else:
        memories_path = SKMEMORY_HOME / "memories"

    if not yes:
        click.confirm(
            f"This will decrypt all vault files in {memories_path}. Continue?",
            abort=True,
        )

    vault = VaultedSQLiteBackend(passphrase=passphrase, base_path=str(memories_path))
    decrypted = vault.unseal_all()

    # Record the operation in the chain-hashed audit trail.
    AuditLog(path=SKMEMORY_HOME / "audit.jsonl").append(
        "vault_unseal", "ALL", ok=True, files_decrypted=decrypted
    )

    click.echo(f"Vault unsealed: {decrypted} file(s) decrypted.")
1979
|
+
|
|
1980
|
+
@vault_group.command("status")
@click.option("--json", "as_json", is_flag=True, help="Output as JSON")
@click.pass_context
def vault_status_cmd(ctx: click.Context, as_json: bool) -> None:
    """Show encryption coverage for memory files.

    Reports how many memory files are encrypted vs. plaintext.
    Does not require a passphrase — only checks file headers.
    """
    from .config import SKMEMORY_HOME
    from .vault import VAULT_HEADER
    from .models import MemoryLayer

    store = ctx.obj.get("store")
    memories_path = (
        store.primary.base_path
        if hasattr(store.primary, "base_path")
        else (SKMEMORY_HOME / "memories")
    )

    total = encrypted = 0
    header_len = len(VAULT_HEADER)
    for layer in MemoryLayer:
        layer_dir = memories_path / layer.value
        if not layer_dir.exists():
            continue
        for json_file in layer_dir.glob("*.json"):
            total += 1
            try:
                with json_file.open("rb") as fh:
                    header = fh.read(header_len)
                if header == VAULT_HEADER:
                    encrypted += 1
            except OSError:
                # Best effort: an unreadable file is counted as plaintext.
                pass

    plaintext = total - encrypted
    # An empty vault is reported as fully covered (100%).
    pct = (encrypted / total * 100) if total else 100.0
    result = {
        "total": total,
        "encrypted": encrypted,
        "plaintext": plaintext,
        "coverage_pct": round(pct, 1),
    }

    if as_json:
        click.echo(json.dumps(result, indent=2))
        return

    click.echo(f"Vault Status — {memories_path}")
    click.echo(f" Total files : {total}")
    click.echo(f" Encrypted : {encrypted}")
    click.echo(f" Plaintext : {plaintext}")
    click.echo(f" Coverage : {pct:.1f}%")
    if total == 0:
        click.echo("\n (No memory files found.)")
    elif pct == 100.0:
        click.echo("\n All memories are encrypted.")
    elif pct == 0.0:
        click.echo("\n No memories are encrypted. Run: skmemory vault seal")
    else:
        # Was an f-string with no placeholders (ruff F541); plain string now.
        click.echo("\n Partial encryption! Run: skmemory vault seal --yes")
|
2043
|
+
|
|
2044
|
+
@cli.command("register")
@click.option(
    "--workspace",
    default=None,
    type=click.Path(),
    help="Workspace root directory (default: ~/clawd/).",
)
@click.option("--env", "target_env", default=None, help="Target a specific environment.")
@click.option(
    "--dry-run",
    is_flag=True,
    default=False,
    help="Show what would be done without making changes.",
)
def register_cmd(workspace, target_env, dry_run):
    """Register skmemory skill and MCP server in detected environments.

    Auto-detects development environments (Claude Code, Cursor, VS Code,
    OpenClaw, OpenCode, mcporter) and ensures skmemory SKILL.md and MCP
    server entries are properly configured.

    Examples:

        skmemory register                    # auto-detect and register
        skmemory register --dry-run          # preview what would happen
        skmemory register --env claude-code  # target Claude Code only
    """
    from pathlib import Path as _Path
    from .register import detect_environments, register_package

    workspace_path = _Path(workspace).expanduser() if workspace else None
    environments = [target_env] if target_env else None

    detected = detect_environments()
    # Parenthesize the conditional: previously the ternary bound over the whole
    # concatenation, so the "Detected environments:" prefix was silently dropped
    # when nothing was detected.
    click.echo("Detected environments: " + (", ".join(detected) if detected else "(none)"))

    if dry_run:
        click.echo("Dry run — no changes will be made.")

    # SKILL.md ships next to the package; fall back to the package dir itself.
    skill_md = _Path(__file__).parent.parent / "SKILL.md"
    if not skill_md.exists():
        skill_md = _Path(__file__).parent / "SKILL.md"

    result = register_package(
        name="skmemory",
        skill_md_path=skill_md,
        mcp_command="skmemory-mcp",
        mcp_args=[],
        workspace=workspace_path,
        environments=environments,
        dry_run=dry_run,
    )

    click.echo(f"Skill: {result.get('skill', {}).get('action', '—')}")
    mcp = result.get("mcp", {})
    if mcp:
        for env_name, action in mcp.items():
            click.echo(f"MCP ({env_name}): {action}")
    else:
        click.echo("MCP: no environments matched")
|
|
2105
|
+
|
|
2106
|
+
@cli.command("show-context")
@click.pass_context
@click.option("--agent", default=None, help="Agent name (default: active agent)")
def show_context(ctx, agent: Optional[str]):
    """Show token-optimized memory context for current session.

    Loads today's memories (full) + yesterday's summaries (brief).
    Historical memories shown as reference count only.

    Examples:
        skmemory show-context
        skmemory show-context --agent lumina
    """
    # NOTE: the help examples previously said "skmemory context", which is not
    # a registered command — the command name is "show-context".
    from .context_loader import get_context_for_session

    try:
        context_str = get_context_for_session(agent)
        click.echo(context_str)
    except Exception as e:
        click.echo(f"Error loading context: {e}", err=True)
        raise click.Abort()
2128
|
+
|
|
2129
|
+
@cli.command()
@click.pass_context
@click.argument("query")
@click.option("--agent", default=None, help="Agent name (default: active agent)")
@click.option("--limit", type=int, default=10, help="Maximum results (default: 10)")
def search_deep(ctx, query: str, agent: Optional[str], limit: int):
    """Deep search all memory tiers (on demand).

    Searches SQLite + SKVector + SKGraph for matches.
    Returns full memory details (token-heavy).

    Examples:
        skmemory search-deep "project gentis"
        skmemory search-deep "architecture decisions" --limit 20
    """
    from .context_loader import LazyMemoryLoader

    # One icon per memory tier; unknown tiers get a plain bullet.
    icons = {"short-term": "⚡", "mid-term": "📅", "long-term": "🗃️"}

    try:
        matches = LazyMemoryLoader(agent).deep_search(query, max_results=limit)

        if not matches:
            click.echo("No memories found.")
            return

        click.echo(f"Found {len(matches)} memories:\n")
        for idx, entry in enumerate(matches, 1):
            tier = entry.get("layer", "short-term")
            icon = icons.get(tier, "•")
            click.echo(f"{idx}. {icon} {entry.get('title', 'Untitled')}")
            click.echo(f" {entry.get('content', '')[:200]}...")
            click.echo(
                f" Layer: {entry.get('layer', 'unknown')} | "
                f"Date: {entry.get('created_at', 'unknown')}"
            )
            if entry.get("tags"):
                click.echo(f" Tags: {', '.join(entry.get('tags', []))}")
            click.echo()

    except Exception as e:
        click.echo(f"Error searching: {e}", err=True)
        raise click.Abort()
|
|
2173
|
+
|
|
2174
|
+
@cli.command()
@click.pass_context
@click.argument("memory_id")
@click.argument("to_layer", type=click.Choice(["short-term", "mid-term", "long-term"]))
@click.option("--agent", default=None, help="Agent name (default: active agent)")
def promote(ctx, memory_id: str, to_layer: str, agent: Optional[str]):
    """Promote memory to different tier and generate summary.

    Moves memory between short/medium/long term and auto-generates
    a summary if promoting to medium or long term.

    Examples:
        skmemory promote abc123 mid-term
        skmemory promote def456 long-term --agent lumina
    """
    # BUGFIX: the callback takes ``ctx`` but @click.pass_context was missing,
    # so click invoked it without the context and every call raised TypeError.
    from .context_loader import LazyMemoryLoader

    try:
        loader = LazyMemoryLoader(agent)
        success = loader.promote_memory(memory_id, to_layer)

        if success:
            click.echo(f"✓ Promoted {memory_id} to {to_layer}")
            if to_layer in ("mid-term", "long-term"):
                click.echo(" Summary generated automatically.")
        else:
            click.echo(f"✗ Failed to promote {memory_id}", err=True)
            raise click.Abort()

    except Exception as e:
        click.echo(f"Error promoting memory: {e}", err=True)
        raise click.Abort()
+
|
|
2206
|
+
|
|
998
2207
|
def main() -> None:
    """Console-script entry point: dispatch to the click CLI group."""
    cli()
|