skillmind 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- skillmind/__init__.py +3 -0
- skillmind/cli/__init__.py +1 -0
- skillmind/cli/main.py +457 -0
- skillmind/config.py +131 -0
- skillmind/context.py +218 -0
- skillmind/embeddings.py +126 -0
- skillmind/listener.py +250 -0
- skillmind/mcp/__init__.py +1 -0
- skillmind/mcp/server.py +429 -0
- skillmind/migration.py +201 -0
- skillmind/models.py +112 -0
- skillmind/sanitizer.py +233 -0
- skillmind/setup.py +380 -0
- skillmind/store/__init__.py +45 -0
- skillmind/store/base.py +148 -0
- skillmind/store/chroma_store.py +184 -0
- skillmind/store/faiss_store.py +286 -0
- skillmind/store/pinecone_store.py +204 -0
- skillmind/store/qdrant_store.py +256 -0
- skillmind/store/supabase_store.py +267 -0
- skillmind/trainer.py +283 -0
- skillmind/video/__init__.py +1 -0
- skillmind/video/screen_recorder.py +207 -0
- skillmind/video/video_learner.py +454 -0
- skillmind/video/youtube_learner.py +459 -0
- skillmind-0.1.0.dist-info/METADATA +300 -0
- skillmind-0.1.0.dist-info/RECORD +30 -0
- skillmind-0.1.0.dist-info/WHEEL +4 -0
- skillmind-0.1.0.dist-info/entry_points.txt +2 -0
- skillmind-0.1.0.dist-info/licenses/LICENSE +21 -0
skillmind/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""SkillMind CLI."""
|
skillmind/cli/main.py
ADDED
|
@@ -0,0 +1,457 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SkillMind CLI — command-line interface for memory management.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
skillmind init # Initialize SkillMind in current project
|
|
6
|
+
skillmind remember "content" # Store a memory
|
|
7
|
+
skillmind recall "query" # Semantic search
|
|
8
|
+
skillmind list # List all memories
|
|
9
|
+
skillmind forget <id> # Delete a memory
|
|
10
|
+
skillmind import # Import from Claude Code markdown files
|
|
11
|
+
skillmind consolidate # Cleanup and merge duplicates
|
|
12
|
+
skillmind context # Generate context for current situation
|
|
13
|
+
skillmind stats # Show store statistics
|
|
14
|
+
skillmind serve # Start MCP server
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import json
|
|
20
|
+
import sys
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
import click
|
|
24
|
+
from rich.console import Console
|
|
25
|
+
from rich.table import Table
|
|
26
|
+
|
|
27
|
+
from ..config import SkillMindConfig
|
|
28
|
+
from ..embeddings import EmbeddingEngine
|
|
29
|
+
from ..models import MemorySource, MemoryType, QueryFilter
|
|
30
|
+
from ..store import create_store
|
|
31
|
+
from ..trainer import Trainer
|
|
32
|
+
|
|
33
|
+
console = Console()
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _get_components(config_path: str | None = None):
    """Build the full component stack: config, embedding engine, store, trainer.

    The store is initialized before being returned, so callers can use it
    immediately.
    """
    cfg = SkillMindConfig.load(config_path).resolve_env()
    embedder = EmbeddingEngine(cfg.embedding)
    memory_store = create_store(cfg, embedder)
    memory_store.initialize()
    return cfg, embedder, memory_store, Trainer(memory_store)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@click.group()
@click.option("--config", "-c", default=None, help="Path to config.yml")
@click.pass_context
def cli(ctx: click.Context, config: str | None) -> None:
    """SkillMind — Active memory & skill layer for AI coding assistants."""
    # ensure_object returns ctx.obj; stash the --config path for subcommands.
    ctx.ensure_object(dict)["config_path"] = config
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@cli.command()
@click.option("--backend", "-b", default="chroma", type=click.Choice(["chroma", "pinecone", "supabase", "qdrant", "faiss"]))
@click.option("--data-dir", "-d", default=".skillmind")
def init(backend: str, data_dir: str) -> None:
    """Initialize SkillMind in the current project."""
    # Persist a fresh config pointing at the chosen backend.
    SkillMindConfig(data_dir=data_dir, store={"backend": backend}).save()

    # Lay out the on-disk directory structure (parents=True also creates data_dir).
    root = Path(data_dir)
    (root / "exports").mkdir(parents=True, exist_ok=True)

    console.print(f"[green]Initialized SkillMind with {backend} backend[/green]")
    console.print(f" Config: {data_dir}/config.yml")
    console.print(f" Data: {data_dir}/")
    console.print()
    console.print("[dim]Next steps:[/dim]")
    console.print(" skillmind import # Import existing Claude Code memories")
    console.print(" skillmind remember '...' # Store a new memory")
    console.print(" skillmind recall '...' # Search memories")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@cli.command()
@click.argument("content")
@click.option("--title", "-t", default="", help="Short title")
@click.option("--type", "-T", "mem_type", default="", type=click.Choice(["", "user", "feedback", "project", "reference", "skill"]))
@click.option("--topic", default="", help="Primary topic tag")
@click.option("--tags", default="", help="Comma-separated tags")
@click.pass_context
def remember(ctx: click.Context, content: str, title: str, mem_type: str, topic: str, tags: str) -> None:
    """Store a new memory."""
    # Only the trainer is needed here; the store is initialized as a side effect.
    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))

    parsed_tags = [part.strip() for part in tags.split(",") if part.strip()] if tags else None

    memory = trainer.learn(
        content=content,
        title=title or None,
        source=MemorySource.MANUAL,
        force_type=MemoryType(mem_type) if mem_type else None,
        force_topic=topic or None,
        tags=parsed_tags,
    )

    # The trainer returns a falsy value when a near-duplicate already exists.
    if not memory:
        console.print("[yellow]Skipped: similar memory already exists[/yellow]")
        return

    console.print(f"[green]Stored:[/green] {memory.title}")
    console.print(f" ID: {memory.id}")
    console.print(f" Type: {memory.type.value}")
    console.print(f" Topic: {memory.topic}")
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@cli.command()
@click.argument("query")
@click.option("--limit", "-n", default=5, help="Max results")
@click.option("--type", "-T", "mem_type", default="")
@click.option("--topic", default="")
@click.pass_context
def recall(ctx: click.Context, query: str, limit: int, mem_type: str, topic: str) -> None:
    """Semantic search across all memories."""
    _, _, store, _ = _get_components(ctx.obj.get("config_path"))

    # Build the optional metadata filter from CLI flags.
    qf = QueryFilter()
    if mem_type:
        qf.types = [MemoryType(mem_type)]
    if topic:
        qf.topics = [topic]

    results = store.query(query, limit=limit, filter=qf)

    if not results:
        console.print("[dim]No matching memories found.[/dim]")
        return

    table = Table(title=f"Results for: {query}")
    table.add_column("Score", style="cyan", width=6)
    table.add_column("Type", style="magenta", width=10)
    table.add_column("Topic", style="green", width=15)
    table.add_column("Title", width=40)
    table.add_column("ID", style="dim", width=12)

    for r in results:
        table.add_row(
            f"{r.score:.2f}",
            r.memory.type.value,
            r.memory.topic,
            r.memory.title[:40],
            r.memory.id[:12],
        )

    console.print(table)

    # Show full content of the top result. The early return above guarantees
    # `results` is non-empty here, so the former `if results:` guard was dead.
    top = results[0]
    console.print(f"\n[bold]Top result:[/bold] {top.memory.title}")
    console.print(top.memory.content)
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
@cli.command("list")
@click.option("--type", "-T", "mem_type", default="")
@click.option("--topic", default="")
@click.option("--limit", "-n", default=20)
@click.pass_context
def list_memories(ctx: click.Context, mem_type: str, topic: str, limit: int) -> None:
    """List all memories with optional filtering."""
    _, _, store, _ = _get_components(ctx.obj.get("config_path"))

    # Translate CLI flags into a store-level filter.
    criteria = QueryFilter()
    if mem_type:
        criteria.types = [MemoryType(mem_type)]
    if topic:
        criteria.topics = [topic]

    memories = store.list_all(filter=criteria, limit=limit)

    table = Table(title=f"Memories ({len(memories)})")
    for header, opts in (
        ("Type", {"style": "magenta", "width": 10}),
        ("Topic", {"style": "green", "width": 15}),
        ("Title", {"width": 45}),
        ("Confidence", {"style": "cyan", "width": 10}),
        ("ID", {"style": "dim", "width": 12}),
    ):
        table.add_column(header, **opts)

    for entry in memories:
        table.add_row(
            entry.type.value,
            entry.topic,
            entry.title[:45],
            f"{entry.confidence:.2f}",
            entry.id[:12],
        )

    console.print(table)
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
@cli.command()
@click.argument("memory_id")
@click.pass_context
def forget(ctx: click.Context, memory_id: str) -> None:
    """Delete a memory by ID."""
    _, _, store, _ = _get_components(ctx.obj.get("config_path"))

    # Anything shorter than a full 36-character UUID is treated as a prefix
    # and resolved by scanning the store.
    if len(memory_id) < 36:
        candidates = [
            m for m in store.list_all(limit=10000) if m.id.startswith(memory_id)
        ]
        if not candidates:
            console.print(f"[red]No memory found starting with {memory_id}[/red]")
            return
        if len(candidates) > 1:
            console.print(f"[yellow]Ambiguous ID, {len(candidates)} matches. Be more specific.[/yellow]")
            return
        memory_id = candidates[0].id

    if store.delete(memory_id):
        console.print(f"[green]Deleted memory {memory_id}[/green]")
    else:
        console.print(f"[red]Memory not found: {memory_id}[/red]")
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
@cli.command("import")
@click.option("--source", "-s", default="", help="Path to memory directory")
@click.option("--dry-run", is_flag=True, help="Preview without importing")
@click.pass_context
def import_memories(ctx: click.Context, source: str, dry_run: bool) -> None:
    """Import existing Claude Code markdown memories into SkillMind."""
    from ..migration import migrate_memories

    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))

    banner = "DRY RUN — " if dry_run else ""
    console.print(f"[bold]{banner}Importing memories...[/bold]")

    stats = migrate_memories(trainer=trainer, source_dir=source or None, dry_run=dry_run)

    console.print(f" Files found: {stats['files_found']}")
    console.print(f" [green]Imported: {stats['imported']}[/green]")
    console.print(f" [yellow]Skipped (dupes): {stats['skipped_duplicate']}[/yellow]")
    console.print(f" [red]Skipped (error): {stats['skipped_error']}[/red]")

    # In dry-run mode, preview exactly what a real run would ingest.
    if dry_run and stats["memories"]:
        console.print("\n[bold]Would import:[/bold]")
        for item in stats["memories"]:
            preview = item.get("content_preview", "")[:60]
            console.print(f" [{item.get('type', '?')}] {item.get('name', '?')}: {preview}")
        console.print("\n[dim]Run without --dry-run to actually import.[/dim]")
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
@cli.command()
@click.pass_context
def consolidate(ctx: click.Context) -> None:
    """Cleanup: merge duplicates, expire stale memories."""
    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))

    console.print("[bold]Running consolidation...[/bold]")
    outcome = trainer.consolidate()

    # Render the three counters with their respective colors.
    for label, key, color in (
        ("Merged", "merged", "green"),
        ("Expired", "expired", "yellow"),
        ("Updated", "updated", "cyan"),
    ):
        console.print(f" [{color}]{label}: {outcome[key]}[/{color}]")
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
@cli.command()
@click.option("--file", "-f", default="", help="Current file path")
@click.option("--topic", "-t", default="", help="Current topic")
@click.option("--query", "-q", default="", help="Direct query")
@click.option("--output", "-o", default="", help="Write to file instead of stdout")
@click.pass_context
def context(ctx: click.Context, file: str, topic: str, query: str, output: str) -> None:
    """Generate focused context for current situation."""
    from ..context import ContextGenerator

    _, _, store, _ = _get_components(ctx.obj.get("config_path"))

    rendered = ContextGenerator(store).generate(
        current_file=file or None,
        current_topic=topic or None,
        query=query or None,
    )

    # Default destination is stdout; --output redirects to a file.
    if not output:
        console.print(rendered)
        return
    Path(output).write_text(rendered, encoding="utf-8")
    console.print(f"[green]Context written to {output}[/green]")
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
@cli.command()
@click.pass_context
def stats(ctx: click.Context) -> None:
    """Show memory store statistics."""
    config, _, store, _ = _get_components(ctx.obj.get("config_path"))

    table = Table(title="SkillMind Statistics")
    table.add_column("Metric", style="bold")
    table.add_column("Value", style="cyan")

    table.add_row("Backend", config.store.backend)
    table.add_row("Embedding model", config.embedding.model)
    table.add_row("Total memories", str(store.count()))

    # Per-type breakdown: one count query per memory type.
    for mem_type in MemoryType:
        per_type = store.count(filter=QueryFilter(types=[mem_type]))
        table.add_row(f" {mem_type.value}", str(per_type))

    console.print(table)
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
@cli.command()
@click.option("--transport", "-t", default="stdio", type=click.Choice(["stdio", "http"]))
@click.option("--port", "-p", default=8765, type=int)
def serve(transport: str, port: int) -> None:
    """Start the SkillMind MCP server."""
    from ..mcp.server import create_server

    server = create_server()
    console.print(f"[bold green]Starting SkillMind MCP server ({transport})...[/bold green]")

    if transport != "stdio":
        # NOTE(review): HTTP mode binds on all interfaces (0.0.0.0) —
        # confirm this is intentional rather than 127.0.0.1.
        server.run(transport="streamable-http", host="0.0.0.0", port=port)
    else:
        server.run()
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
# ── Video & YouTube Commands ──────────────────────────────────
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
@cli.command("learn-youtube")
@click.argument("video_url")
@click.option("--topic", default="", help="Override topic")
@click.option("--tags", default="", help="Comma-separated tags")
@click.pass_context
def learn_youtube(ctx: click.Context, video_url: str, topic: str, tags: str) -> None:
    """Learn knowledge from a YouTube video."""
    from ..video.youtube_learner import YouTubeLearner

    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))
    learner = YouTubeLearner(trainer=trainer)

    parsed_tags = [part.strip() for part in tags.split(",") if part.strip()] if tags else None
    console.print(f"[bold]Learning from YouTube: {video_url}[/bold]")

    created = learner.learn(video_url, force_topic=topic or None, tags=parsed_tags)

    console.print(f"[green]{len(created)} memories created:[/green]")
    for mem in created:
        console.print(f" [{mem.type.value}] {mem.title}")
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
@cli.command("learn-channel")
@click.argument("channel_id")
@click.option("--max", "-m", "max_videos", default=5, help="Max videos to process")
@click.option("--topic", default="", help="Override topic")
@click.pass_context
def learn_channel(ctx: click.Context, channel_id: str, max_videos: int, topic: str) -> None:
    """Learn from latest videos of a YouTube channel."""
    from ..video.youtube_learner import YouTubeLearner

    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))
    learner = YouTubeLearner(trainer=trainer)

    console.print(f"[bold]Learning from channel: {channel_id} (max {max_videos} videos)[/bold]")
    created = learner.learn_channel(channel_id, max_videos, force_topic=topic or None)
    console.print(f"[green]{len(created)} memories created[/green]")
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
@cli.command("learn-video")
@click.argument("video_path")
@click.option("--topic", default="", help="Override topic")
@click.option("--tags", default="", help="Comma-separated tags")
@click.option("--audio", is_flag=True, help="Also transcribe audio via Whisper")
@click.pass_context
def learn_video(ctx: click.Context, video_path: str, topic: str, tags: str, audio: bool) -> None:
    """Learn from a local video file or screen recording."""
    from ..video.video_learner import VideoLearner

    _, _, _store, trainer = _get_components(ctx.obj.get("config_path"))
    learner = VideoLearner(trainer=trainer)

    parsed_tags = [part.strip() for part in tags.split(",") if part.strip()] if tags else None
    console.print(f"[bold]Learning from video: {video_path}[/bold]")

    created = learner.learn(video_path, force_topic=topic or None, tags=parsed_tags, extract_audio=audio)
    console.print(f"[green]{len(created)} memories created[/green]")
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
@cli.command("record")
@click.option("--duration", "-d", default=30, help="Duration in seconds")
@click.option("--fps", default=15, help="Frames per second")
@click.option("--output", "-o", default="", help="Output filename")
@click.pass_context
def record_screen(ctx: click.Context, duration: int, fps: int, output: str) -> None:
    """Record the screen."""
    from ..video.screen_recorder import ScreenRecorder

    # Only the config is needed here — recordings land under data_dir.
    config, _, _, _ = _get_components(ctx.obj.get("config_path"))
    recorder = ScreenRecorder(output_dir=f"{config.data_dir}/recordings")

    console.print(f"[bold]Recording screen for {duration}s at {fps} FPS...[/bold]")
    saved_to = recorder.record(duration=duration, fps=fps, output=output or None)
    console.print(f"[green]Saved: {saved_to}[/green]")
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
@cli.command("screenshot")
@click.option("--output", "-o", default="", help="Output filename")
@click.pass_context
def take_screenshot(ctx: click.Context, output: str) -> None:
    """Take a screenshot."""
    from ..video.screen_recorder import ScreenRecorder

    # Screenshots share the recordings directory under data_dir.
    config, _, _, _ = _get_components(ctx.obj.get("config_path"))
    recorder = ScreenRecorder(output_dir=f"{config.data_dir}/recordings")

    saved_to = recorder.screenshot(output=output or None)
    console.print(f"[green]Saved: {saved_to}[/green]")
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
# ── Setup Command ─────────────────────────────────────────────
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
@cli.command("setup")
@click.option("--backend", "-b", default="chroma", type=click.Choice(["chroma", "pinecone", "supabase", "qdrant", "faiss"]))
@click.option("--scan-dir", "-s", multiple=True, help="Additional directories to scan")
@click.option("--dry-run", is_flag=True, help="Preview without importing")
@click.pass_context
def setup(ctx: click.Context, backend: str, scan_dir: tuple, dry_run: bool) -> None:
    """Full setup: discover, import, and structure ALL existing knowledge."""
    from ..setup import run_setup

    # Respect an explicit --config by reusing its data_dir; otherwise default.
    data_dir = ".skillmind"
    config_path = ctx.obj.get("config_path")
    if config_path:
        data_dir = SkillMindConfig.load(config_path).data_dir

    console.print("[bold]SkillMind Setup[/bold]")
    console.print("=" * 50)

    outcome = run_setup(
        backend=backend,
        data_dir=data_dir,
        scan_dirs=list(scan_dir) if scan_dir else None,
        dry_run=dry_run,
    )

    console.print(f"\n[bold green]Setup complete! {outcome['total_imported']} items imported.[/bold green]")
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
# Allow running this module directly as a script.
if __name__ == "__main__":
    cli()
|
skillmind/config.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
1
|
+
"""Configuration management for SkillMind."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import yaml
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EmbeddingConfig(BaseModel):
    """Embedding model configuration."""

    # Which embedding backend generates vectors.
    provider: str = Field(default="sentence-transformers", description="sentence-transformers | openai")
    model: str = Field(default="all-MiniLM-L6-v2", description="Model name")
    # NOTE(review): presumably must equal the output size of `model`
    # (384 for all-MiniLM-L6-v2) — confirm against the store backends.
    dimension: int = Field(default=384, description="Embedding dimension")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class StoreConfig(BaseModel):
    """Vector store backend configuration.

    Only the settings for the selected `backend` are used; the rest are
    ignored at runtime. Secrets (API keys/URLs) can be supplied via
    environment variables through SkillMindConfig.resolve_env().
    """

    backend: str = Field(default="chroma", description="chroma | pinecone | supabase | qdrant | faiss")

    # Chroma (local, file-backed)
    chroma_path: str = Field(default=".skillmind/chroma", description="ChromaDB persistence path")

    # Pinecone (hosted)
    pinecone_api_key: str = Field(default="", description="Pinecone API key")
    pinecone_index: str = Field(default="skillmind", description="Pinecone index name")
    pinecone_environment: str = Field(default="", description="Pinecone environment")

    # Supabase (pgvector)
    supabase_url: str = Field(default="", description="Supabase project URL")
    supabase_key: str = Field(default="", description="Supabase anon/service key")
    supabase_table: str = Field(default="memories", description="Table name")

    # Qdrant (self-hosted or cloud)
    qdrant_url: str = Field(default="http://localhost:6333", description="Qdrant server URL")
    qdrant_api_key: str = Field(default="", description="Qdrant API key")
    qdrant_collection: str = Field(default="skillmind", description="Collection name")

    # FAISS (local, file-backed)
    faiss_path: str = Field(default=".skillmind/faiss", description="FAISS index path")
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class ListenerConfig(BaseModel):
    """Listener configuration."""

    # Event sources the listener subscribes to.
    watch_git: bool = Field(default=True, description="Watch git events")
    watch_files: bool = Field(default=True, description="Watch file changes")
    auto_learn: bool = Field(default=True, description="Auto-extract memories from conversations")
    # Default is 86400 s = 24 h between consolidation passes.
    consolidation_interval: int = Field(default=86400, description="Consolidation interval in seconds")
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class SanitizerConfig(BaseModel):
    """Sanitizer / anonymization configuration."""

    # Master switch; when False the redaction flags below have no effect.
    enabled: bool = Field(default=True, description="Enable automatic sanitization of sensitive data")
    redact_api_keys: bool = Field(default=True, description="Redact API keys, tokens, secrets")
    redact_personal: bool = Field(default=True, description="Redact emails, phones, IBANs")
    redact_paths: bool = Field(default=True, description="Redact env vars, private keys")
    # Exact strings to anonymize in addition to the pattern-based rules.
    redact_names: list[str] = Field(default_factory=list, description="Specific names to anonymize")
    allowlist: list[str] = Field(default_factory=list, description="Patterns to never redact")
    # Each entry is a [pattern, label] pair — presumably regex pattern plus
    # replacement label; confirm against the sanitizer implementation.
    custom_patterns: list[list[str]] = Field(default_factory=list, description="Extra [pattern, label] pairs")
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class SkillMindConfig(BaseModel):
    """Root configuration.

    Aggregates all sub-configs and provides YAML load/save plus
    environment-variable overrides via :meth:`resolve_env`.
    """

    version: str = "1.0"
    project_name: str = "default"
    data_dir: str = ".skillmind"
    embedding: EmbeddingConfig = Field(default_factory=EmbeddingConfig)
    store: StoreConfig = Field(default_factory=StoreConfig)
    sanitizer: SanitizerConfig = Field(default_factory=SanitizerConfig)
    listener: ListenerConfig = Field(default_factory=ListenerConfig)
    context_max_tokens: int = Field(default=4000, description="Max tokens for context injection")

    @classmethod
    def load(cls, path: str | Path | None = None) -> SkillMindConfig:
        """Load config from YAML file, falling back to defaults. Also loads .env."""
        # Load .env if present; python-dotenv is an optional dependency.
        try:
            from dotenv import load_dotenv
            load_dotenv()
        except ImportError:
            pass

        if path is None:
            path = Path(".skillmind/config.yml")
        path = Path(path)

        if path.exists():
            # Explicit UTF-8: don't depend on the platform's locale encoding
            # (e.g. cp1252 on Windows) when reading the config file.
            with open(path, encoding="utf-8") as f:
                data = yaml.safe_load(f) or {}
            return cls(**data)

        # No file on disk — return pure defaults.
        return cls()

    def save(self, path: str | Path | None = None) -> None:
        """Save config to YAML, creating parent directories as needed."""
        if path is None:
            path = Path(f"{self.data_dir}/config.yml")
        path = Path(path)
        path.parent.mkdir(parents=True, exist_ok=True)

        # Explicit UTF-8 so the file round-trips with load() on any platform.
        with open(path, "w", encoding="utf-8") as f:
            yaml.dump(self.model_dump(), f, default_flow_style=False, sort_keys=False)

    def resolve_env(self) -> SkillMindConfig:
        """Override config values from environment variables.

        Only non-empty environment values are applied. Returns self so the
        call can be chained after load().
        """
        env_map = {
            "SKILLMIND_BACKEND": "store.backend",
            "PINECONE_API_KEY": "store.pinecone_api_key",
            "PINECONE_ENVIRONMENT": "store.pinecone_environment",
            "SUPABASE_URL": "store.supabase_url",
            "SUPABASE_KEY": "store.supabase_key",
            "QDRANT_URL": "store.qdrant_url",
            "QDRANT_API_KEY": "store.qdrant_api_key",
        }
        for env_var, config_path in env_map.items():
            val = os.environ.get(env_var)
            if val:
                # Walk the dotted path to the owning sub-config, then assign.
                parts = config_path.split(".")
                obj: Any = self
                for part in parts[:-1]:
                    obj = getattr(obj, part)
                setattr(obj, parts[-1], val)
        return self
|