superlocalmemory 2.4.2 → 2.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +62 -0
- package/README.md +62 -2
- package/docs/ARCHITECTURE-V2.5.md +190 -0
- package/docs/architecture-diagram.drawio +405 -0
- package/mcp_server.py +115 -14
- package/package.json +4 -1
- package/scripts/generate-thumbnails.py +220 -0
- package/src/agent_registry.py +385 -0
- package/src/db_connection_manager.py +532 -0
- package/src/event_bus.py +555 -0
- package/src/memory_store_v2.py +626 -471
- package/src/provenance_tracker.py +322 -0
- package/src/subscription_manager.py +399 -0
- package/src/trust_scorer.py +456 -0
- package/src/webhook_dispatcher.py +229 -0
- package/ui/app.js +425 -0
- package/ui/index.html +147 -1
- package/ui/js/agents.js +192 -0
- package/ui/js/clusters.js +80 -0
- package/ui/js/core.js +230 -0
- package/ui/js/events.js +178 -0
- package/ui/js/graph.js +32 -0
- package/ui/js/init.js +31 -0
- package/ui/js/memories.js +149 -0
- package/ui/js/modal.js +139 -0
- package/ui/js/patterns.js +93 -0
- package/ui/js/profiles.js +202 -0
- package/ui/js/search.js +59 -0
- package/ui/js/settings.js +167 -0
- package/ui/js/timeline.js +32 -0
- package/ui_server.py +69 -1665
- package/docs/COMPETITIVE-ANALYSIS.md +0 -210
package/mcp_server.py
CHANGED
|
@@ -46,6 +46,14 @@ except ImportError as e:
|
|
|
46
46
|
print(f"Ensure SuperLocalMemory V2 is installed at {MEMORY_DIR}", file=sys.stderr)
|
|
47
47
|
sys.exit(1)
|
|
48
48
|
|
|
49
|
+
# Agent Registry + Provenance (v2.5+)
|
|
50
|
+
try:
|
|
51
|
+
from agent_registry import AgentRegistry
|
|
52
|
+
from provenance_tracker import ProvenanceTracker
|
|
53
|
+
PROVENANCE_AVAILABLE = True
|
|
54
|
+
except ImportError:
|
|
55
|
+
PROVENANCE_AVAILABLE = False
|
|
56
|
+
|
|
49
57
|
# Parse command line arguments early (needed for port in constructor)
|
|
50
58
|
import argparse as _argparse
|
|
51
59
|
_parser = _argparse.ArgumentParser(add_help=False)
|
|
@@ -63,6 +71,80 @@ mcp = FastMCP(
|
|
|
63
71
|
# Database path
|
|
64
72
|
DB_PATH = MEMORY_DIR / "memory.db"
|
|
65
73
|
|
|
74
|
+
# ============================================================================
|
|
75
|
+
# Shared singleton instances (v2.5 — fixes per-call instantiation overhead)
|
|
76
|
+
# All MCP tool handlers share one MemoryStoreV2 instance instead of creating
|
|
77
|
+
# a new one per call. This means one ConnectionManager, one TF-IDF vectorizer,
|
|
78
|
+
# one write queue — shared across all concurrent MCP requests.
|
|
79
|
+
# ============================================================================
|
|
80
|
+
|
|
81
|
+
_store = None
|
|
82
|
+
_graph_engine = None
|
|
83
|
+
_pattern_learner = None
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def get_store() -> MemoryStoreV2:
    """Return the process-wide MemoryStoreV2, constructing it lazily on first use.

    All MCP tool handlers share this one instance (one ConnectionManager,
    one TF-IDF vectorizer, one write queue) instead of building a new
    store per call.
    """
    global _store
    store = _store
    if store is None:
        store = MemoryStoreV2(DB_PATH)
        _store = store
    return store
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def get_graph_engine() -> GraphEngine:
    """Return the process-wide GraphEngine, constructing it lazily on first use."""
    global _graph_engine
    engine = _graph_engine
    if engine is None:
        engine = GraphEngine(DB_PATH)
        _graph_engine = engine
    return engine
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def get_pattern_learner() -> PatternLearner:
    """Return the process-wide PatternLearner, constructing it lazily on first use."""
    global _pattern_learner
    learner = _pattern_learner
    if learner is None:
        learner = PatternLearner(DB_PATH)
        _pattern_learner = learner
    return learner
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
_agent_registry = None
|
|
111
|
+
_provenance_tracker = None
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def get_agent_registry():
    """Get shared AgentRegistry singleton (v2.5+). Returns None if unavailable.

    "Unavailable" means the agent_registry/provenance_tracker imports
    failed at module load (PROVENANCE_AVAILABLE is False).
    """
    global _agent_registry
    if PROVENANCE_AVAILABLE:
        if _agent_registry is None:
            _agent_registry = AgentRegistry.get_instance(DB_PATH)
        return _agent_registry
    return None
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def get_provenance_tracker():
    """Get shared ProvenanceTracker singleton (v2.5+). Returns None if unavailable.

    "Unavailable" means the agent_registry/provenance_tracker imports
    failed at module load (PROVENANCE_AVAILABLE is False).
    """
    global _provenance_tracker
    if PROVENANCE_AVAILABLE:
        if _provenance_tracker is None:
            _provenance_tracker = ProvenanceTracker.get_instance(DB_PATH)
        return _provenance_tracker
    return None
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _register_mcp_agent(agent_name: str = "mcp-client"):
    """Register the calling MCP agent and record activity. Non-blocking.

    Best-effort telemetry: does nothing when the registry is unavailable,
    and swallows registration failures so a tool call is never blocked.
    """
    registry = get_agent_registry()
    if not registry:
        return
    try:
        registry.register_agent(
            agent_id=f"mcp:{agent_name}",
            agent_name=agent_name,
            protocol="mcp",
        )
    except Exception:
        # Deliberate: registration is optional bookkeeping, never fatal.
        pass
|
|
146
|
+
|
|
147
|
+
|
|
66
148
|
# ============================================================================
|
|
67
149
|
# MCP TOOLS (Functions callable by AI)
|
|
68
150
|
# ============================================================================
|
|
@@ -103,8 +185,11 @@ async def remember(
|
|
|
103
185
|
remember("JWT auth with refresh tokens", tags="security,auth", importance=8)
|
|
104
186
|
"""
|
|
105
187
|
try:
|
|
188
|
+
# Register MCP agent (v2.5 — agent tracking)
|
|
189
|
+
_register_mcp_agent()
|
|
190
|
+
|
|
106
191
|
# Use existing MemoryStoreV2 class (no duplicate logic)
|
|
107
|
-
store =
|
|
192
|
+
store = get_store()
|
|
108
193
|
|
|
109
194
|
# Call existing add_memory method
|
|
110
195
|
memory_id = store.add_memory(
|
|
@@ -114,6 +199,22 @@ async def remember(
|
|
|
114
199
|
importance=importance
|
|
115
200
|
)
|
|
116
201
|
|
|
202
|
+
# Record provenance (v2.5 — who created this memory)
|
|
203
|
+
prov = get_provenance_tracker()
|
|
204
|
+
if prov:
|
|
205
|
+
try:
|
|
206
|
+
prov.record_provenance(memory_id, created_by="mcp:client", source_protocol="mcp")
|
|
207
|
+
except Exception:
|
|
208
|
+
pass
|
|
209
|
+
|
|
210
|
+
# Track write in agent registry
|
|
211
|
+
registry = get_agent_registry()
|
|
212
|
+
if registry:
|
|
213
|
+
try:
|
|
214
|
+
registry.record_write("mcp:mcp-client")
|
|
215
|
+
except Exception:
|
|
216
|
+
pass
|
|
217
|
+
|
|
117
218
|
# Format response
|
|
118
219
|
preview = content[:100] + "..." if len(content) > 100 else content
|
|
119
220
|
|
|
@@ -174,7 +275,7 @@ async def recall(
|
|
|
174
275
|
"""
|
|
175
276
|
try:
|
|
176
277
|
# Use existing MemoryStoreV2 class
|
|
177
|
-
store =
|
|
278
|
+
store = get_store()
|
|
178
279
|
|
|
179
280
|
# Call existing search method
|
|
180
281
|
results = store.search(query, limit=limit)
|
|
@@ -223,7 +324,7 @@ async def list_recent(limit: int = 10) -> dict:
|
|
|
223
324
|
"""
|
|
224
325
|
try:
|
|
225
326
|
# Use existing MemoryStoreV2 class
|
|
226
|
-
store =
|
|
327
|
+
store = get_store()
|
|
227
328
|
|
|
228
329
|
# Call existing list_all method
|
|
229
330
|
memories = store.list_all(limit=limit)
|
|
@@ -263,7 +364,7 @@ async def get_status() -> dict:
|
|
|
263
364
|
"""
|
|
264
365
|
try:
|
|
265
366
|
# Use existing MemoryStoreV2 class
|
|
266
|
-
store =
|
|
367
|
+
store = get_store()
|
|
267
368
|
|
|
268
369
|
# Call existing get_stats method
|
|
269
370
|
stats = store.get_stats()
|
|
@@ -303,7 +404,7 @@ async def build_graph() -> dict:
|
|
|
303
404
|
"""
|
|
304
405
|
try:
|
|
305
406
|
# Use existing GraphEngine class
|
|
306
|
-
engine =
|
|
407
|
+
engine = get_graph_engine()
|
|
307
408
|
|
|
308
409
|
# Call existing build_graph method
|
|
309
410
|
stats = engine.build_graph()
|
|
@@ -461,7 +562,7 @@ async def search(query: str) -> dict:
|
|
|
461
562
|
{"results": [{"id": str, "title": str, "text": str, "url": str}]}
|
|
462
563
|
"""
|
|
463
564
|
try:
|
|
464
|
-
store =
|
|
565
|
+
store = get_store()
|
|
465
566
|
raw_results = store.search(query, limit=20)
|
|
466
567
|
|
|
467
568
|
results = []
|
|
@@ -504,7 +605,7 @@ async def fetch(id: str) -> dict:
|
|
|
504
605
|
{"id": str, "title": str, "text": str, "url": str, "metadata": dict|null}
|
|
505
606
|
"""
|
|
506
607
|
try:
|
|
507
|
-
store =
|
|
608
|
+
store = get_store()
|
|
508
609
|
mem = store.get_by_id(int(id))
|
|
509
610
|
|
|
510
611
|
if not mem:
|
|
@@ -549,7 +650,7 @@ async def get_recent_memories_resource(limit: str) -> str:
|
|
|
549
650
|
Usage: memory://recent/10
|
|
550
651
|
"""
|
|
551
652
|
try:
|
|
552
|
-
store =
|
|
653
|
+
store = get_store()
|
|
553
654
|
memories = store.list_all(limit=int(limit))
|
|
554
655
|
return json.dumps(memories, indent=2)
|
|
555
656
|
except Exception as e:
|
|
@@ -564,7 +665,7 @@ async def get_stats_resource() -> str:
|
|
|
564
665
|
Usage: memory://stats
|
|
565
666
|
"""
|
|
566
667
|
try:
|
|
567
|
-
store =
|
|
668
|
+
store = get_store()
|
|
568
669
|
stats = store.get_stats()
|
|
569
670
|
return json.dumps(stats, indent=2)
|
|
570
671
|
except Exception as e:
|
|
@@ -579,7 +680,7 @@ async def get_clusters_resource() -> str:
|
|
|
579
680
|
Usage: memory://graph/clusters
|
|
580
681
|
"""
|
|
581
682
|
try:
|
|
582
|
-
engine =
|
|
683
|
+
engine = get_graph_engine()
|
|
583
684
|
stats = engine.get_stats()
|
|
584
685
|
clusters = stats.get('clusters', [])
|
|
585
686
|
return json.dumps(clusters, indent=2)
|
|
@@ -595,7 +696,7 @@ async def get_coding_identity_resource() -> str:
|
|
|
595
696
|
Usage: memory://patterns/identity
|
|
596
697
|
"""
|
|
597
698
|
try:
|
|
598
|
-
learner =
|
|
699
|
+
learner = get_pattern_learner()
|
|
599
700
|
patterns = learner.get_identity_context(min_confidence=0.5)
|
|
600
701
|
return json.dumps(patterns, indent=2)
|
|
601
702
|
except Exception as e:
|
|
@@ -615,7 +716,7 @@ async def coding_identity_prompt() -> str:
|
|
|
615
716
|
based on learned preferences and patterns.
|
|
616
717
|
"""
|
|
617
718
|
try:
|
|
618
|
-
learner =
|
|
719
|
+
learner = get_pattern_learner()
|
|
619
720
|
patterns = learner.get_identity_context(min_confidence=0.6)
|
|
620
721
|
|
|
621
722
|
if not patterns:
|
|
@@ -656,7 +757,7 @@ async def project_context_prompt(project_name: str) -> str:
|
|
|
656
757
|
Formatted prompt with relevant project memories
|
|
657
758
|
"""
|
|
658
759
|
try:
|
|
659
|
-
store =
|
|
760
|
+
store = get_store()
|
|
660
761
|
|
|
661
762
|
# Search for project-related memories
|
|
662
763
|
memories = store.search(f"project:{project_name}", limit=20)
|
|
@@ -711,7 +812,7 @@ if __name__ == "__main__":
|
|
|
711
812
|
# Print startup message to stderr (stdout is used for MCP protocol)
|
|
712
813
|
print("=" * 60, file=sys.stderr)
|
|
713
814
|
print("SuperLocalMemory V2 - MCP Server", file=sys.stderr)
|
|
714
|
-
print("Version: 2.
|
|
815
|
+
print("Version: 2.5.0", file=sys.stderr)
|
|
715
816
|
print("=" * 60, file=sys.stderr)
|
|
716
817
|
print("Created by: Varun Pratap Bhardwaj (Solution Architect)", file=sys.stderr)
|
|
717
818
|
print("Repository: https://github.com/varun369/SuperLocalMemoryV2", file=sys.stderr)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "superlocalmemory",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.5.1",
|
|
4
4
|
"description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 16+ AI tools. 100% local, zero cloud dependencies.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"ai-memory",
|
|
@@ -98,5 +98,8 @@
|
|
|
98
98
|
"year": "2026",
|
|
99
99
|
"github": "https://github.com/varun369",
|
|
100
100
|
"required": "Attribution notice must be preserved in all copies and derivative works"
|
|
101
|
+
},
|
|
102
|
+
"dependencies": {
|
|
103
|
+
"docx": "^9.5.1"
|
|
101
104
|
}
|
|
102
105
|
}
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
SuperLocalMemory V2 - Thumbnail Generator
|
|
4
|
+
Copyright (c) 2026 Varun Pratap Bhardwaj
|
|
5
|
+
Licensed under MIT License
|
|
6
|
+
|
|
7
|
+
Generates optimized thumbnail versions of all screenshots.
|
|
8
|
+
- Size: 320×180px (16:9 ratio)
|
|
9
|
+
- Format: PNG (for wiki/docs) and WebP (for website)
|
|
10
|
+
- Quality: High enough to recognize content
|
|
11
|
+
- File size: < 50KB per thumbnail
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import os
|
|
15
|
+
import json
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from PIL import Image, ImageFilter, ImageOps
|
|
18
|
+
from datetime import datetime
|
|
19
|
+
|
|
20
|
+
# Configuration
|
|
21
|
+
SCREENSHOT_DIR = Path(__file__).parent.parent / "assets" / "screenshots"
|
|
22
|
+
THUMBNAIL_DIR = Path(__file__).parent.parent / "assets" / "thumbnails"
|
|
23
|
+
THUMBNAIL_SIZE = (320, 180) # 16:9 ratio
|
|
24
|
+
QUALITY_PNG = 95
|
|
25
|
+
QUALITY_WEBP = 85
|
|
26
|
+
MAX_FILESIZE = 50 * 1024 # 50KB
|
|
27
|
+
|
|
28
|
+
# Category mapping based on filename patterns
|
|
29
|
+
CATEGORY_MAP = {
|
|
30
|
+
"overview": "dashboard",
|
|
31
|
+
"timeline": "timeline",
|
|
32
|
+
"agents": "agents",
|
|
33
|
+
"patterns": "patterns",
|
|
34
|
+
"clusters": "clusters",
|
|
35
|
+
"memories": "memories",
|
|
36
|
+
"graph": "graph",
|
|
37
|
+
"filtered": "search",
|
|
38
|
+
"live-events": "events",
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
def get_category(filename):
    """Map a screenshot filename onto its dashboard category.

    Returns the first CATEGORY_MAP entry whose key occurs in the
    case-insensitive filename, or "general" when nothing matches.
    """
    lowered = filename.lower()
    return next(
        (category for pattern, category in CATEGORY_MAP.items() if pattern in lowered),
        "general",
    )
|
|
47
|
+
|
|
48
|
+
def get_title(filename):
    """Generate a human-readable title from a screenshot filename.

    Strips the extension, an optional "dashboard-" prefix, and any
    "-dark" theme marker, then title-cases the remaining dash-separated
    words.

    Args:
        filename: Screenshot file name, e.g. "dashboard-live-events-dark.png".

    Returns:
        Title string, e.g. "Live Events".
    """
    name = Path(filename).stem
    # Bug fix: the previous code sliced name[9:], but "dashboard-" is 10
    # characters, so the trailing "-" survived and split("-") produced an
    # empty first word — yielding titles with a leading space (" Overview").
    # removeprefix() drops the whole prefix and is a no-op otherwise.
    name = name.removeprefix("dashboard-")
    # Remove '-dark' suffix (theme marker, not part of the title)
    name = name.replace("-dark", "")
    # Convert to title case
    return " ".join(word.capitalize() for word in name.split("-"))
|
|
59
|
+
|
|
60
|
+
def get_description(filename, category):
    """Build a one-line description for a screenshot from its category.

    Falls back to "Dashboard interface" for unknown categories and appends
    a "(dark mode)" marker when the filename carries a "-dark" suffix.
    NOTE(review): get_category maps "overview" filenames to "dashboard",
    which has no entry here — confirm whether the "overview" key is reachable.
    """
    category_blurbs = {
        "overview": "Main dashboard with memory statistics and knowledge graph overview",
        "timeline": "Chronological timeline of all stored memories and events",
        "agents": "Agent connections and activity tracking",
        "patterns": "Learned coding patterns and user preferences",
        "clusters": "Knowledge graph clusters and relationships",
        "memories": "Detailed memory list with search and filtering",
        "graph": "Interactive knowledge graph visualization",
        "search": "Advanced memory search and filtering interface",
        "events": "Real-time live event stream from memory operations",
    }
    suffix = " (dark mode)" if "-dark" in filename.lower() else ""
    return category_blurbs.get(category, "Dashboard interface") + suffix
|
|
78
|
+
|
|
79
|
+
def resize_and_crop(image, target_size):
    """Scale *image* to fill target_size, then center-crop to the exact size.

    The image is resized (LANCZOS) so that one dimension matches the
    target while preserving aspect ratio; the overshoot along the other
    axis is trimmed equally from both sides.
    """
    target_w, target_h = target_size
    aspect = image.width / image.height

    if aspect > target_w / target_h:
        # Source is wider than target: fit height, trim left/right.
        scaled = image.resize((int(target_h * aspect), target_h), Image.Resampling.LANCZOS)
        x0 = (scaled.width - target_w) // 2
        return scaled.crop((x0, 0, x0 + target_w, target_h))

    # Source is taller (or same ratio): fit width, trim top/bottom.
    scaled = image.resize((target_w, int(target_w / aspect)), Image.Resampling.LANCZOS)
    y0 = (scaled.height - target_h) // 2
    return scaled.crop((0, y0, target_w, y0 + target_h))
|
|
101
|
+
|
|
102
|
+
def apply_sharpening(image):
    """Return a subtly sharpened copy of *image* via a mild unsharp mask."""
    mask = ImageFilter.UnsharpMask(radius=1, percent=100, threshold=3)
    return image.filter(mask)
|
|
106
|
+
|
|
107
|
+
def generate_thumbnail(source_path, dest_dir, metadata):
    """Generate PNG and WebP thumbnails for a single source image.

    Writes "<stem>-thumb.png" and "<stem>-thumb.webp" into dest_dir and
    records a per-image entry in *metadata* (mutated in place) keyed by
    the source file's stem.

    Args:
        source_path: Path to the source screenshot.
        dest_dir: Directory that receives the thumbnail files.
        metadata: Dict collecting per-thumbnail info for index.json.

    Returns:
        True on success, False if the image could not be processed.
    """
    try:
        with Image.open(source_path) as img:
            # Convert RGBA to RGB (flatten transparency onto white) —
            # thumbnails are always saved as opaque RGB.
            if img.mode == "RGBA":
                background = Image.new("RGB", img.size, (255, 255, 255))
                background.paste(img, mask=img.split()[3])
                img = background
            elif img.mode != "RGB":
                img = img.convert("RGB")

            # Resize/crop to exact 16:9 dimensions, then sharpen.
            thumbnail = apply_sharpening(resize_and_crop(img, THUMBNAIL_SIZE))

            filename = source_path.stem

            # Bug fix: output names must include the source stem — a constant
            # name makes every thumbnail overwrite the previous one. The
            # "{filename}-thumb.*" scheme matches main()'s summary globs and
            # the per-file metadata entries below.
            png_path = dest_dir / f"{filename}-thumb.png"
            thumbnail.save(png_path, "PNG", quality=QUALITY_PNG, optimize=True)
            png_size = png_path.stat().st_size

            webp_path = dest_dir / f"{filename}-thumb.webp"
            thumbnail.save(webp_path, "WEBP", quality=QUALITY_WEBP, method=6)
            webp_size = webp_path.stat().st_size

            # Warn (but do not fail) when a thumbnail exceeds the size budget.
            if png_size > MAX_FILESIZE:
                print(f"⚠️ PNG {filename}: {png_size/1024:.1f}KB (exceeds limit)")
            if webp_size > MAX_FILESIZE:
                print(f"⚠️ WebP {filename}: {webp_size/1024:.1f}KB (exceeds limit)")

            print(f"✓ {filename}")
            print(f"  PNG: {png_size/1024:.1f}KB | WebP: {webp_size/1024:.1f}KB")

            # Store metadata for the index.json written by main().
            category = get_category(filename)
            metadata[filename] = {
                "title": get_title(filename),
                "description": get_description(source_path.name, category),
                "category": category,
                "full_image": f"../screenshots/dashboard/{source_path.name}",
                "thumbnail_png": f"{filename}-thumb.png",
                "thumbnail_webp": f"{filename}-thumb.webp",
                "created": datetime.now().isoformat(),
                "original_size": f"{img.width}×{img.height}",
                "thumbnail_size": f"{THUMBNAIL_SIZE[0]}×{THUMBNAIL_SIZE[1]}",
                "png_size_kb": round(png_size / 1024, 2),
                "webp_size_kb": round(webp_size / 1024, 2),
            }
        return True
    except Exception as e:
        print(f"✗ {source_path.name}: {str(e)}")
        return False
|
|
166
|
+
|
|
167
|
+
def main():
    """Generate thumbnails for every screenshot and write a metadata index.

    Returns 0 when every image processed cleanly, 1 when the screenshot
    directory is missing, empty, or any image failed.
    """
    # Ensure thumbnail directory exists
    THUMBNAIL_DIR.mkdir(parents=True, exist_ok=True)

    # Verify screenshot directory exists
    if not SCREENSHOT_DIR.exists():
        print(f"Error: Screenshot directory not found: {SCREENSHOT_DIR}")
        return 1

    # Collect all non-hidden images under the screenshot tree.
    image_extensions = {".png", ".jpg", ".jpeg", ".webp"}
    sources = sorted(
        path
        for path in SCREENSHOT_DIR.glob("**/*")
        if path.is_file()
        and path.suffix.lower() in image_extensions
        and not path.name.startswith(".")
    )

    if not sources:
        print(f"No images found in {SCREENSHOT_DIR}")
        return 1

    print(f"Found {len(sources)} images in {SCREENSHOT_DIR}")
    print(f"Generating thumbnails to {THUMBNAIL_DIR}\n")

    metadata = {}
    successful = 0
    failed = 0
    for source in sources:
        if generate_thumbnail(source, THUMBNAIL_DIR, metadata):
            successful += 1
        else:
            failed += 1

    # Save metadata index
    index_path = THUMBNAIL_DIR / "index.json"
    with open(index_path, "w") as f:
        json.dump(metadata, f, indent=2, sort_keys=True)
    print(f"\n✓ Saved metadata index to {index_path}")

    # Print summary
    print(f"\n{'='*60}")
    print(f"Summary:")
    print(f"  Total processed: {len(sources)}")
    print(f"  Successful: {successful}")
    print(f"  Failed: {failed}")
    print(f"  PNG thumbnails: {len(list(THUMBNAIL_DIR.glob('*-thumb.png')))}")
    print(f"  WebP thumbnails: {len(list(THUMBNAIL_DIR.glob('*-thumb.webp')))}")
    print(f"  Total size: {sum(f.stat().st_size for f in THUMBNAIL_DIR.glob('*-thumb.*')) / 1024:.1f}KB")

    return 0 if failed == 0 else 1

if __name__ == "__main__":
    exit(main())
|