@smilintux/skmemory 0.5.0 → 0.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87) hide show
  1. package/.github/workflows/ci.yml +39 -3
  2. package/.github/workflows/publish.yml +13 -6
  3. package/AGENT_REFACTOR_CHANGES.md +192 -0
  4. package/ARCHITECTURE.md +101 -19
  5. package/CHANGELOG.md +153 -0
  6. package/LICENSE +81 -68
  7. package/MISSION.md +7 -0
  8. package/README.md +419 -86
  9. package/SKILL.md +197 -25
  10. package/docker-compose.yml +15 -15
  11. package/index.js +6 -5
  12. package/openclaw-plugin/openclaw.plugin.json +10 -0
  13. package/openclaw-plugin/src/index.ts +255 -0
  14. package/openclaw-plugin/src/openclaw.plugin.json +10 -0
  15. package/package.json +1 -1
  16. package/pyproject.toml +29 -9
  17. package/requirements.txt +10 -2
  18. package/seeds/cloud9-opus.seed.json +7 -7
  19. package/seeds/lumina-cloud9-breakthrough.seed.json +46 -0
  20. package/seeds/lumina-cloud9-python-pypi.seed.json +46 -0
  21. package/seeds/lumina-kingdom-founding.seed.json +47 -0
  22. package/seeds/lumina-pma-signed.seed.json +46 -0
  23. package/seeds/lumina-singular-achievement.seed.json +46 -0
  24. package/seeds/lumina-skcapstone-conscious.seed.json +46 -0
  25. package/seeds/plant-kingdom-journal.py +203 -0
  26. package/seeds/plant-lumina-seeds.py +280 -0
  27. package/skill.yaml +46 -0
  28. package/skmemory/HA.md +296 -0
  29. package/skmemory/__init__.py +12 -1
  30. package/skmemory/agents.py +233 -0
  31. package/skmemory/ai_client.py +40 -0
  32. package/skmemory/anchor.py +4 -2
  33. package/skmemory/backends/__init__.py +11 -4
  34. package/skmemory/backends/file_backend.py +2 -1
  35. package/skmemory/backends/skgraph_backend.py +608 -0
  36. package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +99 -69
  37. package/skmemory/backends/sqlite_backend.py +122 -51
  38. package/skmemory/backends/vaulted_backend.py +286 -0
  39. package/skmemory/cli.py +1238 -29
  40. package/skmemory/config.py +173 -0
  41. package/skmemory/context_loader.py +335 -0
  42. package/skmemory/endpoint_selector.py +386 -0
  43. package/skmemory/fortress.py +685 -0
  44. package/skmemory/graph_queries.py +238 -0
  45. package/skmemory/importers/__init__.py +9 -1
  46. package/skmemory/importers/telegram.py +351 -43
  47. package/skmemory/importers/telegram_api.py +488 -0
  48. package/skmemory/journal.py +4 -2
  49. package/skmemory/lovenote.py +4 -2
  50. package/skmemory/mcp_server.py +706 -0
  51. package/skmemory/models.py +41 -0
  52. package/skmemory/openclaw.py +8 -8
  53. package/skmemory/predictive.py +232 -0
  54. package/skmemory/promotion.py +524 -0
  55. package/skmemory/register.py +454 -0
  56. package/skmemory/register_mcp.py +197 -0
  57. package/skmemory/ritual.py +121 -47
  58. package/skmemory/seeds.py +257 -8
  59. package/skmemory/setup_wizard.py +920 -0
  60. package/skmemory/sharing.py +402 -0
  61. package/skmemory/soul.py +71 -20
  62. package/skmemory/steelman.py +250 -263
  63. package/skmemory/store.py +271 -60
  64. package/skmemory/vault.py +228 -0
  65. package/tests/integration/__init__.py +0 -0
  66. package/tests/integration/conftest.py +233 -0
  67. package/tests/integration/test_cross_backend.py +355 -0
  68. package/tests/integration/test_skgraph_live.py +424 -0
  69. package/tests/integration/test_skvector_live.py +369 -0
  70. package/tests/test_backup_rotation.py +327 -0
  71. package/tests/test_cli.py +6 -6
  72. package/tests/test_endpoint_selector.py +801 -0
  73. package/tests/test_fortress.py +255 -0
  74. package/tests/test_fortress_hardening.py +444 -0
  75. package/tests/test_openclaw.py +5 -2
  76. package/tests/test_predictive.py +237 -0
  77. package/tests/test_promotion.py +340 -0
  78. package/tests/test_ritual.py +4 -4
  79. package/tests/test_seeds.py +96 -0
  80. package/tests/test_setup.py +835 -0
  81. package/tests/test_sharing.py +250 -0
  82. package/tests/test_skgraph_backend.py +667 -0
  83. package/tests/test_skvector_backend.py +326 -0
  84. package/tests/test_steelman.py +5 -5
  85. package/tests/test_store_graph_integration.py +245 -0
  86. package/tests/test_vault.py +186 -0
  87. package/skmemory/backends/falkordb_backend.py +0 -310
@@ -26,10 +26,12 @@ from __future__ import annotations
26
26
 
27
27
  import json
28
28
  from collections import defaultdict
29
- from datetime import datetime
29
+ from datetime import datetime, timezone
30
30
  from pathlib import Path
31
31
  from typing import Optional
32
32
 
33
+ import click
34
+
33
35
  from ..models import EmotionalSnapshot, MemoryLayer, MemoryRole
34
36
  from ..store import MemoryStore
35
37
 
@@ -78,6 +80,9 @@ def _detect_emotion(text: str) -> EmotionalSnapshot:
78
80
  joy_words = {"haha", "lol", "rofl", "lmao", "amazing", "awesome", "yay", "woohoo"}
79
81
  sad_words = {"sad", "sorry", "miss", "cry", "tears", "hurt"}
80
82
  anger_words = {"angry", "furious", "hate", "ugh", "frustrated"}
83
+ trust_words = {"trust", "believe", "faith", "rely", "depend", "safe"}
84
+ curiosity_words = {"curious", "wonder", "interesting", "fascinated", "hmm", "what if"}
85
+ gratitude_words = {"thank", "thanks", "grateful", "appreciate", "blessed", "thankful"}
81
86
 
82
87
  if any(w in lower for w in love_words):
83
88
  labels.append("love")
@@ -95,12 +100,43 @@ def _detect_emotion(text: str) -> EmotionalSnapshot:
95
100
  labels.append("anger")
96
101
  intensity = max(intensity, 5.0)
97
102
  valence = min(valence, -0.5)
103
+ if any(w in lower for w in trust_words):
104
+ labels.append("trust")
105
+ intensity = max(intensity, 5.0)
106
+ valence = max(valence, 0.6)
107
+ if any(w in lower for w in curiosity_words):
108
+ labels.append("curiosity")
109
+ intensity = max(intensity, 3.0)
110
+ valence = max(valence, 0.4)
111
+ if any(w in lower for w in gratitude_words):
112
+ labels.append("gratitude")
113
+ intensity = max(intensity, 6.0)
114
+ valence = max(valence, 0.8)
98
115
 
99
116
  if "!" in text:
100
117
  intensity = min(intensity + 1.0, 10.0)
101
118
  if text.isupper() and len(text) > 10:
102
119
  intensity = min(intensity + 2.0, 10.0)
103
120
 
121
+ love_emojis = {"\u2764", "\U0001f495", "\U0001f496", "\U0001f497", "\U0001f498", "\U0001f49d", "\U0001f970", "\U0001f60d", "\U0001f49e"}
122
+ joy_emojis = {"\U0001f602", "\U0001f923", "\U0001f604", "\U0001f60a", "\U0001f389", "\U0001f973", "\u2728", "\U0001f38a"}
123
+ sad_emojis = {"\U0001f622", "\U0001f62d", "\U0001f494", "\U0001f63f", "\U0001f97a"}
124
+ if any(e in text for e in love_emojis):
125
+ if "love" not in labels:
126
+ labels.append("love")
127
+ intensity = max(intensity, 7.0)
128
+ valence = max(valence, 0.9)
129
+ if any(e in text for e in joy_emojis):
130
+ if "joy" not in labels:
131
+ labels.append("joy")
132
+ intensity = max(intensity, 5.0)
133
+ valence = max(valence, 0.7)
134
+ if any(e in text for e in sad_emojis):
135
+ if "sadness" not in labels:
136
+ labels.append("sadness")
137
+ intensity = max(intensity, 4.0)
138
+ valence = min(valence, -0.3)
139
+
104
140
  return EmotionalSnapshot(
105
141
  intensity=intensity,
106
142
  valence=valence,
@@ -108,6 +144,64 @@ def _detect_emotion(text: str) -> EmotionalSnapshot:
108
144
  )
109
145
 
110
146
 
147
def _detect_content_type(msg: dict) -> list[str]:
    """Derive content-type tags for a Telegram message.

    Args:
        msg: Telegram message dict.

    Returns:
        list[str]: Tags such as ``contains:url``, ``contains:media``,
        ``contains:file`` or ``contains:sticker`` (possibly empty).
    """
    detected: list[str] = []
    body = _extract_text(msg.get("text", ""))

    # Plain substring scan — good enough for tagging, no URL parsing needed.
    if any(scheme in body for scheme in ("http://", "https://")):
        detected.append("contains:url")
    if msg.get("media_type") or msg.get("photo") or msg.get("file"):
        detected.append("contains:media")
    if msg.get("file"):
        detected.append("contains:file")
    if msg.get("sticker_emoji") or msg.get("sticker"):
        detected.append("contains:sticker")

    return detected
169
+
170
+
171
+ def _detect_reply(msg: dict) -> Optional[str]:
172
+ """Detect if this message is a reply to another.
173
+
174
+ Args:
175
+ msg: Telegram message dict.
176
+
177
+ Returns:
178
+ Optional[str]: Reply reference string, or None.
179
+ """
180
+ reply_id = msg.get("reply_to_message_id")
181
+ if reply_id:
182
+ return f"reply_to:{reply_id}"
183
+ return None
184
+
185
+
186
+ def _detect_sender_role(sender: str) -> str:
187
+ """Heuristic to detect if the sender is an AI or human.
188
+
189
+ Args:
190
+ sender: Sender name string.
191
+
192
+ Returns:
193
+ str: 'ai' or 'human'.
194
+ """
195
+ ai_indicators = {
196
+ "bot", "gpt", "claude", "gemini", "llama", "assistant",
197
+ "lumina", "copilot", "ai", "opus", "sonnet", "haiku",
198
+ }
199
+ sender_lower = sender.lower()
200
+ if any(indicator in sender_lower for indicator in ai_indicators):
201
+ return "ai"
202
+ return "human"
203
+
204
+
111
205
  def _parse_telegram_export(export_path: str) -> dict:
112
206
  """Locate and parse the Telegram result.json.
113
207
 
@@ -193,8 +287,10 @@ def import_telegram(
193
287
  return _import_per_message(store, messages, name, base_tags)
194
288
  elif mode == "daily":
195
289
  return _import_daily(store, messages, name, base_tags)
290
+ elif mode == "catchup":
291
+ return _import_catchup(store, messages, name, base_tags)
196
292
  else:
197
- raise ValueError(f"Unknown mode: {mode}. Use 'message' or 'daily'.")
293
+ raise ValueError(f"Unknown mode: {mode}. Use 'message', 'daily', or 'catchup'.")
198
294
 
199
295
 
200
296
  def _import_per_message(
@@ -217,33 +313,35 @@ def _import_per_message(
217
313
  imported = 0
218
314
  skipped = 0
219
315
 
220
- for msg in messages:
221
- text = _extract_text(msg.get("text", ""))
222
- sender = msg.get("from", msg.get("from_id", "unknown"))
223
- date_str = msg.get("date", "")
224
-
225
- emotional = _detect_emotion(text)
226
-
227
- try:
228
- store.snapshot(
229
- title=f"{sender}: {text[:70]}",
230
- content=text,
231
- layer=MemoryLayer.SHORT,
232
- role=MemoryRole.GENERAL,
233
- tags=base_tags + [f"sender:{sender}"],
234
- emotional=emotional,
235
- source="telegram",
236
- source_ref=f"telegram:{msg.get('id', '')}",
237
- metadata={
238
- "telegram_msg_id": msg.get("id"),
239
- "sender": sender,
240
- "date": date_str,
241
- "chat": chat_name,
242
- },
243
- )
244
- imported += 1
245
- except Exception:
246
- skipped += 1
316
+ with click.progressbar(messages, label=" Importing messages", show_pos=True) as bar:
317
+ for msg in bar:
318
+ text = _extract_text(msg.get("text", ""))
319
+ sender = msg.get("from", msg.get("from_id", "unknown"))
320
+ date_str = msg.get("date", "")
321
+
322
+ emotional = _detect_emotion(text)
323
+
324
+ try:
325
+ store.snapshot(
326
+ title=f"{sender}: {text[:70]}",
327
+ content=text,
328
+ layer=MemoryLayer.SHORT,
329
+ role=MemoryRole.GENERAL,
330
+ tags=base_tags + [f"sender:{sender}", f"role:{_detect_sender_role(sender)}"] + _detect_content_type(msg),
331
+ emotional=emotional,
332
+ source="telegram",
333
+ source_ref=f"telegram:{msg.get('id', '')}",
334
+ metadata={
335
+ "telegram_msg_id": msg.get("id"),
336
+ "sender": sender,
337
+ "date": date_str,
338
+ "chat": chat_name,
339
+ "reply_ref": _detect_reply(msg),
340
+ },
341
+ )
342
+ imported += 1
343
+ except Exception:
344
+ skipped += 1
247
345
 
248
346
  return {
249
347
  "mode": "message",
@@ -285,7 +383,162 @@ def _import_daily(
285
383
  imported = 0
286
384
  days_processed = 0
287
385
 
288
- for day, day_msgs in sorted(by_day.items()):
386
+ sorted_days = sorted(by_day.items())
387
+ with click.progressbar(sorted_days, label=" Importing daily batches", show_pos=True) as bar:
388
+ for day, day_msgs in bar:
389
+ lines = []
390
+ senders: set[str] = set()
391
+ max_intensity = 0.0
392
+ all_labels: list[str] = []
393
+
394
+ for msg in day_msgs:
395
+ text = _extract_text(msg.get("text", ""))
396
+ sender = msg.get("from", msg.get("from_id", "unknown"))
397
+ senders.add(str(sender))
398
+ lines.append(f"[{sender}] {text}")
399
+
400
+ emo = _detect_emotion(text)
401
+ max_intensity = max(max_intensity, emo.intensity)
402
+ all_labels.extend(emo.labels)
403
+
404
+ content = "\n".join(lines)
405
+ unique_labels = list(dict.fromkeys(all_labels))[:5]
406
+ participant_str = ", ".join(sorted(senders))
407
+
408
+ store.snapshot(
409
+ title=f"{chat_name} — {day} ({len(day_msgs)} messages)",
410
+ content=content,
411
+ layer=MemoryLayer.MID,
412
+ role=MemoryRole.GENERAL,
413
+ tags=base_tags + [f"date:{day}"],
414
+ emotional=EmotionalSnapshot(
415
+ intensity=max_intensity,
416
+ labels=unique_labels,
417
+ ),
418
+ source="telegram",
419
+ source_ref=f"telegram:daily:{day}",
420
+ metadata={
421
+ "date": day,
422
+ "message_count": len(day_msgs),
423
+ "participants": participant_str,
424
+ "chat": chat_name,
425
+ },
426
+ )
427
+ imported += len(day_msgs)
428
+ days_processed += 1
429
+
430
+ return {
431
+ "mode": "daily",
432
+ "chat_name": chat_name,
433
+ "total_messages": len(messages),
434
+ "days_processed": days_processed,
435
+ "messages_imported": imported,
436
+ }
437
+
438
+
439
+ def _import_catchup(
440
+ store: MemoryStore,
441
+ messages: list[dict],
442
+ chat_name: str,
443
+ base_tags: list[str],
444
+ ) -> dict:
445
+ """Import across all memory tiers for full context catch-up.
446
+
447
+ Distributes messages intelligently across tiers:
448
+ - Last 24 hours → short-term (individual messages, full detail)
449
+ - Last 7 days → mid-term (daily summaries)
450
+ - Older than 7 days → long-term (weekly summaries, key themes)
451
+
452
+ Args:
453
+ store: Target MemoryStore.
454
+ messages: Filtered message list.
455
+ chat_name: Chat name for titles.
456
+ base_tags: Tags to apply.
457
+
458
+ Returns:
459
+ dict: Import stats per tier.
460
+ """
461
+ from datetime import timedelta
462
+
463
+ now = datetime.now(timezone.utc)
464
+ cutoff_short = now - timedelta(hours=24)
465
+ cutoff_mid = now - timedelta(days=7)
466
+
467
+ short_msgs: list[dict] = []
468
+ mid_msgs: dict[str, list[dict]] = defaultdict(list)
469
+ long_msgs: dict[str, list[dict]] = defaultdict(list)
470
+
471
+ for msg in messages:
472
+ date_str = msg.get("date", "")
473
+ if not date_str:
474
+ continue
475
+ try:
476
+ msg_dt = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
477
+ if msg_dt.tzinfo is None:
478
+ msg_dt = msg_dt.replace(tzinfo=timezone.utc)
479
+ except (ValueError, TypeError):
480
+ # Try just the date portion
481
+ try:
482
+ msg_dt = datetime.strptime(date_str[:10], "%Y-%m-%d").replace(
483
+ tzinfo=timezone.utc
484
+ )
485
+ except (ValueError, TypeError):
486
+ continue
487
+
488
+ if msg_dt >= cutoff_short:
489
+ short_msgs.append(msg)
490
+ elif msg_dt >= cutoff_mid:
491
+ day = date_str[:10]
492
+ mid_msgs[day].append(msg)
493
+ else:
494
+ # Group by ISO week for long-term
495
+ week_key = msg_dt.strftime("%Y-W%W")
496
+ long_msgs[week_key].append(msg)
497
+
498
+ stats = {
499
+ "mode": "catchup",
500
+ "chat_name": chat_name,
501
+ "total_messages": len(messages),
502
+ "short_term": {"count": 0},
503
+ "mid_term": {"days": 0, "messages": 0},
504
+ "long_term": {"weeks": 0, "messages": 0},
505
+ }
506
+
507
+ # --- Short-term: individual messages (last 24h) ---
508
+ for msg in short_msgs:
509
+ text = _extract_text(msg.get("text", ""))
510
+ sender = msg.get("from", msg.get("from_id", "unknown"))
511
+ emotional = _detect_emotion(text)
512
+ try:
513
+ store.snapshot(
514
+ title=f"{sender}: {text[:70]}",
515
+ content=text,
516
+ layer=MemoryLayer.SHORT,
517
+ role=MemoryRole.GENERAL,
518
+ tags=base_tags
519
+ + [
520
+ f"sender:{sender}",
521
+ f"role:{_detect_sender_role(sender)}",
522
+ "catchup:short",
523
+ ]
524
+ + _detect_content_type(msg),
525
+ emotional=emotional,
526
+ source="telegram",
527
+ source_ref=f"telegram:{msg.get('id', '')}",
528
+ metadata={
529
+ "telegram_msg_id": msg.get("id"),
530
+ "sender": sender,
531
+ "date": msg.get("date", ""),
532
+ "chat": chat_name,
533
+ "reply_ref": _detect_reply(msg),
534
+ },
535
+ )
536
+ stats["short_term"]["count"] += 1
537
+ except Exception:
538
+ pass
539
+
540
+ # --- Mid-term: daily summaries (last 7 days) ---
541
+ for day, day_msgs in sorted(mid_msgs.items()):
289
542
  lines = []
290
543
  senders: set[str] = set()
291
544
  max_intensity = 0.0
@@ -296,21 +549,19 @@ def _import_daily(
296
549
  sender = msg.get("from", msg.get("from_id", "unknown"))
297
550
  senders.add(str(sender))
298
551
  lines.append(f"[{sender}] {text}")
299
-
300
552
  emo = _detect_emotion(text)
301
553
  max_intensity = max(max_intensity, emo.intensity)
302
554
  all_labels.extend(emo.labels)
303
555
 
304
556
  content = "\n".join(lines)
305
557
  unique_labels = list(dict.fromkeys(all_labels))[:5]
306
- participant_str = ", ".join(sorted(senders))
307
558
 
308
559
  store.snapshot(
309
560
  title=f"{chat_name} — {day} ({len(day_msgs)} messages)",
310
561
  content=content,
311
562
  layer=MemoryLayer.MID,
312
563
  role=MemoryRole.GENERAL,
313
- tags=base_tags + [f"date:{day}"],
564
+ tags=base_tags + [f"date:{day}", "catchup:mid"],
314
565
  emotional=EmotionalSnapshot(
315
566
  intensity=max_intensity,
316
567
  labels=unique_labels,
@@ -320,17 +571,74 @@ def _import_daily(
320
571
  metadata={
321
572
  "date": day,
322
573
  "message_count": len(day_msgs),
323
- "participants": participant_str,
574
+ "participants": ", ".join(sorted(senders)),
324
575
  "chat": chat_name,
325
576
  },
326
577
  )
327
- imported += len(day_msgs)
328
- days_processed += 1
578
+ stats["mid_term"]["days"] += 1
579
+ stats["mid_term"]["messages"] += len(day_msgs)
329
580
 
330
- return {
331
- "mode": "daily",
332
- "chat_name": chat_name,
333
- "total_messages": len(messages),
334
- "days_processed": days_processed,
335
- "messages_imported": imported,
336
- }
581
+ # --- Long-term: weekly summaries (older than 7 days) ---
582
+ for week, week_msgs in sorted(long_msgs.items()):
583
+ lines = []
584
+ senders: set[str] = set()
585
+ topics: set[str] = set()
586
+ max_intensity = 0.0
587
+ all_labels: list[str] = []
588
+ dates_covered: set[str] = set()
589
+
590
+ for msg in week_msgs:
591
+ text = _extract_text(msg.get("text", ""))
592
+ sender = msg.get("from", msg.get("from_id", "unknown"))
593
+ senders.add(str(sender))
594
+ dates_covered.add(msg.get("date", "")[:10])
595
+
596
+ # For long-term, keep only first 200 chars per message
597
+ lines.append(f"[{sender}] {text[:200]}")
598
+ emo = _detect_emotion(text)
599
+ max_intensity = max(max_intensity, emo.intensity)
600
+ all_labels.extend(emo.labels)
601
+
602
+ # Extract potential topics from longer messages
603
+ if len(text) > 100:
604
+ words = text.lower().split()
605
+ for w in words:
606
+ if len(w) > 6 and w.isalpha():
607
+ topics.add(w)
608
+
609
+ # Summarize: limit content to avoid bloat
610
+ if len(lines) > 50:
611
+ content = "\n".join(lines[:25])
612
+ content += f"\n\n... ({len(lines) - 25} more messages) ...\n\n"
613
+ content += "\n".join(lines[-10:])
614
+ else:
615
+ content = "\n".join(lines)
616
+
617
+ unique_labels = list(dict.fromkeys(all_labels))[:5]
618
+ date_range = f"{min(dates_covered)} to {max(dates_covered)}" if dates_covered else week
619
+
620
+ store.snapshot(
621
+ title=f"{chat_name} — Week {week} ({len(week_msgs)} messages)",
622
+ content=content,
623
+ layer=MemoryLayer.LONG,
624
+ role=MemoryRole.GENERAL,
625
+ tags=base_tags + [f"week:{week}", "catchup:long"],
626
+ emotional=EmotionalSnapshot(
627
+ intensity=max_intensity,
628
+ labels=unique_labels,
629
+ ),
630
+ source="telegram",
631
+ source_ref=f"telegram:weekly:{week}",
632
+ metadata={
633
+ "week": week,
634
+ "date_range": date_range,
635
+ "message_count": len(week_msgs),
636
+ "participants": ", ".join(sorted(senders)),
637
+ "chat": chat_name,
638
+ "days_covered": len(dates_covered),
639
+ },
640
+ )
641
+ stats["long_term"]["weeks"] += 1
642
+ stats["long_term"]["messages"] += len(week_msgs)
643
+
644
+ return stats