spatial-memory-mcp 1.0.3__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.

Files changed (39)
  1. spatial_memory/__init__.py +97 -97
  2. spatial_memory/__main__.py +241 -2
  3. spatial_memory/adapters/lancedb_repository.py +74 -5
  4. spatial_memory/config.py +115 -2
  5. spatial_memory/core/__init__.py +35 -0
  6. spatial_memory/core/cache.py +317 -0
  7. spatial_memory/core/circuit_breaker.py +297 -0
  8. spatial_memory/core/connection_pool.py +41 -3
  9. spatial_memory/core/consolidation_strategies.py +402 -0
  10. spatial_memory/core/database.py +791 -769
  11. spatial_memory/core/db_idempotency.py +242 -0
  12. spatial_memory/core/db_indexes.py +575 -0
  13. spatial_memory/core/db_migrations.py +584 -0
  14. spatial_memory/core/db_search.py +509 -0
  15. spatial_memory/core/db_versioning.py +177 -0
  16. spatial_memory/core/embeddings.py +156 -19
  17. spatial_memory/core/errors.py +75 -3
  18. spatial_memory/core/filesystem.py +178 -0
  19. spatial_memory/core/logging.py +194 -103
  20. spatial_memory/core/models.py +4 -0
  21. spatial_memory/core/rate_limiter.py +326 -105
  22. spatial_memory/core/response_types.py +497 -0
  23. spatial_memory/core/tracing.py +300 -0
  24. spatial_memory/core/validation.py +403 -319
  25. spatial_memory/factory.py +407 -0
  26. spatial_memory/migrations/__init__.py +40 -0
  27. spatial_memory/ports/repositories.py +52 -2
  28. spatial_memory/server.py +329 -188
  29. spatial_memory/services/export_import.py +61 -43
  30. spatial_memory/services/lifecycle.py +397 -122
  31. spatial_memory/services/memory.py +81 -4
  32. spatial_memory/services/spatial.py +129 -46
  33. spatial_memory/tools/definitions.py +695 -671
  34. {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/METADATA +83 -3
  35. spatial_memory_mcp-1.6.0.dist-info/RECORD +54 -0
  36. spatial_memory_mcp-1.0.3.dist-info/RECORD +0 -41
  37. {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/WHEEL +0 -0
  38. {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/entry_points.txt +0 -0
  39. {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/licenses/LICENSE +0 -0
spatial_memory/services/export_import.py

@@ -619,7 +619,10 @@ class ExportImportService:
         batches: Iterator[list[dict[str, Any]]],
         include_vectors: bool,
     ) -> int:
-        """Export to JSON format.
+        """Export to JSON format using streaming to avoid memory exhaustion.
+
+        Writes a valid JSON array by streaming records one at a time,
+        without accumulating all records in memory.
 
         Args:
             path: Output file path.
@@ -629,16 +632,32 @@ class ExportImportService:
         Returns:
             Number of records exported.
         """
-        all_records: list[dict[str, Any]] = []
-        for batch in batches:
-            for record in batch:
-                processed = self._prepare_record_for_export(record, include_vectors)
-                all_records.append(processed)
+        total_records = 0
+        first_record = True
 
         with open(path, "w", encoding="utf-8") as f:
-            json.dump(all_records, f, default=self._json_serializer, indent=2)
+            f.write("[\n")
+
+            for batch in batches:
+                for record in batch:
+                    processed = self._prepare_record_for_export(record, include_vectors)
 
-        return len(all_records)
+                    # Add comma separator for all but first record
+                    if not first_record:
+                        f.write(",\n")
+                    first_record = False
+
+                    # Write the record
+                    json_str = json.dumps(processed, default=self._json_serializer, indent=2)
+                    # Indent each line for pretty formatting
+                    indented = "\n".join("  " + line for line in json_str.split("\n"))
+                    f.write(indented)
+
+                    total_records += 1
+
+            f.write("\n]")
+
+        return total_records
 
     def _export_csv(
         self,
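
The comma bookkeeping above is what keeps the hand-built array valid JSON: a separator before every record except the first, then a closing `]` after the loop. Here is a minimal standalone sketch of the same pattern; the `stream_json_array` helper and the sample records are illustrative, not part of the package:

import json
from collections.abc import Iterator
from typing import Any

def stream_json_array(path: str, batches: Iterator[list[dict[str, Any]]]) -> int:
    """Write records as one valid JSON array without holding them all in memory."""
    total = 0
    first = True
    with open(path, "w", encoding="utf-8") as f:
        f.write("[\n")
        for batch in batches:
            for record in batch:
                if not first:
                    f.write(",\n")  # separator before every record except the first
                first = False
                body = json.dumps(record, indent=2)
                # indent each record's lines so it nests inside the array
                f.write("\n".join("  " + line for line in body.split("\n")))
                total += 1
        f.write("\n]")
    return total

# Round trip: the streamed file parses as ordinary JSON.
count = stream_json_array("out.json", iter([[{"id": 1}], [{"id": 2}]]))
with open("out.json", encoding="utf-8") as f:
    assert len(json.load(f)) == count == 2

The trade-off versus a single json.dump is constant memory at the cost of managing separators by hand; an exception mid-export leaves a truncated, unparseable file in either approach.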
@@ -646,7 +665,10 @@ class ExportImportService:
         batches: Iterator[list[dict[str, Any]]],
         include_vectors: bool,
     ) -> int:
-        """Export to CSV format.
+        """Export to CSV format using streaming to avoid memory exhaustion.
+
+        Writes CSV rows as they are processed without accumulating
+        all records in memory.
 
         Args:
             path: Output file path.
@@ -656,43 +678,39 @@ class ExportImportService:
         Returns:
             Number of records exported.
         """
-        all_records: list[dict[str, Any]] = []
-        for batch in batches:
-            for record in batch:
-                processed = self._prepare_record_for_export(record, include_vectors)
-                # Convert complex types to strings for CSV
-                processed["tags"] = json.dumps(processed.get("tags", []))
-                processed["metadata"] = json.dumps(processed.get("metadata", {}))
-                if include_vectors and "vector" in processed:
-                    processed["vector"] = json.dumps(processed["vector"])
-                # Convert datetimes to ISO format
-                for key in ["created_at", "updated_at", "last_accessed"]:
-                    if key in processed and processed[key] is not None:
-                        if isinstance(processed[key], datetime):
-                            processed[key] = processed[key].isoformat()
-                all_records.append(processed)
-
-        if not all_records:
-            # Write empty CSV with header
-            fieldnames = [
-                "id", "content", "namespace", "importance", "tags",
-                "source", "metadata", "created_at", "updated_at",
-                "last_accessed", "access_count"
-            ]
-            if include_vectors:
-                fieldnames.append("vector")
-            with open(path, "w", newline="", encoding="utf-8") as f:
-                writer = csv.DictWriter(f, fieldnames=fieldnames)
-                writer.writeheader()
-            return 0
-
-        fieldnames = list(all_records[0].keys())
+        # Define fieldnames upfront
+        fieldnames = [
+            "id", "content", "namespace", "importance", "tags",
+            "source", "metadata", "created_at", "updated_at",
+            "last_accessed", "access_count"
+        ]
+        if include_vectors:
+            fieldnames.append("vector")
+
+        total_records = 0
+
         with open(path, "w", newline="", encoding="utf-8") as f:
-            writer = csv.DictWriter(f, fieldnames=fieldnames)
+            writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
             writer.writeheader()
-            writer.writerows(all_records)
 
-        return len(all_records)
+            for batch in batches:
+                for record in batch:
+                    processed = self._prepare_record_for_export(record, include_vectors)
+                    # Convert complex types to strings for CSV
+                    processed["tags"] = json.dumps(processed.get("tags", []))
+                    processed["metadata"] = json.dumps(processed.get("metadata", {}))
+                    if include_vectors and "vector" in processed:
+                        processed["vector"] = json.dumps(processed["vector"])
+                    # Convert datetimes to ISO format
+                    for key in ["created_at", "updated_at", "last_accessed"]:
+                        if key in processed and processed[key] is not None:
+                            if isinstance(processed[key], datetime):
+                                processed[key] = processed[key].isoformat()
+
+                    writer.writerow(processed)
+                    total_records += 1
+
+        return total_records
 
     def _prepare_record_for_export(
         self,
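
Declaring `fieldnames` up front fixes the header even for an empty export, and `extrasaction="ignore"` tells csv.DictWriter to drop keys outside that list instead of raising ValueError (the default is `extrasaction="raise"`). A small sketch of that behavior, with an abbreviated field list for illustration:

import csv
import io

fieldnames = ["id", "content", "namespace"]  # abbreviated for illustration

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=fieldnames, extrasaction="ignore")
writer.writeheader()

# "vector" is not in fieldnames: with extrasaction="ignore" it is silently
# dropped; the default extrasaction="raise" would raise ValueError here.
writer.writerow({"id": "m1", "content": "note", "namespace": "default",
                 "vector": "[0.1, 0.2]"})

print(buf.getvalue())
# id,content,namespace
# m1,note,default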