cinchdb 0.1.14__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cinchdb/core/database.py CHANGED
@@ -1,11 +1,13 @@
  """Unified database connection interface for CinchDB."""

+ import os
  from pathlib import Path
  from typing import List, Dict, Any, Optional, TYPE_CHECKING

  from cinchdb.models import Column, Change
  from cinchdb.core.path_utils import get_project_root
  from cinchdb.utils import validate_query_safe
+ from cinchdb.infrastructure.metadata_connection_pool import get_metadata_db

  if TYPE_CHECKING:
      from cinchdb.managers.table import TableManager
@@ -78,7 +80,7 @@ class CinchDB:
          self.database = database
          self.branch = branch
          self.tenant = tenant
-
+
          # Determine connection type
          if project_dir is not None:
              # Local connection
@@ -120,16 +122,26 @@ class CinchDB:
              return

          # Check if this is a lazy database using metadata DB
-         from cinchdb.infrastructure.metadata_db import MetadataDB
-
-         with MetadataDB(self.project_dir) as metadata_db:
-             db_info = metadata_db.get_database(self.database)
+         metadata_db = get_metadata_db(self.project_dir)
+         db_info = metadata_db.get_database(self.database)

          if db_info and not db_info['materialized']:
              # Database exists in metadata but not materialized
              from cinchdb.core.initializer import ProjectInitializer
              initializer = ProjectInitializer(self.project_dir)
              initializer.materialize_database(self.database)
+
+     def get_connection(self, db_path) -> "DatabaseConnection":
+         """Get a database connection.
+
+         Args:
+             db_path: Path to database file
+
+         Returns:
+             DatabaseConnection instance
+         """
+         from cinchdb.core.connection import DatabaseConnection
+         return DatabaseConnection(db_path)

      @property
      def session(self):
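Hunks like the one above replace the short-lived `with MetadataDB(...)` blocks with a pooled handle from `get_metadata_db`, and add a small `get_connection` helper that wraps a SQLite file path in a `DatabaseConnection`. A minimal usage sketch of the helper, assuming a local `CinchDB` instance named `db` already exists; the file path below is purely illustrative:

from pathlib import Path

# Illustrative tenant database path; real paths depend on the project layout.
tenant_db_path = Path("./myproject/.cinchdb/main/main/main.db")

# get_connection simply constructs a DatabaseConnection around the given path.
conn = db.get_connection(tenant_db_path)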
@@ -464,38 +476,189 @@ class CinchDB:
              )
              return result

-     def update(self, table: str, id: str, data: Dict[str, Any]) -> Dict[str, Any]:
-         """Update a record in a table.
+     def update(self, table: str, *updates: Dict[str, Any]) -> Dict[str, Any] | List[Dict[str, Any]]:
+         """Update one or more records in a table.

          Args:
              table: Table name
-             id: Record ID
-             data: Updated data as dictionary
+             *updates: One or more update dictionaries, each must contain 'id' field

          Returns:
-             Updated record
+             Single record dict if one record updated, list of dicts if multiple
+
+         Examples:
+             # Single update
+             db.update("users", {"id": "123", "name": "John Updated", "status": "active"})
+
+             # Multiple updates using star expansion
+             db.update("users",
+                 {"id": "123", "name": "John Updated", "status": "active"},
+                 {"id": "456", "name": "Jane Updated", "email": "jane.new@example.com"},
+                 {"id": "789", "status": "inactive"}
+             )
+
+             # Or with a list using star expansion
+             user_updates = [
+                 {"id": "abc", "name": "Alice Updated"},
+                 {"id": "def", "status": "premium"}
+             ]
+             db.update("users", *user_updates)
          """
+         if not updates:
+             raise ValueError("At least one update record must be provided")
+
+         # Validate that all updates have an 'id' field
+         for i, update_data in enumerate(updates):
+             if 'id' not in update_data:
+                 raise ValueError(f"Update record {i} missing required 'id' field")
+
          if self.is_local:
-             return self.data.update(table, id, data)
+             # Single record
+             if len(updates) == 1:
+                 update_data = updates[0].copy()
+                 record_id = update_data.pop('id')
+                 return self.data.update_by_id(table, record_id, update_data)
+
+             # Multiple records - batch update
+             results = []
+             for update_data in updates:
+                 update_copy = update_data.copy()
+                 record_id = update_copy.pop('id')
+                 result = self.data.update_by_id(table, record_id, update_copy)
+                 results.append(result)
+             return results
          else:
-             # Remote update - use new data CRUD endpoint
-             result = self._make_request(
-                 "PUT", f"/tables/{table}/data/{id}", json={"data": data}
-             )
-             return result
+             # Remote update
+             if len(updates) == 1:
+                 # Single record - use existing endpoint
+                 update_data = updates[0].copy()
+                 record_id = update_data.pop('id')
+                 result = self._make_request(
+                     "PUT", f"/tables/{table}/data/{record_id}", json={"data": update_data}
+                 )
+                 return result
+             else:
+                 # Multiple records - use bulk endpoint
+                 result = self._make_request(
+                     "PUT", f"/tables/{table}/data/bulk", json={"updates": list(updates)}
+                 )
+                 return result

-     def delete(self, table: str, id: str) -> None:
-         """Delete a record from a table.
+     def delete(self, table: str, *ids: str) -> int:
+         """Delete one or more records from a table.

          Args:
              table: Table name
-             id: Record ID
+             *ids: One or more record IDs
+
+         Returns:
+             Number of records deleted
+
+         Examples:
+             # Single delete
+             db.delete("users", "123")
+
+             # Multiple deletes
+             db.delete("users", "123", "456", "789")
+
+             # Or with a list using star expansion
+             user_ids = ["abc", "def", "ghi"]
+             db.delete("users", *user_ids)
          """
+         if not ids:
+             raise ValueError("At least one ID must be provided")
+
          if self.is_local:
-             self.data.delete(table, id)
+             # Single record
+             if len(ids) == 1:
+                 success = self.data.delete_by_id(table, ids[0])
+                 return 1 if success else 0
+
+             # Multiple records - batch delete
+             deleted_count = 0
+             for record_id in ids:
+                 success = self.data.delete_by_id(table, record_id)
+                 if success:
+                     deleted_count += 1
+             return deleted_count
          else:
              # Remote delete
-             self._make_request("DELETE", f"/tables/{table}/data/{id}")
+             if len(ids) == 1:
+                 # Single record - use existing endpoint
+                 self._make_request("DELETE", f"/tables/{table}/data/{ids[0]}")
+                 return 1
+             else:
+                 # Multiple records - use bulk endpoint
+                 result = self._make_request(
+                     "DELETE", f"/tables/{table}/data/bulk", json={"ids": list(ids)}
+                 )
+                 return result.get("deleted_count", len(ids))
+
+     def delete_where(self, table: str, operator: str = "AND", **filters) -> int:
+         """Delete records from a table based on filter criteria.
+
+         Args:
+             table: Table name
+             operator: Logical operator to combine conditions - "AND" (default) or "OR"
+             **filters: Filter criteria (supports operators like __gt, __lt, __in, __like, __not)
+                 Multiple conditions are combined with the specified operator
+
+         Returns:
+             Number of records deleted
+
+         Examples:
+             # Delete records where status = 'inactive' (single condition)
+             count = db.delete_where('users', status='inactive')
+
+             # Delete records where status = 'inactive' AND age > 65 (default AND)
+             count = db.delete_where('users', status='inactive', age__gt=65)
+
+             # Delete records where status = 'inactive' OR age > 65
+             count = db.delete_where('users', operator='OR', status='inactive', age__gt=65)
+
+             # Delete records where item_id in [1, 2, 3]
+             count = db.delete_where('items', item_id__in=[1, 2, 3])
+         """
+         if self.is_local:
+             return self.data.delete_where(table, operator=operator, **filters)
+         else:
+             raise NotImplementedError("Remote bulk delete not implemented")
+
+     def update_where(self, table: str, data: Dict[str, Any], operator: str = "AND", **filters) -> int:
+         """Update records in a table based on filter criteria.
+
+         Args:
+             table: Table name
+             data: Dictionary of column-value pairs to update
+             operator: Logical operator to combine conditions - "AND" (default) or "OR"
+             **filters: Filter criteria (supports operators like __gt, __lt, __in, __like, __not)
+                 Multiple conditions are combined with the specified operator
+
+         Returns:
+             Number of records updated
+
+         Examples:
+             # Update status for all users with age > 65 (single condition)
+             count = db.update_where('users', {'status': 'senior'}, age__gt=65)
+
+             # Update status where age > 65 AND status = 'active' (default AND)
+             count = db.update_where('users', {'status': 'senior'}, age__gt=65, status='active')
+
+             # Update status where age > 65 OR status = 'pending'
+             count = db.update_where('users', {'status': 'senior'}, operator='OR', age__gt=65, status='pending')
+
+             # Update multiple fields where item_id in specific list
+             count = db.update_where(
+                 'items',
+                 {'status': 'inactive', 'updated_at': datetime.now()},
+                 item_id__in=[1, 2, 3]
+             )
+         """
+         if self.is_local:
+             return self.data.update_where(table, data, operator=operator, **filters)
+         else:
+             raise NotImplementedError("Remote bulk update not implemented")
+

      def create_index(
          self,
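The reworked `update`/`delete` signatures change the return shapes (a dict for a single update, a list for several, an integer count for deletes), and the new `update_where`/`delete_where` helpers only work against local connections in this release. A brief calling sketch, assuming a local `CinchDB` handle named `db`; the table and column names are illustrative:

# Single update returns the updated record as a dict.
user = db.update("users", {"id": "123", "status": "active"})

# Multiple updates return a list of updated records, one per input dict.
users = db.update(
    "users",
    {"id": "123", "status": "active"},
    {"id": "456", "status": "inactive"},
)

# delete() now reports how many records were actually removed.
removed = db.delete("users", "123", "456", "789")

# Filter-based helpers return counts; remote connections raise
# NotImplementedError for these two methods in this version.
senior_count = db.update_where("users", {"status": "senior"}, age__gt=65)
purged_count = db.delete_where("users", operator="OR", status="inactive", age__gt=120)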
@@ -581,31 +744,6 @@ class CinchDB:
              changes.append(Change(**data))
          return changes

-     def optimize_tenant(self, tenant_name: str = None, force: bool = False) -> bool:
-         """Optimize a tenant's storage with VACUUM and page size adjustment.
-
-         Args:
-             tenant_name: Name of the tenant to optimize (default: current tenant)
-             force: If True, always perform optimization
-
-         Returns:
-             True if optimization was performed, False otherwise
-
-         Examples:
-             # Optimize current tenant
-             db.optimize_tenant()
-
-             # Optimize specific tenant
-             db.optimize_tenant("store_west")
-
-             # Force optimization even if not needed
-             db.optimize_tenant(force=True)
-         """
-         if self.is_local:
-             tenant_to_optimize = tenant_name or self.tenant
-             return self.tenants.optimize_tenant_storage(tenant_to_optimize, force=force)
-         else:
-             raise NotImplementedError("Remote tenant optimization not implemented")

      def get_tenant_size(self, tenant_name: str = None) -> dict:
          """Get storage size information for a tenant.
@@ -639,6 +777,46 @@ class CinchDB:
          else:
              raise NotImplementedError("Remote tenant size query not implemented")

+     def vacuum_tenant(self, tenant_name: str = None) -> dict:
+         """Run VACUUM operation on a specific tenant to optimize storage and performance.
+
+         VACUUM reclaims space from deleted records, defragments the database file,
+         and can improve query performance by rebuilding internal structures.
+
+         Args:
+             tenant_name: Name of tenant to vacuum (default: current tenant)
+
+         Returns:
+             Dictionary with vacuum results:
+             - success: Whether vacuum completed successfully
+             - tenant: Name of the tenant
+             - size_before: Size in bytes before vacuum
+             - size_after: Size in bytes after vacuum
+             - space_reclaimed: Bytes reclaimed by vacuum
+             - space_reclaimed_mb: MB reclaimed (rounded to 2 decimals)
+             - duration_seconds: Time taken for vacuum operation
+             - error: Error message if vacuum failed
+
+         Raises:
+             ValueError: If tenant doesn't exist or is not materialized
+             NotImplementedError: If called on remote database
+
+         Examples:
+             # Vacuum current tenant
+             result = db.vacuum_tenant()
+             if result['success']:
+                 print(f"Reclaimed {result['space_reclaimed_mb']:.2f} MB")
+
+             # Vacuum specific tenant
+             result = db.vacuum_tenant("store_east")
+             print(f"Vacuum took {result['duration_seconds']} seconds")
+         """
+         if self.is_local:
+             tenant_to_vacuum = tenant_name or self.tenant
+             return self.tenants.vacuum_tenant(tenant_to_vacuum)
+         else:
+             raise NotImplementedError("Remote tenant vacuum not implemented")
+

      def get_storage_info(self) -> dict:
          """Get storage size information for all tenants in current branch.

@@ -667,35 +845,6 @@ class CinchDB:
          else:
              raise NotImplementedError("Remote storage info not implemented")

-     def optimize_all_tenants(self, force: bool = False) -> dict:
-         """Optimize storage for all tenants in current branch.
-
-         This is designed to be called periodically to:
-         - Reclaim unused space with VACUUM
-         - Adjust page sizes as databases grow
-         - Keep small databases compact
-
-         Args:
-             force: If True, optimize all tenants regardless of size
-
-         Returns:
-             Dictionary with optimization results:
-             - optimized: List of tenant names that were optimized
-             - skipped: List of tenant names that were skipped
-             - errors: List of tuples (tenant_name, error_message)
-
-         Examples:
-             # Run periodic optimization
-             results = db.optimize_all_tenants()
-             print(f"Optimized {len(results['optimized'])} tenants")
-
-             # Force optimization of all tenants
-             results = db.optimize_all_tenants(force=True)
-         """
-         if self.is_local:
-             return self.tenants.optimize_all_tenants(force=force)
-         else:
-             raise NotImplementedError("Remote tenant optimization not implemented")

      def close(self):
          """Close any open connections."""
@@ -0,0 +1,43 @@
+ """Maintenance utilities for CinchDB operations."""
+
+ from pathlib import Path
+ from cinchdb.infrastructure.metadata_connection_pool import get_metadata_db
+
+
+ class MaintenanceError(Exception):
+     """Exception raised when operation blocked by maintenance mode."""
+     pass
+
+
+ def check_maintenance_mode(project_root: Path, database: str, branch: str = None) -> None:
+     """Check if database or branch is in maintenance mode and raise error if so.
+
+     Args:
+         project_root: Path to project root
+         database: Database name
+         branch: Branch name (optional)
+
+     Raises:
+         MaintenanceError: If database or branch is in maintenance mode
+     """
+     try:
+         metadata_db = get_metadata_db(project_root)
+
+         # Check database-level maintenance
+         if metadata_db.is_database_in_maintenance(database):
+             info = metadata_db.get_maintenance_info(database)
+             reason = info.get("reason", "Database maintenance in progress") if info else "Database maintenance in progress"
+             raise MaintenanceError(f"Database '{database}' is in maintenance mode: {reason}")
+
+         # Check branch-level maintenance if branch specified
+         if branch and metadata_db.is_branch_in_maintenance(database, branch):
+             info = metadata_db.get_maintenance_info(database, branch)
+             reason = info.get("reason", "Branch maintenance in progress") if info else "Branch maintenance in progress"
+             raise MaintenanceError(f"Branch '{database}/{branch}' is in maintenance mode: {reason}")
+
+     except MaintenanceError:
+         raise  # Re-raise maintenance errors
+     except Exception:
+         # If we can't check maintenance status, allow the operation to proceed
+         # This prevents maintenance check failures from blocking normal operations
+         pass
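This hunk adds a new module (its path is not shown in the extracted diff) whose `check_maintenance_mode` is meant to be called before operations that should be blocked during maintenance. A small caller sketch, assuming the module can be imported; the `cinchdb.maintenance` import path and the `guarded_write` wrapper are placeholders, not confirmed by the diff:

from pathlib import Path

# Placeholder import path; the diff does not reveal where this module lives.
from cinchdb.maintenance import MaintenanceError, check_maintenance_mode


def guarded_write(project_root: Path, database: str, branch: str) -> bool:
    """Return True if the write may proceed, False if maintenance blocks it."""
    try:
        check_maintenance_mode(project_root, database, branch)
    except MaintenanceError as exc:
        print(f"Write skipped: {exc}")
        return False
    # ... perform the actual write here ...
    return True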
cinchdb/core/path_utils.py CHANGED
@@ -1,7 +1,8 @@
  """Path utilities for CinchDB."""

  from pathlib import Path
- from typing import List, Optional
+ from typing import List
+ from cinchdb.infrastructure.metadata_connection_pool import get_metadata_db


  def get_project_root(start_path: Path) -> Path:
@@ -119,10 +120,9 @@ def list_databases(project_root: Path) -> List[str]:
      if not metadata_db_path.exists():
          return []

-     from cinchdb.infrastructure.metadata_db import MetadataDB
-     with MetadataDB(project_root) as metadata_db:
-         db_records = metadata_db.list_databases()
-         return sorted(record['name'] for record in db_records)
+     metadata_db = get_metadata_db(project_root)
+     db_records = metadata_db.list_databases()
+     return sorted(record['name'] for record in db_records)


  def list_branches(project_root: Path, database: str) -> List[str]:
@@ -139,13 +139,12 @@ def list_branches(project_root: Path, database: str) -> List[str]:
      if not metadata_db_path.exists():
          return []

-     from cinchdb.infrastructure.metadata_db import MetadataDB
-     with MetadataDB(project_root) as metadata_db:
-         db_info = metadata_db.get_database(database)
-         if not db_info:
-             return []
-         branch_records = metadata_db.list_branches(db_info['id'])
-         return sorted(record['name'] for record in branch_records)
+     metadata_db = get_metadata_db(project_root)
+     db_info = metadata_db.get_database(database)
+     if not db_info:
+         return []
+     branch_records = metadata_db.list_branches(db_info['id'])
+     return sorted(record['name'] for record in branch_records)


  def list_tenants(project_root: Path, database: str, branch: str) -> List[str]:
@@ -163,13 +162,12 @@ def list_tenants(project_root: Path, database: str, branch: str) -> List[str]:
      if not metadata_db_path.exists():
          return []

-     from cinchdb.infrastructure.metadata_db import MetadataDB
-     with MetadataDB(project_root) as metadata_db:
-         db_info = metadata_db.get_database(database)
-         if not db_info:
-             return []
-         branch_info = metadata_db.get_branch(db_info['id'], branch)
-         if not branch_info:
-             return []
-         tenant_records = metadata_db.list_tenants(branch_info['id'])
-         return sorted(record['name'] for record in tenant_records)
+     metadata_db = get_metadata_db(project_root)
+     db_info = metadata_db.get_database(database)
+     if not db_info:
+         return []
+     branch_info = metadata_db.get_branch(db_info['id'], branch)
+     if not branch_info:
+         return []
+     tenant_records = metadata_db.list_tenants(branch_info['id'])
+     return sorted(record['name'] for record in tenant_records)
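The path_utils.py changes mirror the database.py refactor: each per-call `with MetadataDB(project_root)` block becomes a lookup through `get_metadata_db`, which presumably hands back a pooled, long-lived connection keyed by project root. A rough before/after sketch of that pattern; the `count_databases_*` wrappers are illustrative, only `MetadataDB`, `get_metadata_db`, and `list_databases` come from the diff:

from pathlib import Path


# 0.1.14 pattern: open and close a metadata connection on every call.
def count_databases_old(project_root: Path) -> int:
    from cinchdb.infrastructure.metadata_db import MetadataDB
    with MetadataDB(project_root) as metadata_db:
        return len(metadata_db.list_databases())


# 0.1.17 pattern: reuse a shared handle from the connection pool.
def count_databases_new(project_root: Path) -> int:
    from cinchdb.infrastructure.metadata_connection_pool import get_metadata_db
    metadata_db = get_metadata_db(project_root)
    return len(metadata_db.list_databases())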