vector-inspector 0.2.6__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38):
  1. vector_inspector/config/__init__.py +4 -0
  2. vector_inspector/config/known_embedding_models.json +432 -0
  3. vector_inspector/core/cache_manager.py +159 -0
  4. vector_inspector/core/connection_manager.py +277 -0
  5. vector_inspector/core/connections/__init__.py +2 -1
  6. vector_inspector/core/connections/base_connection.py +42 -1
  7. vector_inspector/core/connections/chroma_connection.py +137 -16
  8. vector_inspector/core/connections/pinecone_connection.py +768 -0
  9. vector_inspector/core/connections/qdrant_connection.py +62 -8
  10. vector_inspector/core/embedding_providers/__init__.py +14 -0
  11. vector_inspector/core/embedding_providers/base_provider.py +128 -0
  12. vector_inspector/core/embedding_providers/clip_provider.py +260 -0
  13. vector_inspector/core/embedding_providers/provider_factory.py +176 -0
  14. vector_inspector/core/embedding_providers/sentence_transformer_provider.py +203 -0
  15. vector_inspector/core/embedding_utils.py +167 -0
  16. vector_inspector/core/model_registry.py +205 -0
  17. vector_inspector/services/backup_restore_service.py +19 -29
  18. vector_inspector/services/credential_service.py +130 -0
  19. vector_inspector/services/filter_service.py +1 -1
  20. vector_inspector/services/profile_service.py +409 -0
  21. vector_inspector/services/settings_service.py +136 -1
  22. vector_inspector/ui/components/connection_manager_panel.py +327 -0
  23. vector_inspector/ui/components/profile_manager_panel.py +565 -0
  24. vector_inspector/ui/dialogs/__init__.py +6 -0
  25. vector_inspector/ui/dialogs/cross_db_migration.py +383 -0
  26. vector_inspector/ui/dialogs/embedding_config_dialog.py +315 -0
  27. vector_inspector/ui/dialogs/provider_type_dialog.py +189 -0
  28. vector_inspector/ui/main_window.py +456 -190
  29. vector_inspector/ui/views/connection_view.py +55 -10
  30. vector_inspector/ui/views/info_panel.py +272 -55
  31. vector_inspector/ui/views/metadata_view.py +71 -3
  32. vector_inspector/ui/views/search_view.py +44 -4
  33. vector_inspector/ui/views/visualization_view.py +19 -5
  34. {vector_inspector-0.2.6.dist-info → vector_inspector-0.3.1.dist-info}/METADATA +3 -1
  35. vector_inspector-0.3.1.dist-info/RECORD +55 -0
  36. vector_inspector-0.2.6.dist-info/RECORD +0 -35
  37. {vector_inspector-0.2.6.dist-info → vector_inspector-0.3.1.dist-info}/WHEEL +0 -0
  38. {vector_inspector-0.2.6.dist-info → vector_inspector-0.3.1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,203 @@
1
+ """Sentence Transformer embedding provider with lazy loading."""
2
+
3
+ from typing import List, Union, Optional
4
+ import numpy as np
5
+
6
+ from .base_provider import (
7
+ EmbeddingProvider,
8
+ EmbeddingMetadata,
9
+ Modality,
10
+ Normalization
11
+ )
12
+
13
+
14
class SentenceTransformerProvider(EmbeddingProvider):
    """Provider for sentence-transformers models.

    Lazy-loads the sentence-transformers library and model on first use.
    Supports all models from the sentence-transformers library including:
    - all-MiniLM-L6-v2
    - all-mpnet-base-v2
    - BGE, GTE, E5 families
    - Multilingual variants
    """

    def __init__(self, model_name: str, trust_remote_code: bool = False):
        """Initialize sentence-transformer provider.

        Args:
            model_name: HuggingFace model ID or local path
            trust_remote_code: Whether to trust remote code (for some models)
        """
        super().__init__(model_name)
        self.trust_remote_code = trust_remote_code
        # Cached result of get_metadata(); built at most once.
        self._metadata: Optional[EmbeddingMetadata] = None

    def get_metadata(self) -> EmbeddingMetadata:
        """Get metadata about the sentence-transformer model.

        Tries to read the dimension and max length from the HuggingFace
        config without loading model weights; loads the model only when
        the config does not expose the dimension, and falls back to an
        unverified default as a last resort.

        Returns:
            EmbeddingMetadata for this model (cached after the first call).

        Raises:
            ImportError: If sentence-transformers is not installed.
        """
        if self._metadata is not None:
            return self._metadata

        # Try to get metadata without loading full model
        try:
            # Imported to fail fast with a clear message when the library
            # is missing (the name is otherwise unused in this path).
            from sentence_transformers import SentenceTransformer  # noqa: F401
            from transformers import AutoConfig

            # Try to get config without loading weights
            try:
                config = AutoConfig.from_pretrained(self.model_name)
                dimension = getattr(config, 'hidden_size', None)
                max_length = getattr(config, 'max_position_embeddings', None)
            except Exception:
                # If config fails, we'll need to load the model
                dimension = None
                max_length = None

            # If we couldn't get dimension from config, load the model.
            # Use warmup() rather than _load_model() directly so the
            # loaded-state bookkeeping matches encode()/encode_batch()
            # and the model is not loaded a second time later.
            # NOTE(review): assumes base-class warmup() delegates to
            # _load_model() and sets _is_loaded — confirm in base_provider.
            if dimension is None:
                if not self._is_loaded:
                    self.warmup()
                dimension = self._model.get_sentence_embedding_dimension()
                max_length = self._model.max_seq_length

            self._metadata = EmbeddingMetadata(
                name=self.model_name,
                dimension=int(dimension) if dimension is not None else 768,
                modality=Modality.TEXT,
                normalization=Normalization.L2,  # Most sentence-transformers normalize
                model_type="sentence-transformer",
                source="huggingface",
                max_sequence_length=max_length,
                description=f"Sentence-Transformer model: {self.model_name}"
            )

        except ImportError:
            # sentence-transformers not installed
            raise ImportError(
                "sentence-transformers library not installed. "
                "Install with: pip install sentence-transformers"
            )
        except Exception:
            # Fallback metadata if we can't determine dimension
            self._metadata = EmbeddingMetadata(
                name=self.model_name,
                dimension=768,  # Common default
                modality=Modality.TEXT,
                normalization=Normalization.L2,
                model_type="sentence-transformer",
                source="huggingface",
                description=f"Sentence-Transformer model: {self.model_name} (dimension not verified)"
            )

        return self._metadata

    def _load_model(self):
        """Load the sentence-transformer model.

        Raises:
            ImportError: If sentence-transformers is not installed.
        """
        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise ImportError(
                "sentence-transformers library not installed. "
                "Install with: pip install sentence-transformers"
            )

        # Load model with optional trust_remote_code
        self._model = SentenceTransformer(
            self.model_name,
            trust_remote_code=self.trust_remote_code
        )

    def encode(
        self,
        inputs: Union[str, List[str]],
        normalize: bool = True,
        show_progress: bool = False
    ) -> np.ndarray:
        """Encode text inputs into embeddings.

        Args:
            inputs: Single string or list of strings
            normalize: Whether to L2-normalize embeddings
            show_progress: Whether to show progress bar

        Returns:
            numpy array of embeddings (one row per input)
        """
        # Ensure model is loaded
        if not self._is_loaded:
            self.warmup()

        # Convert single string to list
        if isinstance(inputs, str):
            inputs = [inputs]

        # Encode
        embeddings = self._model.encode(
            inputs,
            normalize_embeddings=normalize,
            show_progress_bar=show_progress,
            convert_to_numpy=True
        )

        return embeddings

    def encode_batch(
        self,
        inputs: List[str],
        batch_size: int = 32,
        normalize: bool = True,
        show_progress: bool = True
    ) -> np.ndarray:
        """Encode a large batch of texts efficiently.

        Args:
            inputs: List of strings
            batch_size: Batch size for encoding
            normalize: Whether to L2-normalize embeddings
            show_progress: Whether to show progress bar

        Returns:
            numpy array of embeddings
        """
        if not self._is_loaded:
            self.warmup()

        embeddings = self._model.encode(
            inputs,
            batch_size=batch_size,
            normalize_embeddings=normalize,
            show_progress_bar=show_progress,
            convert_to_numpy=True
        )

        return embeddings

    def similarity(self, query: Union[str, np.ndarray], corpus: List[str]) -> np.ndarray:
        """Compute similarity between query and corpus.

        Args:
            query: Query string or embedding
            corpus: List of corpus strings

        Returns:
            Similarity scores (cosine similarity if normalized)
        """
        if not self._is_loaded:
            self.warmup()

        # Get embeddings
        if isinstance(query, str):
            query_emb = self.encode(query, normalize=True)
        else:
            query_emb = query

        corpus_emb = self.encode(corpus, normalize=True)

        # Compute cosine similarity (dot product if normalized)
        similarities = np.dot(corpus_emb, query_emb.T).squeeze()

        return similarities
@@ -0,0 +1,167 @@
1
+ """Utilities for managing embedding models and vector dimensions."""
2
+
3
from typing import Any, Optional, Tuple, Union

from sentence_transformers import SentenceTransformer

from .model_registry import get_model_registry
7
+
8
+
9
+ # Default model to use when dimension is unknown or not mapped
10
+ DEFAULT_MODEL = ("all-MiniLM-L6-v2", "sentence-transformer")
11
+
12
+
13
def _get_dimension_to_model_dict():
    """Build dimension->models dictionary from registry.

    Returns:
        Dict mapping dimension to list of (name, type, description) tuples
    """
    registry = get_model_registry()
    return {
        dim: [
            (info.name, info.type, info.description)
            for info in registry.get_models_by_dimension(dim)
        ]
        for dim in registry.get_all_dimensions()
    }
29
+
30
+
31
+ # For backward compatibility - dynamically loads from registry
32
+ DIMENSION_TO_MODEL = _get_dimension_to_model_dict()
33
+
34
+
35
def get_model_for_dimension(dimension: int, prefer_multimodal: bool = True) -> Tuple[str, str]:
    """
    Get the appropriate embedding model name and type for a given vector dimension.

    Args:
        dimension: The vector dimension size
        prefer_multimodal: If True and multiple models exist for this dimension,
                          prefer multi-modal (CLIP) over text-only models

    Returns:
        Tuple of (model_name, model_type) where model_type is "sentence-transformer" or "clip"
    """
    registry = get_model_registry()
    candidates = registry.get_models_by_dimension(dimension)

    # No exact match — fall back to the nearest known dimension.
    if not candidates:
        nearest = registry.find_closest_dimension(dimension)
        if nearest:
            candidates = registry.get_models_by_dimension(nearest)

    if not candidates:
        return DEFAULT_MODEL

    # With several candidates, optionally favor a multi-modal (CLIP) model.
    if prefer_multimodal and len(candidates) > 1:
        for candidate in candidates:
            if candidate.modality == "multimodal" or candidate.type == "clip":
                return (candidate.name, candidate.type)

    # Single candidate, or no multimodal match — take the first option.
    return (candidates[0].name, candidates[0].type)
71
+
72
+
73
def get_available_models_for_dimension(dimension: int) -> list:
    """
    Get all available model options for a given dimension.
    Includes both predefined (from registry) and custom user-added models.

    Args:
        dimension: The vector dimension size

    Returns:
        List of tuples: [(model_name, model_type, description), ...]
    """
    # Start with models from registry
    registry = get_model_registry()
    registry_models = registry.get_models_by_dimension(dimension)
    models = [(m.name, m.type, m.description) for m in registry_models]

    # Add custom models from settings
    try:
        from ..services.settings_service import SettingsService
        settings = SettingsService()
        custom_models = settings.get_custom_embedding_models(dimension)
    except Exception as e:
        print(f"Warning: Could not load custom models: {e}")
        return models

    for model in custom_models:
        # Guard each entry so one malformed custom model does not
        # discard every other custom model for this dimension.
        try:
            # Format: (model_name, model_type, description)
            models.append((
                model["name"],
                model["type"],
                f"{model.get('description', '')} (custom)"
            ))
        except (KeyError, TypeError) as e:
            print(f"Warning: Skipping invalid custom model entry: {e}")

    return models
106
+
107
+
108
def load_embedding_model(model_name: str, model_type: str) -> Union[SentenceTransformer, Any]:
    """
    Load an embedding model (sentence-transformer or CLIP).

    Args:
        model_name: Name of the model to load
        model_type: Type of model ("sentence-transformer" or "clip")

    Returns:
        A SentenceTransformer instance, or a (CLIPModel, CLIPProcessor)
        tuple when model_type == "clip".
    """
    # NOTE: the return annotation uses typing.Any (the builtin `any` is a
    # function, not a type, and is invalid inside Union).
    if model_type == "clip":
        # Imported lazily so transformers is only required for CLIP models.
        from transformers import CLIPModel, CLIPProcessor
        model = CLIPModel.from_pretrained(model_name)
        processor = CLIPProcessor.from_pretrained(model_name)
        return (model, processor)
    else:
        return SentenceTransformer(model_name)
126
+
127
+
128
+ def encode_text(text: str, model: Union[SentenceTransformer, Tuple], model_type: str) -> list:
129
+ """
130
+ Encode text using the appropriate model.
131
+
132
+ Args:
133
+ text: Text to encode
134
+ model: The loaded model (SentenceTransformer or (CLIPModel, CLIPProcessor) tuple)
135
+ model_type: Type of model ("sentence-transformer" or "clip")
136
+
137
+ Returns:
138
+ Embedding vector as a list
139
+ """
140
+ if model_type == "clip":
141
+ import torch
142
+ clip_model, processor = model
143
+ inputs = processor(text=[text], return_tensors="pt", padding=True)
144
+ with torch.no_grad():
145
+ text_features = clip_model.get_text_features(**inputs)
146
+ # Normalize the features (CLIP embeddings are typically normalized)
147
+ text_features = text_features / text_features.norm(dim=-1, keepdim=True)
148
+ return text_features[0].cpu().numpy().tolist()
149
+ else:
150
+ # sentence-transformer
151
+ embedding = model.encode(text)
152
+ return embedding.tolist()
153
+
154
+
155
def get_embedding_model_for_dimension(dimension: int) -> Tuple[Union[SentenceTransformer, Tuple], str, str]:
    """
    Get a loaded embedding model for a specific dimension.

    Args:
        dimension: The vector dimension size

    Returns:
        Tuple of (loaded_model, model_name, model_type)
    """
    # Resolve the best-known model for this dimension, then load it.
    name, kind = get_model_for_dimension(dimension)
    return (load_embedding_model(name, kind), name, kind)
@@ -0,0 +1,205 @@
1
+ """Model registry for loading and managing known embedding models."""
2
+
3
+ import json
4
+ from pathlib import Path
5
+ from typing import List, Dict, Optional, Tuple
6
+ from dataclasses import dataclass
7
+
8
+
9
@dataclass
class ModelInfo:
    """Information about an embedding model."""
    name: str
    type: str
    dimension: int
    modality: str
    normalization: str
    source: str
    description: str

    # Serialized field order shared by to_dict()/from_dict().
    _FIELDS = ("name", "type", "dimension", "modality",
               "normalization", "source", "description")

    def to_dict(self) -> Dict:
        """Convert to dictionary."""
        return {field: getattr(self, field) for field in self._FIELDS}

    @classmethod
    def from_dict(cls, data: Dict) -> 'ModelInfo':
        """Create from dictionary."""
        return cls(**{field: data[field] for field in cls._FIELDS})
44
+
45
+
46
class EmbeddingModelRegistry:
    """Registry of known embedding models loaded from JSON.

    Singleton: every construction returns the same instance, which loads
    config/known_embedding_models.json once on first creation.
    """

    _instance = None

    # Declared for type checkers only; the containers are created
    # per-instance in __new__ (avoids shared mutable class-level defaults).
    _models: List[ModelInfo]
    _dimension_index: Dict[int, List[ModelInfo]]
    _name_index: Dict[str, ModelInfo]

    def __new__(cls):
        """Singleton pattern: create and populate the registry once."""
        if cls._instance is None:
            instance = super().__new__(cls)
            instance._models = []
            instance._dimension_index = {}
            instance._name_index = {}
            instance._load_registry()
            # Publish only after loading, so a failed first construction
            # does not leave a half-initialized singleton behind.
            cls._instance = instance
        return cls._instance

    def _load_registry(self):
        """Load models from the bundled JSON file into the indexes."""
        registry_path = Path(__file__).parent.parent / "config" / "known_embedding_models.json"

        if not registry_path.exists():
            print(f"Warning: Model registry not found at {registry_path}")
            return

        try:
            with open(registry_path, 'r', encoding='utf-8') as f:
                data = json.load(f)

            # Parse models
            for model_data in data.get("models", []):
                model_info = ModelInfo.from_dict(model_data)
                self._models.append(model_info)

                # Index by dimension
                if model_info.dimension not in self._dimension_index:
                    self._dimension_index[model_info.dimension] = []
                self._dimension_index[model_info.dimension].append(model_info)

                # Index by name (lower-cased for case-insensitive lookup)
                self._name_index[model_info.name.lower()] = model_info

            print(f"Loaded {len(self._models)} models from registry")
        except Exception as e:
            print(f"Error loading model registry: {e}")

    def get_models_by_dimension(self, dimension: int) -> List[ModelInfo]:
        """Get all models for a specific dimension.

        Args:
            dimension: Vector dimension

        Returns:
            List of ModelInfo objects (empty if none registered)
        """
        return self._dimension_index.get(dimension, [])

    def get_model_by_name(self, name: str) -> Optional[ModelInfo]:
        """Get model info by name (case-insensitive).

        Args:
            name: Model name

        Returns:
            ModelInfo or None if not found
        """
        return self._name_index.get(name.lower())

    def get_all_models(self) -> List[ModelInfo]:
        """Get all registered models.

        Returns:
            List of all ModelInfo objects (a copy; safe to mutate)
        """
        return self._models.copy()

    def get_all_dimensions(self) -> List[int]:
        """Get all available dimensions.

        Returns:
            Sorted list of dimensions
        """
        return sorted(self._dimension_index.keys())

    def find_closest_dimension(self, target_dimension: int) -> Optional[int]:
        """Find the closest available dimension.

        Args:
            target_dimension: Target dimension to match

        Returns:
            Closest dimension or None if no models exist
        """
        if not self._dimension_index:
            return None

        return min(self._dimension_index.keys(), key=lambda x: abs(x - target_dimension))

    def get_models_by_type(self, model_type: str) -> List[ModelInfo]:
        """Get all models of a specific type.

        Args:
            model_type: Model type (e.g., "sentence-transformer", "clip")

        Returns:
            List of ModelInfo objects
        """
        return [m for m in self._models if m.type == model_type]

    def get_models_by_source(self, source: str) -> List[ModelInfo]:
        """Get all models from a specific source.

        Args:
            source: Model source (e.g., "huggingface", "openai-api")

        Returns:
            List of ModelInfo objects
        """
        return [m for m in self._models if m.source == source]

    def search_models(self, query: str) -> List[ModelInfo]:
        """Search models by name or description.

        Args:
            query: Search query (case-insensitive)

        Returns:
            List of matching ModelInfo objects
        """
        query_lower = query.lower()
        results = []

        for model in self._models:
            if (query_lower in model.name.lower() or
                    query_lower in model.description.lower()):
                results.append(model)

        return results

    def reload(self):
        """Reload the registry from disk."""
        self._models.clear()
        self._dimension_index.clear()
        self._name_index.clear()
        self._load_registry()
190
+
191
+
192
# Global registry instance
_registry = None


def get_model_registry() -> EmbeddingModelRegistry:
    """Get the global model registry instance.

    Returns:
        EmbeddingModelRegistry singleton
    """
    global _registry
    if _registry is not None:
        return _registry
    _registry = EmbeddingModelRegistry()
    return _registry
@@ -169,36 +169,10 @@ class BackupRestoreService:
169
169
  print(f"Failed to generate embeddings: {e}")
170
170
  return False
171
171
 
172
- # Convert IDs to Qdrant-compatible format (integers or UUIDs)
173
- # Store original IDs in metadata
172
+ # Keep IDs as strings - Qdrant's _to_uuid method handles conversion
173
+ # Just ensure all IDs are strings
174
174
  original_ids = data.get("ids", [])
175
- qdrant_ids = []
176
- metadatas = data.get("metadatas", [])
177
-
178
- for i, orig_id in enumerate(original_ids):
179
- # Try to convert to integer, otherwise use index
180
- try:
181
- # If it's like "doc_123", extract the number
182
- if isinstance(orig_id, str) and "_" in orig_id:
183
- qdrant_id = int(orig_id.split("_")[-1])
184
- else:
185
- qdrant_id = int(orig_id)
186
- except (ValueError, AttributeError):
187
- # Use index as ID if can't convert
188
- qdrant_id = i
189
-
190
- qdrant_ids.append(qdrant_id)
191
-
192
- # Store original ID in metadata
193
- if i < len(metadatas):
194
- if metadatas[i] is None:
195
- metadatas[i] = {}
196
- metadatas[i]["original_id"] = orig_id
197
- else:
198
- metadatas.append({"original_id": orig_id})
199
-
200
- data["ids"] = qdrant_ids
201
- data["metadatas"] = metadatas
175
+ data["ids"] = [str(id_val) for id_val in original_ids]
202
176
 
203
177
  # Add items to collection
204
178
  success = connection.add_items(
@@ -215,12 +189,28 @@ class BackupRestoreService:
215
189
  return True
216
190
  else:
217
191
  print("Failed to restore collection")
192
+ # Clean up partially created collection
193
+ try:
194
+ if restore_collection_name in connection.list_collections():
195
+ print(f"Cleaning up failed restore: deleting collection '{restore_collection_name}'")
196
+ connection.delete_collection(restore_collection_name)
197
+ except Exception as cleanup_error:
198
+ print(f"Warning: Failed to clean up collection: {cleanup_error}")
218
199
  return False
219
200
 
220
201
  except Exception as e:
221
202
  print(f"Restore failed: {e}")
222
203
  import traceback
223
204
  traceback.print_exc()
205
+
206
+ # Clean up partially created collection
207
+ try:
208
+ if restore_collection_name in connection.list_collections():
209
+ print(f"Cleaning up failed restore: deleting collection '{restore_collection_name}'")
210
+ connection.delete_collection(restore_collection_name)
211
+ except Exception as cleanup_error:
212
+ print(f"Warning: Failed to clean up collection: {cleanup_error}")
213
+
224
214
  return False
225
215
 
226
216
  @staticmethod