solana-agent 27.3.6__py3-none-any.whl → 27.3.8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.
Files changed (33)
  1. solana_agent/__init__.py +1 -3
  2. solana_agent/adapters/mongodb_adapter.py +5 -2
  3. solana_agent/adapters/openai_adapter.py +32 -27
  4. solana_agent/adapters/pinecone_adapter.py +91 -63
  5. solana_agent/client/solana_agent.py +38 -23
  6. solana_agent/domains/agent.py +7 -13
  7. solana_agent/domains/routing.py +5 -5
  8. solana_agent/factories/agent_factory.py +49 -34
  9. solana_agent/interfaces/client/client.py +22 -13
  10. solana_agent/interfaces/plugins/plugins.py +2 -1
  11. solana_agent/interfaces/providers/data_storage.py +9 -2
  12. solana_agent/interfaces/providers/llm.py +26 -12
  13. solana_agent/interfaces/providers/memory.py +1 -1
  14. solana_agent/interfaces/providers/vector_storage.py +3 -9
  15. solana_agent/interfaces/services/agent.py +21 -6
  16. solana_agent/interfaces/services/knowledge_base.py +6 -8
  17. solana_agent/interfaces/services/query.py +16 -5
  18. solana_agent/interfaces/services/routing.py +0 -1
  19. solana_agent/plugins/manager.py +14 -9
  20. solana_agent/plugins/registry.py +13 -11
  21. solana_agent/plugins/tools/__init__.py +0 -5
  22. solana_agent/plugins/tools/auto_tool.py +1 -0
  23. solana_agent/repositories/memory.py +20 -22
  24. solana_agent/services/__init__.py +1 -1
  25. solana_agent/services/agent.py +119 -89
  26. solana_agent/services/knowledge_base.py +182 -131
  27. solana_agent/services/query.py +48 -24
  28. solana_agent/services/routing.py +30 -18
  29. {solana_agent-27.3.6.dist-info → solana_agent-27.3.8.dist-info}/METADATA +6 -5
  30. solana_agent-27.3.8.dist-info/RECORD +39 -0
  31. solana_agent-27.3.6.dist-info/RECORD +0 -39
  32. {solana_agent-27.3.6.dist-info → solana_agent-27.3.8.dist-info}/LICENSE +0 -0
  33. {solana_agent-27.3.6.dist-info → solana_agent-27.3.8.dist-info}/WHEEL +0 -0
solana_agent/__init__.py CHANGED
@@ -16,16 +16,14 @@ from solana_agent.factories.agent_factory import SolanaAgentFactory
 # Useful tools and utilities
 from solana_agent.plugins.manager import PluginManager
 from solana_agent.plugins.registry import ToolRegistry
-from solana_agent.plugins.tools import AutoTool
+from solana_agent.plugins.tools.auto_tool import AutoTool
 
 # Package metadata
 __all__ = [
     # Main client interfaces
     "SolanaAgent",
-
     # Factories
     "SolanaAgentFactory",
-
     # Tools
     "PluginManager",
     "ToolRegistry",
solana_agent/adapters/mongodb_adapter.py CHANGED
@@ -3,6 +3,7 @@ MongoDB adapter for the Solana Agent system.
 
 This adapter implements the DataStorageProvider interface for MongoDB.
 """
+
 import uuid
 from typing import Dict, List, Tuple, Optional
 
@@ -40,7 +41,7 @@ class MongoDBAdapter(DataStorageProvider):
         query: Dict,
         sort: Optional[List[Tuple]] = None,
         limit: int = 0,
-        skip: int = 0
+        skip: int = 0,
     ) -> List[Dict]:
         cursor = self.db[collection].find(query)
         if sort:
@@ -51,7 +52,9 @@ class MongoDBAdapter(DataStorageProvider):
             cursor = cursor.skip(skip)
         return list(cursor)
 
-    def update_one(self, collection: str, query: Dict, update: Dict, upsert: bool = False) -> bool:
+    def update_one(
+        self, collection: str, query: Dict, update: Dict, upsert: bool = False
+    ) -> bool:
         result = self.db[collection].update_one(query, update, upsert=upsert)
         return result.modified_count > 0 or (upsert and result.upserted_id is not None)
 
solana_agent/adapters/openai_adapter.py CHANGED
@@ -3,6 +3,7 @@ LLM provider adapters for the Solana Agent system.
 
 These adapters implement the LLMProvider interface for different LLM services.
 """
+
 from typing import AsyncGenerator, List, Literal, Optional, Type, TypeVar
 
 from openai import AsyncOpenAI
@@ -12,7 +13,7 @@ from instructor import Mode
 
 from solana_agent.interfaces.providers.llm import LLMProvider
 
-T = TypeVar('T', bound=BaseModel)
+T = TypeVar("T", bound=BaseModel)
 
 DEFAULT_CHAT_MODEL = "gpt-4.1-mini"
 DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
@@ -38,10 +39,19 @@ class OpenAIAdapter(LLMProvider):
         self,
         text: str,
         instructions: str = "You speak in a friendly and helpful manner.",
-        voice: Literal["alloy", "ash", "ballad", "coral", "echo",
-                       "fable", "onyx", "nova", "sage", "shimmer"] = "nova",
-        response_format: Literal['mp3', 'opus',
-                                 'aac', 'flac', 'wav', 'pcm'] = "aac",
+        voice: Literal[
+            "alloy",
+            "ash",
+            "ballad",
+            "coral",
+            "echo",
+            "fable",
+            "onyx",
+            "nova",
+            "sage",
+            "shimmer",
+        ] = "nova",
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] = "aac",
     ) -> AsyncGenerator[bytes, None]:  # pragma: no cover
         """Stream text-to-speech audio from OpenAI models.
 
@@ -59,7 +69,7 @@ class OpenAIAdapter(LLMProvider):
                     model=self.tts_model,
                     voice=voice,
                     input=text,
-                    response_format=response_format
+                    response_format=response_format,
                 ) as stream:
                     # Stream the bytes in 16KB chunks
                     async for chunk in stream.iter_bytes(chunk_size=1024 * 16):
@@ -68,12 +78,14 @@ class OpenAIAdapter(LLMProvider):
             except Exception as e:
                 print(f"Error in text_to_speech: {str(e)}")
                 import traceback
+
                 print(traceback.format_exc())
                 yield b""  # Return empty bytes on error
 
         except Exception as e:
             print(f"Error in text_to_speech: {str(e)}")
             import traceback
+
             print(traceback.format_exc())
             yield b""  # Return empty bytes on error
 
@@ -106,6 +118,7 @@ class OpenAIAdapter(LLMProvider):
         except Exception as e:
             print(f"Error in transcribe_audio: {str(e)}")
             import traceback
+
             print(traceback.format_exc())
             yield f"I apologize, but I encountered an error transcribing the audio: {str(e)}"
 
@@ -152,6 +165,7 @@ class OpenAIAdapter(LLMProvider):
         except Exception as e:
             print(f"Error in generate_text: {str(e)}")
             import traceback
+
             print(traceback.format_exc())
             yield f"I apologize, but I encountered an error: {str(e)}"
 
@@ -179,20 +193,18 @@ class OpenAIAdapter(LLMProvider):
             if model:
                 self.parse_model = model
 
-            patched_client = instructor.from_openai(
-                client, mode=Mode.TOOLS_STRICT)
+            patched_client = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)
 
             # Use instructor's structured generation with function calling
             response = await patched_client.chat.completions.create(
                 model=self.parse_model,
                 messages=messages,
                 response_model=model_class,
-                max_retries=2  # Automatically retry on validation errors
+                max_retries=2,  # Automatically retry on validation errors
             )
             return response
         except Exception as e:
-            print(
-                f"Error with instructor parsing (TOOLS_STRICT mode): {e}")
+            print(f"Error with instructor parsing (TOOLS_STRICT mode): {e}")
 
             try:
                 if api_key and base_url:
@@ -204,13 +216,12 @@ class OpenAIAdapter(LLMProvider):
                     self.parse_model = model
 
                 # First fallback: Try regular JSON mode
-                patched_client = instructor.from_openai(
-                    client, mode=Mode.JSON)
+                patched_client = instructor.from_openai(client, mode=Mode.JSON)
                 response = await patched_client.chat.completions.create(
                     model=self.parse_model,
                     messages=messages,
                     response_model=model_class,
-                    max_retries=1
+                    max_retries=1,
                 )
                 return response
             except Exception as json_error:
@@ -218,8 +229,7 @@ class OpenAIAdapter(LLMProvider):
 
             try:
                 if api_key and base_url:
-                    client = AsyncOpenAI(
-                        api_key=api_key, base_url=base_url)
+                    client = AsyncOpenAI(api_key=api_key, base_url=base_url)
                 else:
                     client = self.client
 
@@ -241,9 +251,9 @@ class OpenAIAdapter(LLMProvider):
                     model=self.parse_model,
                     messages=[
                         {"role": "system", "content": fallback_system_prompt},
-                        {"role": "user", "content": prompt}
+                        {"role": "user", "content": prompt},
                     ],
-                    response_format={"type": "json_object"}
+                    response_format={"type": "json_object"},
                 )
 
                 # Extract and parse the JSON response
@@ -259,10 +269,7 @@ class OpenAIAdapter(LLMProvider):
             ) from e
 
     async def embed_text(
-        self,
-        text: str,
-        model: Optional[str] = None,
-        dimensions: Optional[int] = None
+        self, text: str, model: Optional[str] = None, dimensions: Optional[int] = None
     ) -> List[float]:  # pragma: no cover
         """Generate an embedding for the given text using OpenAI.
 
@@ -286,19 +293,17 @@ class OpenAIAdapter(LLMProvider):
             text = text.replace("\n", " ")
 
             response = await self.client.embeddings.create(
-                input=[text],
-                model=embedding_model,
-                dimensions=embedding_dimensions
+                input=[text], model=embedding_model, dimensions=embedding_dimensions
            )
 
             if response.data and response.data[0].embedding:
                 return response.data[0].embedding
             else:
-                raise ValueError(
-                    "Failed to retrieve embedding from OpenAI response")
+                raise ValueError("Failed to retrieve embedding from OpenAI response")
 
         except Exception as e:
             print(f"Error generating embedding: {e}")
             import traceback
+
             print(traceback.format_exc())
             raise
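
Beyond the mechanical re-wrapping, the hunk worth noting is embed_text, since the Pinecone adapter below now expects callers to bring their own vectors. A rough usage sketch, assuming the adapter takes an API key in its constructor and falls back to its default embedding model and dimensions when the optional arguments are omitted:

    import asyncio

    from solana_agent.adapters.openai_adapter import OpenAIAdapter

    async def main() -> None:
        adapter = OpenAIAdapter(api_key="sk-...")  # assumed constructor argument
        # model and dimensions are optional; the adapter's defaults apply
        vector = await adapter.embed_text("What is Solana Agent?")
        print(len(vector))  # must equal the Pinecone index dimension

    asyncio.run(main())
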
solana_agent/adapters/pinecone_adapter.py CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Dict, Any, Optional, Literal, Union
+from typing import List, Dict, Any, Optional, Literal
 from pinecone import PineconeAsyncio, ServerlessSpec
 from pinecone.exceptions import PineconeApiException
 import asyncio
@@ -89,14 +89,15 @@ class PineconeAdapter(VectorStorageProvider):
         if not self.index_name:
             raise ValueError("Pinecone index name is required.")
         if self.embedding_dimensions <= 0:
-            raise ValueError(
-                "embedding_dimensions must be a positive integer.")
+            raise ValueError("embedding_dimensions must be a positive integer.")
         if self.use_reranking and not self.rerank_model:
             raise ValueError(
-                "rerank_model must be specified when use_reranking is True.")
+                "rerank_model must be specified when use_reranking is True."
+            )
 
         print(
-            f"PineconeAdapter configured for index '{self.index_name}' using external embeddings with dimension {self.embedding_dimensions}.")
+            f"PineconeAdapter configured for index '{self.index_name}' using external embeddings with dimension {self.embedding_dimensions}."
+        )
         if self.use_reranking:
             print(f"Reranking enabled using model '{self.rerank_model}'.")
 
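For orientation, the attributes validated above come from the adapter's configuration. A hypothetical construction call: the keyword names mirror the attribute names used in the diff, but the actual constructor signature is not shown in these hunks, and the model id is purely illustrative:

    pinecone_adapter = PineconeAdapter(
        api_key="pc-...",                    # assumed parameter name
        index_name="solana-agent-kb",        # hypothetical index
        embedding_dimensions=3072,           # must match the external embedding model
        use_reranking=True,
        rerank_model="bge-reranker-v2-m3",   # illustrative reranker id
        rerank_text_field="text",            # metadata field holding raw text
    )
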
@@ -116,18 +117,21 @@ class PineconeAdapter(VectorStorageProvider):
             if self.create_index_if_not_exists:
                 await self._create_index_if_not_exists_async()
 
-            print(
-                f"Describing Pinecone index '{self.index_name}' to get host...")
+            print(f"Describing Pinecone index '{self.index_name}' to get host...")
             index_description = await self.pinecone.describe_index(self.index_name)
             self.index_host = index_description.host
             if not self.index_host:
                 raise RuntimeError(
-                    f"Could not obtain host for index '{self.index_name}'.")
+                    f"Could not obtain host for index '{self.index_name}'."
+                )
             print(f"Obtained index host: {self.index_host}")
 
             # Validate index dimension matches configured dimension
             index_dimension = index_description.dimension
-            if index_dimension != 0 and index_dimension != self.embedding_dimensions:
+            if (
+                index_dimension != 0
+                and index_dimension != self.embedding_dimensions
+            ):
                 # This is a critical mismatch
                 raise ValueError(
                     f"CRITICAL MISMATCH: Pinecone index dimension ({index_dimension}) "
@@ -136,7 +140,8 @@ class PineconeAdapter(VectorStorageProvider):
                 )
             elif index_dimension == 0:
                 print(
-                    f"Warning: Pinecone index dimension reported as 0. Cannot verify match with configured dimension ({self.embedding_dimensions}).")
+                    f"Warning: Pinecone index dimension reported as 0. Cannot verify match with configured dimension ({self.embedding_dimensions})."
+                )
 
             print("Attempting to get index stats...")
             stats = await self.describe_index_stats()
@@ -144,7 +149,8 @@ class PineconeAdapter(VectorStorageProvider):
 
             total_vector_count = stats.get("total_vector_count", 0)
             print(
-                f"Current index '{self.index_name}' contains {total_vector_count} vectors.")
+                f"Current index '{self.index_name}' contains {total_vector_count} vectors."
+            )
 
             self._initialized = True
             print("Pinecone adapter initialization complete.")
@@ -156,7 +162,8 @@ class PineconeAdapter(VectorStorageProvider):
             raise
         except Exception as e:
             print(
-                f"Failed to initialize Pinecone async adapter for index '{self.index_name}': {e}")
+                f"Failed to initialize Pinecone async adapter for index '{self.index_name}': {e}"
+            )
             self.pinecone = None
             self.index_host = None
             raise
@@ -164,40 +171,37 @@ class PineconeAdapter(VectorStorageProvider):
     async def _create_index_if_not_exists_async(self) -> None:
         """Create the Pinecone index asynchronously if it doesn't already exist."""
         if not self.pinecone:
-            raise RuntimeError(
-                "Pinecone client not initialized before creating index.")
+            raise RuntimeError("Pinecone client not initialized before creating index.")
         try:
             indexes_response = await self.pinecone.list_indexes()
-            existing_indexes = indexes_response.get('indexes', [])
-            existing_names = [idx.get('name') for idx in existing_indexes]
+            existing_indexes = indexes_response.get("indexes", [])
+            existing_names = [idx.get("name") for idx in existing_indexes]
 
             if self.index_name not in existing_names:
                 print(
-                    f"Creating Pinecone index '{self.index_name}' with dimension {self.embedding_dimensions}...")
+                    f"Creating Pinecone index '{self.index_name}' with dimension {self.embedding_dimensions}..."
+                )
 
-                spec_data = {
-                    "cloud": self.cloud_provider,
-                    "region": self.region
-                }
+                spec_data = {"cloud": self.cloud_provider, "region": self.region}
 
                 create_params = {
                     "name": self.index_name,
                     "dimension": self.embedding_dimensions,  # Use configured dimension
                     "metric": self.metric,
                     # Assuming serverless, adjust if needed
-                    "spec": ServerlessSpec(**spec_data)
+                    "spec": ServerlessSpec(**spec_data),
                 }
 
                 await self.pinecone.create_index(**create_params)
                 print(
-                    f"✅ Successfully initiated creation of Pinecone index '{self.index_name}'. Waiting for it to be ready...")
+                    f"✅ Successfully initiated creation of Pinecone index '{self.index_name}'. Waiting for it to be ready..."
+                )
                 # Wait time might need adjustment based on index size/type and cloud provider
                 await asyncio.sleep(30)  # Increased wait time
             else:
                 print(f"Using existing Pinecone index '{self.index_name}'")
         except Exception as e:
-            print(
-                f"Error checking or creating Pinecone index asynchronously: {e}")
+            print(f"Error checking or creating Pinecone index asynchronously: {e}")
             raise
 
     async def _ensure_initialized(self):
@@ -206,20 +210,22 @@ class PineconeAdapter(VectorStorageProvider):
             await self._initialize_async()
         if not self._initialized or not self.pinecone or not self.index_host:
             raise RuntimeError(
-                "Pinecone async client failed to initialize or get index host.")
+                "Pinecone async client failed to initialize or get index host."
+            )
 
     # _get_embedding method is removed as embeddings are handled externally
 
     async def upsert_text(self, *args, **kwargs):  # pragma: no cover
         """Deprecated: Embeddings should be generated externally."""
         raise NotImplementedError(
-            "upsert_text is deprecated. Use the generic upsert method with pre-computed vectors.")
+            "upsert_text is deprecated. Use the generic upsert method with pre-computed vectors."
+        )
 
     async def upsert(
         self,
         # Expects {"id": str, "values": List[float], "metadata": Optional[Dict]}
         vectors: List[Dict[str, Any]],
-        namespace: Optional[str] = None
+        namespace: Optional[str] = None,
     ) -> None:  # pragma: no cover
         """Upsert pre-embedded vectors into Pinecone asynchronously."""
         await self._ensure_initialized()
@@ -227,13 +233,16 @@ class PineconeAdapter(VectorStorageProvider):
             print("Upsert skipped: No vectors provided.")
             return
         try:
-            async with self.pinecone.IndexAsyncio(host=self.index_host) as index_instance:
+            async with self.pinecone.IndexAsyncio(
+                host=self.index_host
+            ) as index_instance:
                 upsert_params = {"vectors": vectors}
                 if namespace:
                     upsert_params["namespace"] = namespace
                 await index_instance.upsert(**upsert_params)
                 print(
-                    f"Successfully upserted {len(vectors)} vectors into namespace '{namespace or 'default'}'.")
+                    f"Successfully upserted {len(vectors)} vectors into namespace '{namespace or 'default'}'."
+                )
         except PineconeApiException as e:
             print(f"Pinecone API error during async upsert: {e}")
             raise
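upsert_text now fails fast with NotImplementedError, so the migration path is: embed externally (e.g. with OpenAIAdapter.embed_text above), then call the generic upsert with pre-computed vectors. A sketch of the new write path, inside an async function and reusing the hypothetical adapters from the earlier examples; the vector dict layout comes straight from the comment in the hunk above:

    # Old (now raises NotImplementedError):
    #   await pinecone_adapter.upsert_text(...)

    text = "Solana is a high-throughput blockchain."
    values = await openai_adapter.embed_text(text)
    await pinecone_adapter.upsert(
        vectors=[
            {
                "id": "doc-1",
                "values": values,
                # query_and_rerank() reads the document text back out of the
                # metadata field configured as rerank_text_field ("text" here).
                "metadata": {"text": text},
            }
        ],
        namespace="kb",  # optional
    )
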
@@ -244,7 +253,8 @@ class PineconeAdapter(VectorStorageProvider):
     async def query_text(self, *args, **kwargs):  # pragma: no cover
         """Deprecated: Use query() for simple vector search or query_and_rerank() for reranking."""
         raise NotImplementedError(
-            "query_text is deprecated. Use query() or query_and_rerank() with a pre-computed vector.")
+            "query_text is deprecated. Use query() or query_and_rerank() with a pre-computed vector."
+        )
 
     async def query_and_rerank(
         self,
@@ -276,12 +286,16 @@ class PineconeAdapter(VectorStorageProvider):
 
         if not self.use_reranking:
             print(
-                "Warning: query_and_rerank called but use_reranking is False. Performing standard query.")
-            return await self.query(vector, top_k, namespace, filter, include_values, include_metadata)
+                "Warning: query_and_rerank called but use_reranking is False. Performing standard query."
+            )
+            return await self.query(
+                vector, top_k, namespace, filter, include_values, include_metadata
+            )
 
         if not self.rerank_model:
             raise ValueError(
-                "Cannot rerank: rerank_model was not specified during initialization.")
+                "Cannot rerank: rerank_model was not specified during initialization."
+            )
 
         # Determine how many results to fetch initially for reranking
         initial_k = top_k * self.initial_query_top_k_multiplier
@@ -294,7 +308,7 @@ class PineconeAdapter(VectorStorageProvider):
             namespace=namespace,
             filter=filter,
             include_values=include_values,  # Include values if requested in final output
-            include_metadata=True  # Always need metadata for reranking text field
+            include_metadata=True,  # Always need metadata for reranking text field
         )
 
         if not initial_results:
@@ -314,25 +328,28 @@ class PineconeAdapter(VectorStorageProvider):
                     original_results_map[doc_text] = match
                 else:
                     print(
-                        f"Warning: Skipping result ID {match.get('id')} for reranking - missing or invalid text in field '{self.rerank_text_field}'.")
+                        f"Warning: Skipping result ID {match.get('id')} for reranking - missing or invalid text in field '{self.rerank_text_field}'."
+                    )
             else:
                 print(
-                    f"Warning: Skipping result ID {match.get('id')} for reranking - metadata is missing or not a dictionary.")
+                    f"Warning: Skipping result ID {match.get('id')} for reranking - metadata is missing or not a dictionary."
+                )
 
         if not documents_to_rerank:
             print(
-                f"⚠️ Reranking skipped: No documents found with text in the specified field ('{self.rerank_text_field}'). Returning top {top_k} initial results.")
+                f"⚠️ Reranking skipped: No documents found with text in the specified field ('{self.rerank_text_field}'). Returning top {top_k} initial results."
+            )
             # Return the originally requested top_k
             return initial_results[:top_k]
 
         # 3. Perform Reranking Call
         if not self.pinecone:
-            raise RuntimeError(
-                "Pinecone client not initialized for reranking.")
+            raise RuntimeError("Pinecone client not initialized for reranking.")
 
         try:
             print(
-                f"Reranking {len(documents_to_rerank)} results using {self.rerank_model} for query: '{query_text_for_rerank[:50]}...'")
+                f"Reranking {len(documents_to_rerank)} results using {self.rerank_model} for query: '{query_text_for_rerank[:50]}...'"
+            )
             rerank_params = {}  # Add model-specific params if needed
 
             rerank_request = {
@@ -340,7 +357,7 @@ class PineconeAdapter(VectorStorageProvider):
                 "documents": documents_to_rerank,
                 "model": self.rerank_model,
                 "top_n": top_k,  # Request the final desired number
-                "parameters": rerank_params
+                "parameters": rerank_params,
             }
 
             rerank_response = await self.pinecone.rerank(**rerank_request)
@@ -361,21 +378,25 @@ class PineconeAdapter(VectorStorageProvider):
                         reranked_results.append(updated_match)
                     else:
                         print(
-                            f"Warning: Reranked document text not found in original results map: '{doc_text[:50]}...'")
+                            f"Warning: Reranked document text not found in original results map: '{doc_text[:50]}...'"
+                        )
 
             if reranked_results:
                 print(
-                    f"Reranking complete. Returning {len(reranked_results)} results.")
+                    f"Reranking complete. Returning {len(reranked_results)} results."
+                )
                 return reranked_results
             else:
                 # Should not happen if rerank_response.results existed, but handle defensively
                 print(
-                    "Warning: No matches found after processing reranking response. Falling back to initial vector search results.")
+                    "Warning: No matches found after processing reranking response. Falling back to initial vector search results."
+                )
                 return initial_results[:top_k]
 
         except Exception as rerank_error:
             print(
-                f"Error during reranking with {self.rerank_model}: {rerank_error}. Returning initial results.")
+                f"Error during reranking with {self.rerank_model}: {rerank_error}. Returning initial results."
+            )
             # Fallback to top_k initial results
             return initial_results[:top_k]
 
@@ -408,12 +429,14 @@ class PineconeAdapter(VectorStorageProvider):
         """
         await self._ensure_initialized()
         try:
-            async with self.pinecone.IndexAsyncio(host=self.index_host) as index_instance:
+            async with self.pinecone.IndexAsyncio(
+                host=self.index_host
+            ) as index_instance:
                 query_params = {
                     "vector": vector,
                     "top_k": top_k,
                     "include_values": include_values,
-                    "include_metadata": include_metadata
+                    "include_metadata": include_metadata,
                 }
                 if namespace:
                     query_params["namespace"] = namespace
@@ -422,8 +445,7 @@ class PineconeAdapter(VectorStorageProvider):
                 query_response = await index_instance.query(**query_params)
 
                 # Ensure response structure is handled safely
-                matches = query_response.get(
-                    "matches", []) if query_response else []
+                matches = query_response.get("matches", []) if query_response else []
                 return matches
 
         except PineconeApiException as e:
@@ -434,9 +456,7 @@ class PineconeAdapter(VectorStorageProvider):
             return []  # Return empty list for general errors
 
     async def delete(
-        self,
-        ids: List[str],
-        namespace: Optional[str] = None
+        self, ids: List[str], namespace: Optional[str] = None
     ) -> None:  # pragma: no cover
         """Delete vectors by IDs from Pinecone asynchronously."""
         await self._ensure_initialized()
@@ -444,13 +464,16 @@ class PineconeAdapter(VectorStorageProvider):
             print("Delete skipped: No IDs provided.")
             return
         try:
-            async with self.pinecone.IndexAsyncio(host=self.index_host) as index_instance:
+            async with self.pinecone.IndexAsyncio(
+                host=self.index_host
+            ) as index_instance:
                 delete_params = {"ids": ids}
                 if namespace:
                     delete_params["namespace"] = namespace
                 await index_instance.delete(**delete_params)
                 print(
-                    f"Attempted to delete {len(ids)} vectors from namespace '{namespace or 'default'}'.")
+                    f"Attempted to delete {len(ids)} vectors from namespace '{namespace or 'default'}'."
+                )
         except PineconeApiException as e:
             print(f"Pinecone API error during async delete: {e}")
             raise
@@ -463,16 +486,21 @@ class PineconeAdapter(VectorStorageProvider):
         print(f"describe_index_stats: Entering for host {self.index_host}")
         try:
             print(
-                f"describe_index_stats: Getting IndexAsyncio context for host {self.index_host}...")
-            async with self.pinecone.IndexAsyncio(host=self.index_host) as index_instance:
+                f"describe_index_stats: Getting IndexAsyncio context for host {self.index_host}..."
+            )
+            async with self.pinecone.IndexAsyncio(
+                host=self.index_host
+            ) as index_instance:
                 print(
-                    f"describe_index_stats: Context acquired. Calling describe_index_stats on index instance...")
+                    "describe_index_stats: Context acquired. Calling describe_index_stats on index instance..."
+                )
                 stats_response = await index_instance.describe_index_stats()
                 print(
-                    f"describe_index_stats: Call completed. Response: {stats_response}")
+                    f"describe_index_stats: Call completed. Response: {stats_response}"
+                )
 
                 # Convert response to dict if necessary (handle potential None or different types)
-                if hasattr(stats_response, 'to_dict'):
+                if hasattr(stats_response, "to_dict"):
                     result_dict = stats_response.to_dict()
                 elif isinstance(stats_response, dict):
                     result_dict = stats_response
@@ -482,14 +510,14 @@ class PineconeAdapter(VectorStorageProvider):
                         result_dict = dict(stats_response)
                     except (TypeError, ValueError):
                         print(
-                            f"Warning: Could not convert stats_response to dict: {stats_response}")
+                            f"Warning: Could not convert stats_response to dict: {stats_response}"
+                        )
                         result_dict = {}
 
                 print(f"describe_index_stats: Returning stats dict: {result_dict}")
                 return result_dict
         except PineconeApiException as e:
-            print(
-                f"Pinecone API error describing index stats asynchronously: {e}")
+            print(f"Pinecone API error describing index stats asynchronously: {e}")
             raise  # Re-raise API errors
         except Exception as e:
             print(f"Error describing index stats asynchronously: {e}")