solana-agent 28.1.0__py3-none-any.whl → 28.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- solana_agent/adapters/openai_adapter.py +59 -44
- solana_agent/adapters/pinecone_adapter.py +76 -57
- solana_agent/cli.py +128 -0
- solana_agent/factories/agent_factory.py +35 -16
- solana_agent/plugins/manager.py +24 -8
- solana_agent/plugins/registry.py +23 -10
- solana_agent/repositories/memory.py +18 -10
- solana_agent/services/knowledge_base.py +91 -41
- solana_agent/services/routing.py +14 -9
- {solana_agent-28.1.0.dist-info → solana_agent-28.2.0.dist-info}/METADATA +46 -3
- {solana_agent-28.1.0.dist-info → solana_agent-28.2.0.dist-info}/RECORD +14 -12
- solana_agent-28.2.0.dist-info/entry_points.txt +3 -0
- {solana_agent-28.1.0.dist-info → solana_agent-28.2.0.dist-info}/LICENSE +0 -0
- {solana_agent-28.1.0.dist-info → solana_agent-28.2.0.dist-info}/WHEEL +0 -0
solana_agent/adapters/openai_adapter.py

@@ -4,6 +4,7 @@ LLM provider adapters for the Solana Agent system.
 These adapters implement the LLMProvider interface for different LLM services.
 """
 
+import logging
 from typing import AsyncGenerator, List, Literal, Optional, Type, TypeVar
 
 from openai import AsyncOpenAI
@@ -14,6 +15,9 @@ import logfire
 
 from solana_agent.interfaces.providers.llm import LLMProvider
 
+# Setup logger for this module
+logger = logging.getLogger(__name__)
+
 T = TypeVar("T", bound=BaseModel)
 
 DEFAULT_CHAT_MODEL = "gpt-4.1"
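Both adapter files in this release follow the same pattern: a module-level logger named after the dotted module path replaces the scattered print calls. A minimal sketch of the pattern (the function names are illustrative, not from the package):

```python
import logging

# One logger per module, named after the dotted module path, so host
# applications can filter per adapter
# (e.g. "solana_agent.adapters.openai_adapter").
logger = logging.getLogger(__name__)


def risky_operation() -> None:
    raise RuntimeError("boom")


def do_work() -> None:
    logger.info("Starting work...")
    try:
        risky_operation()
    except Exception as e:
        # logger.exception logs at ERROR level and appends the traceback,
        # replacing the old print(traceback.format_exc()) idiom.
        logger.exception(f"Work failed: {e}")
```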
@@ -35,12 +39,10 @@ class OpenAIAdapter(LLMProvider):
         try:
             logfire.configure(token=logfire_api_key)
             self.logfire = True
-
+            logger.info("Logfire configured successfully.") # Use logger.info
         except Exception as e:
-
-
-            ) # Log error if configuration fails
-            self.logfire = False # Ensure logfire is False if config fails
+            logger.error(f"Failed to configure Logfire: {e}") # Use logger.error
+            self.logfire = False
 
         self.parse_model = DEFAULT_PARSE_MODEL
         self.text_model = DEFAULT_CHAT_MODEL
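The Logfire setup keeps a boolean flag so later calls can decide whether to instrument the client, and a configuration failure is now logged rather than allowed to propagate. A hedged sketch of the same guard, mirroring the calls visible in the hunk:

```python
import logging

import logfire

logger = logging.getLogger(__name__)


class Adapter:
    def __init__(self, logfire_api_key: str | None = None) -> None:
        self.logfire = False
        if logfire_api_key:
            try:
                logfire.configure(token=logfire_api_key)
                self.logfire = True
                logger.info("Logfire configured successfully.")
            except Exception as e:
                # Observability setup must never take the adapter down with it.
                logger.error(f"Failed to configure Logfire: {e}")
                self.logfire = False
```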
@@ -79,7 +81,8 @@ class OpenAIAdapter(LLMProvider):
             Audio bytes as they become available
         """
         try:
-
+            if self.logfire: # Instrument only if logfire is enabled
+                logfire.instrument_openai(self.client)
             async with self.client.audio.speech.with_streaming_response.create(
                 model=self.tts_model,
                 voice=voice,
@@ -91,17 +94,8 @@ class OpenAIAdapter(LLMProvider):
                     yield chunk
 
         except Exception as e:
-
-
-
-            print(traceback.format_exc())
-            yield b"" # Return empty bytes on error
-
-        except Exception as e:
-            print(f"Error in text_to_speech: {str(e)}")
-            import traceback
-
-            print(traceback.format_exc())
+            # Log the exception with traceback
+            logger.exception(f"Error in text_to_speech: {e}")
             yield b"" # Return empty bytes on error
 
     async def transcribe_audio(
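text_to_speech and transcribe_audio are async generators, so raising out of them would break the consuming stream mid-iteration; the new code logs the traceback and yields a sentinel instead. A simplified sketch of that shape (names assumed, not from the package):

```python
import logging
from typing import AsyncGenerator

logger = logging.getLogger(__name__)


async def stream_audio(text: str) -> AsyncGenerator[bytes, None]:
    try:
        # Stand-in for chunks arriving from a streaming TTS response.
        for chunk in (text.encode("utf-8"), b"more-audio"):
            yield chunk
    except Exception as e:
        # Log the message plus traceback, then yield an empty sentinel so the
        # consumer's loop ends cleanly instead of crashing.
        logger.exception(f"Error in text_to_speech: {e}")
        yield b""
```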
@@ -121,7 +115,8 @@ class OpenAIAdapter(LLMProvider):
             Transcript text chunks as they become available
         """
         try:
-
+            if self.logfire: # Instrument only if logfire is enabled
+                logfire.instrument_openai(self.client)
             async with self.client.audio.transcriptions.with_streaming_response.create(
                 model=self.transcription_model,
                 file=(f"file.{input_format}", audio_bytes),
@@ -132,10 +127,8 @@ class OpenAIAdapter(LLMProvider):
                 yield chunk
 
         except Exception as e:
-
-
-
-            print(traceback.format_exc())
+            # Log the exception with traceback
+            logger.exception(f"Error in transcribe_audio: {e}")
             yield f"I apologize, but I encountered an error transcribing the audio: {str(e)}"
 
     async def generate_text(
@@ -177,12 +170,16 @@ class OpenAIAdapter(LLMProvider):
                 full_text = response.choices[0].message.content
                 return full_text # Return the complete string
             else:
-
+                logger.warning(
+                    "Received non-streaming response with no content."
+                ) # Use logger.warning
                 return "" # Return empty string if no content
 
         except Exception as e:
-            # Log the
-
+            # Log the exception and return an error message string
+            logger.exception(f"Error in generate_text: {e}")
+            # Consider returning a more informative error string or raising
+            return f"Error generating text: {e}"
 
     async def parse_structured_output(
         self,
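A practical consequence of swapping print for logging: none of these messages appear until the host application configures a handler. A typical setup (level and format are illustrative):

```python
import logging

# Route solana_agent's records to the root handler; INFO shows the new
# lifecycle messages, DEBUG additionally surfaces the describe_index_stats
# messages demoted to debug level later in this diff.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)
logging.getLogger("solana_agent").setLevel(logging.INFO)
```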
@@ -208,56 +205,67 @@ class OpenAIAdapter(LLMProvider):
             if self.logfire:
                 logfire.instrument_openai(client)
 
-
-
+            # Use the provided model or the default parse model
+            current_parse_model = model or self.parse_model
 
             patched_client = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)
 
             # Use instructor's structured generation with function calling
             response = await patched_client.chat.completions.create(
-                model=
+                model=current_parse_model, # Use the determined model
                 messages=messages,
                 response_model=model_class,
                 max_retries=2, # Automatically retry on validation errors
             )
             return response
         except Exception as e:
-
+            logger.warning(
+                f"Instructor parsing (TOOLS_STRICT mode) failed: {e}"
+            ) # Log warning
 
             try:
+                # Determine client again for fallback
                 if api_key and base_url:
                     client = AsyncOpenAI(api_key=api_key, base_url=base_url)
                 else:
                     client = self.client
 
-                if
-
+                if self.logfire: # Instrument again if needed
+                    logfire.instrument_openai(client)
+
+                # Use the provided model or the default parse model
+                current_parse_model = model or self.parse_model
 
                 # First fallback: Try regular JSON mode
+                logger.info("Falling back to instructor JSON mode.") # Log info
                 patched_client = instructor.from_openai(client, mode=Mode.JSON)
                 response = await patched_client.chat.completions.create(
-                    model=
+                    model=current_parse_model, # Use the determined model
                     messages=messages,
                     response_model=model_class,
                     max_retries=1,
                 )
                 return response
             except Exception as json_error:
-
+                logger.warning(
+                    f"Instructor JSON mode fallback also failed: {json_error}"
+                ) # Log warning
 
                 try:
+                    # Determine client again for final fallback
                     if api_key and base_url:
                         client = AsyncOpenAI(api_key=api_key, base_url=base_url)
                     else:
                         client = self.client
 
-                    if self.logfire:
+                    if self.logfire: # Instrument again if needed
                         logfire.instrument_openai(client)
 
-
-
+                    # Use the provided model or the default parse model
+                    current_parse_model = model or self.parse_model
 
                     # Final fallback: Manual extraction with a detailed prompt
+                    logger.info("Falling back to manual JSON extraction.") # Log info
                     fallback_system_prompt = f"""
 {system_prompt}
 
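The hunk above threads current_parse_model = model or self.parse_model through a three-stage fallback: instructor in TOOLS_STRICT mode, then instructor in JSON mode, then a manual prompt whose output is validated with model_class.model_validate_json. A condensed sketch of the first two stages, assuming an AsyncOpenAI client and a Pydantic model_class:

```python
import instructor
from instructor import Mode
from openai import AsyncOpenAI
from pydantic import BaseModel


async def parse_structured(
    client: AsyncOpenAI,
    messages: list,
    model_class: type[BaseModel],
    parse_model: str,
) -> BaseModel:
    # Stage 1: strict function calling; instructor retries on validation errors.
    try:
        patched = instructor.from_openai(client, mode=Mode.TOOLS_STRICT)
        return await patched.chat.completions.create(
            model=parse_model,
            messages=messages,
            response_model=model_class,
            max_retries=2,
        )
    except Exception:
        # Stage 2: plain JSON mode, which more providers support.
        patched = instructor.from_openai(client, mode=Mode.JSON)
        return await patched.chat.completions.create(
            model=parse_model,
            messages=messages,
            response_model=model_class,
            max_retries=1,
        )
    # Stage 3 (manual prompt + model_class.model_validate_json) is omitted here.
```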
@@ -269,7 +277,7 @@ class OpenAIAdapter(LLMProvider):
 
                     # Regular completion without instructor
                     completion = await client.chat.completions.create(
-                        model=
+                        model=current_parse_model, # Use the determined model
                         messages=[
                             {"role": "system", "content": fallback_system_prompt},
                             {"role": "user", "content": prompt},
@@ -284,7 +292,10 @@ class OpenAIAdapter(LLMProvider):
                     return model_class.model_validate_json(json_str)
 
                 except Exception as fallback_error:
-
+                    # Log the final exception with traceback
+                    logger.exception(
+                        f"All structured output fallback methods failed: {fallback_error}"
+                    )
                     raise ValueError(
                         f"Failed to generate structured output: {e}. All fallbacks failed."
                     ) from e
@@ -303,6 +314,8 @@ class OpenAIAdapter(LLMProvider):
             A list of floats representing the embedding vector.
         """
         if not text:
+            # Log error instead of raising immediately, let caller handle empty input if needed
+            logger.error("Attempted to embed empty text.")
             raise ValueError("Text cannot be empty")
 
         try:
@@ -313,7 +326,7 @@ class OpenAIAdapter(LLMProvider):
             # Replace newlines with spaces as recommended by OpenAI
             text = text.replace("\n", " ")
 
-            if self.logfire:
+            if self.logfire: # Instrument only if logfire is enabled
                 logfire.instrument_openai(self.client)
 
             response = await self.client.embeddings.create(
@@ -323,11 +336,13 @@ class OpenAIAdapter(LLMProvider):
             if response.data and response.data[0].embedding:
                 return response.data[0].embedding
             else:
+                # Log warning about unexpected response structure
+                logger.warning(
+                    "Failed to retrieve embedding from OpenAI response structure."
+                )
                 raise ValueError("Failed to retrieve embedding from OpenAI response")
 
         except Exception as e:
-
-
-
-            print(traceback.format_exc())
-            raise
+            # Log the exception with traceback before raising
+            logger.exception(f"Error generating embedding: {e}")
+            raise # Re-raise the original exception
solana_agent/adapters/pinecone_adapter.py

@@ -1,3 +1,4 @@
+import logging # Import logging
 from typing import List, Dict, Any, Optional, Literal
 from pinecone import PineconeAsyncio, ServerlessSpec
 from pinecone.exceptions import PineconeApiException
@@ -6,6 +7,9 @@ import asyncio
 from solana_agent.interfaces.providers.vector_storage import VectorStorageProvider
 # LLMProvider is no longer needed here
 
+# Setup logger for this module
+logger = logging.getLogger(__name__)
+
 # Type definitions remain useful
 PineconeRerankModel = Literal[
     "cohere-rerank-3.5",
@@ -95,11 +99,11 @@ class PineconeAdapter(VectorStorageProvider):
                 "rerank_model must be specified when use_reranking is True."
             )
 
-
+        logger.info(
             f"PineconeAdapter configured for index '{self.index_name}' using external embeddings with dimension {self.embedding_dimensions}."
         )
         if self.use_reranking:
-
+            logger.info(f"Reranking enabled using model '{self.rerank_model}'.")
 
         self._init_lock = asyncio.Lock()
         self._initialized = False
@@ -111,20 +115,22 @@ class PineconeAdapter(VectorStorageProvider):
             return
 
         try:
-
+            logger.info("Initializing PineconeAsyncio client...")
             self.pinecone = PineconeAsyncio(api_key=self.api_key)
 
             if self.create_index_if_not_exists:
                 await self._create_index_if_not_exists_async()
 
-
+            logger.info(
+                f"Describing Pinecone index '{self.index_name}' to get host..."
+            )
             index_description = await self.pinecone.describe_index(self.index_name)
             self.index_host = index_description.host
             if not self.index_host:
                 raise RuntimeError(
                     f"Could not obtain host for index '{self.index_name}'."
                 )
-
+            logger.info(f"Obtained index host: {self.index_host}")
 
             # Validate index dimension matches configured dimension
             index_dimension = index_description.dimension
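Initialization now logs each step, and the surrounding machinery (_init_lock, _initialized, _ensure_initialized) implements lazy one-time async setup. A minimal sketch of that pattern, stripped of the Pinecone specifics:

```python
import asyncio
import logging

logger = logging.getLogger(__name__)


class LazyAdapter:
    def __init__(self) -> None:
        self._init_lock = asyncio.Lock()
        self._initialized = False

    async def _initialize_async(self) -> None:
        logger.info("Initializing client...")
        await asyncio.sleep(0)  # stands in for real network setup
        self._initialized = True
        logger.info("Initialization complete.")

    async def _ensure_initialized(self) -> None:
        # Double-checked locking: cheap test first, then serialize the real
        # setup so concurrent callers initialize exactly once.
        if self._initialized:
            return
        async with self._init_lock:
            if not self._initialized:
                await self._initialize_async()
```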
@@ -139,29 +145,32 @@ class PineconeAdapter(VectorStorageProvider):
                     f"Ensure the index was created with the correct dimension or update the adapter configuration."
                 )
             elif index_dimension == 0:
-
-                    f"
+                logger.warning(
+                    f"Pinecone index dimension reported as 0. Cannot verify match with configured dimension ({self.embedding_dimensions})."
                 )
 
-
+            logger.info("Attempting to get index stats...")
             stats = await self.describe_index_stats()
-
+            logger.info(f"Successfully retrieved index stats: {stats}")
 
             total_vector_count = stats.get("total_vector_count", 0)
-
+            logger.info(
                 f"Current index '{self.index_name}' contains {total_vector_count} vectors."
             )
 
             self._initialized = True
-
+            logger.info("Pinecone adapter initialization complete.")
 
         except PineconeApiException as e:
-
+            logger.error(
+                f"Pinecone API error during async initialization: {e}",
+                exc_info=True,
+            )
             self.pinecone = None
             self.index_host = None
             raise
         except Exception as e:
-
+            logger.exception(
                 f"Failed to initialize Pinecone async adapter for index '{self.index_name}': {e}"
             )
             self.pinecone = None
@@ -178,7 +187,7 @@ class PineconeAdapter(VectorStorageProvider):
             existing_names = [idx.get("name") for idx in existing_indexes]
 
             if self.index_name not in existing_names:
-
+                logger.info(
                     f"Creating Pinecone index '{self.index_name}' with dimension {self.embedding_dimensions}..."
                 )
 
@@ -193,15 +202,17 @@ class PineconeAdapter(VectorStorageProvider):
                 }
 
                 await self.pinecone.create_index(**create_params)
-
-                    f"
+                logger.info(
+                    f"Successfully initiated creation of Pinecone index '{self.index_name}'. Waiting for it to be ready..."
                 )
                 # Wait time might need adjustment based on index size/type and cloud provider
                 await asyncio.sleep(30) # Increased wait time
             else:
-
+                logger.info(f"Using existing Pinecone index '{self.index_name}'")
         except Exception as e:
-
+            logger.exception(
+                f"Error checking or creating Pinecone index asynchronously: {e}"
+            )
             raise
 
     async def _ensure_initialized(self):
@@ -230,7 +241,7 @@ class PineconeAdapter(VectorStorageProvider):
         """Upsert pre-embedded vectors into Pinecone asynchronously."""
         await self._ensure_initialized()
         if not vectors:
-
+            logger.info("Upsert skipped: No vectors provided.")
             return
         try:
             async with self.pinecone.IndexAsyncio(
@@ -240,14 +251,14 @@ class PineconeAdapter(VectorStorageProvider):
                 if namespace:
                     upsert_params["namespace"] = namespace
                 await index_instance.upsert(**upsert_params)
-
+                logger.info(
                     f"Successfully upserted {len(vectors)} vectors into namespace '{namespace or 'default'}'."
                 )
         except PineconeApiException as e:
-
+            logger.error(f"Pinecone API error during async upsert: {e}", exc_info=True)
             raise
         except Exception as e:
-
+            logger.exception(f"Error during async upsert: {e}")
             raise
 
     async def query_text(self, *args, **kwargs): # pragma: no cover
@@ -285,8 +296,8 @@ class PineconeAdapter(VectorStorageProvider):
         await self._ensure_initialized()
 
         if not self.use_reranking:
-
-                "
+            logger.warning(
+                "query_and_rerank called but use_reranking is False. Performing standard query."
             )
             return await self.query(
                 vector, top_k, namespace, filter, include_values, include_metadata
@@ -327,17 +338,17 @@ class PineconeAdapter(VectorStorageProvider):
                         # Store original match keyed by the text for easy lookup after reranking
                         original_results_map[doc_text] = match
                     else:
-
-                            f"
+                        logger.warning(
+                            f"Skipping result ID {match.get('id')} for reranking - missing or invalid text in field '{self.rerank_text_field}'."
                         )
                 else:
-
-                        f"
+                    logger.warning(
+                        f"Skipping result ID {match.get('id')} for reranking - metadata is missing or not a dictionary."
                     )
 
             if not documents_to_rerank:
-
-                    f"
+                logger.warning(
+                    f"Reranking skipped: No documents found with text in the specified field ('{self.rerank_text_field}'). Returning top {top_k} initial results."
                 )
                 # Return the originally requested top_k
                 return initial_results[:top_k]
@@ -347,7 +358,7 @@ class PineconeAdapter(VectorStorageProvider):
                 raise RuntimeError("Pinecone client not initialized for reranking.")
 
             try:
-
+                logger.info(
                     f"Reranking {len(documents_to_rerank)} results using {self.rerank_model} for query: '{query_text_for_rerank[:50]}...'"
                 )
                 rerank_params = {} # Add model-specific params if needed
@@ -377,31 +388,32 @@ class PineconeAdapter(VectorStorageProvider):
                         updated_match["score"] = score
                         reranked_results.append(updated_match)
                     else:
-
-                            f"
+                        logger.warning(
+                            f"Reranked document text not found in original results map: '{doc_text[:50]}...'"
                         )
 
                 if reranked_results:
-
+                    logger.info(
                         f"Reranking complete. Returning {len(reranked_results)} results."
                     )
                     return reranked_results
                 else:
                     # Should not happen if rerank_response.results existed, but handle defensively
-
-                        "
+                    logger.warning(
+                        "No matches found after processing reranking response. Falling back to initial vector search results."
                     )
                     return initial_results[:top_k]
 
             except Exception as rerank_error:
-
-                    f"Error during reranking with {self.rerank_model}: {rerank_error}. Returning initial results."
+                logger.error(
+                    f"Error during reranking with {self.rerank_model}: {rerank_error}. Returning initial results.",
+                    exc_info=True,
                 )
                 # Fallback to top_k initial results
                 return initial_results[:top_k]
 
         except Exception as e:
-
+            logger.exception(f"Failed to query or rerank: {e}")
             return []
 
     async def query(
|
@@ -449,10 +461,10 @@ class PineconeAdapter(VectorStorageProvider):
|
|
449
461
|
return matches
|
450
462
|
|
451
463
|
except PineconeApiException as e:
|
452
|
-
|
464
|
+
logger.error(f"Pinecone API error during async query: {e}", exc_info=True)
|
453
465
|
raise # Re-raise API errors
|
454
466
|
except Exception as e:
|
455
|
-
|
467
|
+
logger.exception(f"Error during async query: {e}")
|
456
468
|
return [] # Return empty list for general errors
|
457
469
|
|
458
470
|
async def delete(
|
@@ -461,7 +473,7 @@ class PineconeAdapter(VectorStorageProvider):
|
|
461
473
|
"""Delete vectors by IDs from Pinecone asynchronously."""
|
462
474
|
await self._ensure_initialized()
|
463
475
|
if not ids:
|
464
|
-
|
476
|
+
logger.info("Delete skipped: No IDs provided.")
|
465
477
|
return
|
466
478
|
try:
|
467
479
|
async with self.pinecone.IndexAsyncio(
|
@@ -471,33 +483,35 @@ class PineconeAdapter(VectorStorageProvider):
                 if namespace:
                     delete_params["namespace"] = namespace
                 await index_instance.delete(**delete_params)
-
+                logger.info(
                     f"Attempted to delete {len(ids)} vectors from namespace '{namespace or 'default'}'."
                 )
         except PineconeApiException as e:
-
+            logger.error(f"Pinecone API error during async delete: {e}", exc_info=True)
             raise
         except Exception as e:
-
+            logger.exception(f"Error during async delete: {e}")
             raise
 
     async def describe_index_stats(self) -> Dict[str, Any]: # pragma: no cover
         """Get statistics about the index asynchronously."""
-
+        logger.debug(
+            f"describe_index_stats: Entering for host {self.index_host}"
+        ) # Changed to debug
        try:
-
+            logger.debug(
                 f"describe_index_stats: Getting IndexAsyncio context for host {self.index_host}..."
-            )
+            ) # Changed to debug
             async with self.pinecone.IndexAsyncio(
                 host=self.index_host
             ) as index_instance:
-
+                logger.debug(
                     "describe_index_stats: Context acquired. Calling describe_index_stats on index instance..."
-                )
+                ) # Changed to debug
                 stats_response = await index_instance.describe_index_stats()
-
+                logger.debug(
                     f"describe_index_stats: Call completed. Response: {stats_response}"
-                )
+                ) # Changed to debug
 
             # Convert response to dict if necessary (handle potential None or different types)
             if hasattr(stats_response, "to_dict"):
@@ -509,16 +523,21 @@ class PineconeAdapter(VectorStorageProvider):
                 try:
                     result_dict = dict(stats_response)
                 except (TypeError, ValueError):
-
-                        f"
+                    logger.warning(
+                        f"Could not convert stats_response to dict: {stats_response}"
                     )
                     result_dict = {}
 
-
+            logger.debug(
+                f"describe_index_stats: Returning stats dict: {result_dict}"
+            ) # Changed to debug
             return result_dict
         except PineconeApiException as e:
-
+            logger.error(
+                f"Pinecone API error describing index stats asynchronously: {e}",
+                exc_info=True,
+            )
             raise # Re-raise API errors
         except Exception as e:
-
+            logger.exception(f"Error describing index stats asynchronously: {e}")
             return {} # Return empty dict for general errors
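The describe_index_stats messages were demoted from info to debug, so they vanish under a default INFO configuration. To opt back in while troubleshooting (logger name assumed from the module path):

```python
import logging

# Surface the per-call stats logging for just this adapter.
logging.getLogger("solana_agent.adapters.pinecone_adapter").setLevel(logging.DEBUG)
```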