solana-agent 20.1.2__py3-none-any.whl → 31.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. solana_agent/__init__.py +10 -5
  2. solana_agent/adapters/ffmpeg_transcoder.py +375 -0
  3. solana_agent/adapters/mongodb_adapter.py +15 -2
  4. solana_agent/adapters/openai_adapter.py +679 -0
  5. solana_agent/adapters/openai_realtime_ws.py +1813 -0
  6. solana_agent/adapters/pinecone_adapter.py +543 -0
  7. solana_agent/cli.py +128 -0
  8. solana_agent/client/solana_agent.py +180 -20
  9. solana_agent/domains/agent.py +13 -13
  10. solana_agent/domains/routing.py +18 -8
  11. solana_agent/factories/agent_factory.py +239 -38
  12. solana_agent/guardrails/pii.py +107 -0
  13. solana_agent/interfaces/client/client.py +95 -12
  14. solana_agent/interfaces/guardrails/guardrails.py +26 -0
  15. solana_agent/interfaces/plugins/plugins.py +2 -1
  16. solana_agent/interfaces/providers/__init__.py +0 -0
  17. solana_agent/interfaces/providers/audio.py +40 -0
  18. solana_agent/interfaces/providers/data_storage.py +9 -2
  19. solana_agent/interfaces/providers/llm.py +86 -9
  20. solana_agent/interfaces/providers/memory.py +13 -1
  21. solana_agent/interfaces/providers/realtime.py +212 -0
  22. solana_agent/interfaces/providers/vector_storage.py +53 -0
  23. solana_agent/interfaces/services/agent.py +27 -12
  24. solana_agent/interfaces/services/knowledge_base.py +59 -0
  25. solana_agent/interfaces/services/query.py +41 -8
  26. solana_agent/interfaces/services/routing.py +0 -1
  27. solana_agent/plugins/manager.py +37 -16
  28. solana_agent/plugins/registry.py +34 -19
  29. solana_agent/plugins/tools/__init__.py +0 -5
  30. solana_agent/plugins/tools/auto_tool.py +1 -0
  31. solana_agent/repositories/memory.py +332 -111
  32. solana_agent/services/__init__.py +1 -1
  33. solana_agent/services/agent.py +390 -241
  34. solana_agent/services/knowledge_base.py +768 -0
  35. solana_agent/services/query.py +1858 -153
  36. solana_agent/services/realtime.py +626 -0
  37. solana_agent/services/routing.py +104 -51
  38. solana_agent-31.4.0.dist-info/METADATA +1070 -0
  39. solana_agent-31.4.0.dist-info/RECORD +49 -0
  40. {solana_agent-20.1.2.dist-info → solana_agent-31.4.0.dist-info}/WHEEL +1 -1
  41. solana_agent-31.4.0.dist-info/entry_points.txt +3 -0
  42. solana_agent/adapters/llm_adapter.py +0 -160
  43. solana_agent-20.1.2.dist-info/METADATA +0 -464
  44. solana_agent-20.1.2.dist-info/RECORD +0 -35
  45. {solana_agent-20.1.2.dist-info → solana_agent-31.4.0.dist-info/licenses}/LICENSE +0 -0
@@ -0,0 +1,543 @@
1
+ import logging # Import logging
2
+ from typing import List, Dict, Any, Optional, Literal
3
+ from pinecone import PineconeAsyncio, ServerlessSpec
4
+ from pinecone.exceptions import PineconeApiException
5
+ import asyncio
6
+
7
+ from solana_agent.interfaces.providers.vector_storage import VectorStorageProvider
8
+ # LLMProvider is no longer needed here
9
+
10
+ # Setup logger for this module
11
+ logger = logging.getLogger(__name__)
12
+
13
+ # Type definitions remain useful
14
+ PineconeRerankModel = Literal[
15
+ "cohere-rerank-3.5",
16
+ "bge-reranker-v2-m3",
17
+ "pinecone-rerank-v0",
18
+ ]
19
+ # Kept for potential future use, but not used internally now
20
+ InputType = Literal["query", "passage"]
21
+ TruncateType = Literal["END", "NONE"] # Kept for potential future use
22
+
23
+
24
+ class PineconeAdapter(VectorStorageProvider):
25
+ """
26
+ Adapter for interacting with Pinecone vector database using PineconeAsyncio.
27
+ Assumes embeddings are generated externally (e.g., via OpenAI).
28
+ Supports Pinecone native reranking.
29
+ Follows context management patterns for Pinecone client v3+.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ api_key: Optional[str] = None,
35
+ index_name: Optional[str] = None,
36
+ # Default for OpenAI text-embedding-3-large, MUST match external embedder
37
+ embedding_dimensions: int = 3072,
38
+ cloud_provider: str = "aws",
39
+ region: str = "us-east-1",
40
+ metric: str = "cosine",
41
+ create_index_if_not_exists: bool = True,
42
+ # Reranking Config
43
+ use_reranking: bool = False,
44
+ rerank_model: Optional[PineconeRerankModel] = None,
45
+ rerank_top_k: int = 3, # Final number of results after reranking
46
+ # Multiplier for initial fetch before rerank
47
+ initial_query_top_k_multiplier: int = 5,
48
+ # Metadata field containing text for reranking
49
+ rerank_text_field: str = "text",
50
+ ):
51
+ """
52
+ Initialize the Pinecone Adapter.
53
+
54
+ Args:
55
+ api_key: Pinecone API key.
56
+ index_name: Name of the Pinecone index.
57
+ embedding_dimensions: Dimension of the embeddings generated externally. MUST match the index dimension.
58
+ cloud_provider: Cloud provider for the index (e.g., 'aws', 'gcp').
59
+ region: Region for the index.
60
+ metric: Distance metric for the index (e.g., 'cosine', 'dotproduct', 'euclidean').
61
+ create_index_if_not_exists: Attempt to create the index if it doesn't exist.
62
+ use_reranking: Enable Pinecone native reranking.
63
+ rerank_model: The reranking model to use (required if use_reranking is True).
64
+ rerank_top_k: Final number of results to return after reranking.
65
+ initial_query_top_k_multiplier: Fetch top_k * multiplier results initially for reranking.
66
+ rerank_text_field: The key in vector metadata containing the text content for reranking.
67
+ """
68
+ self.api_key = api_key
69
+ self.index_name = index_name
70
+ # Crucial: Must match external embedder and index
71
+ self.embedding_dimensions = embedding_dimensions
72
+ self.cloud_provider = cloud_provider
73
+ self.region = region
74
+ self.metric = metric
75
+ self.create_index_if_not_exists = create_index_if_not_exists
76
+
77
+ # Reranking Config
78
+ self.use_reranking = use_reranking
79
+ self.rerank_model = rerank_model
80
+ self.rerank_top_k = rerank_top_k
81
+ # Calculate how many results to fetch initially if reranking
82
+ self.initial_query_top_k_multiplier = initial_query_top_k_multiplier
83
+
84
+ self.rerank_text_field = rerank_text_field
85
+
86
+ self.pinecone: Optional[PineconeAsyncio] = None
87
+ # Store index host for connections
88
+ self.index_host: Optional[str] = None
89
+
90
+ # --- Validation ---
91
+ if not self.api_key:
92
+ raise ValueError("Pinecone API key is required.")
93
+ if not self.index_name:
94
+ raise ValueError("Pinecone index name is required.")
95
+ if self.embedding_dimensions <= 0:
96
+ raise ValueError("embedding_dimensions must be a positive integer.")
97
+ if self.use_reranking and not self.rerank_model:
98
+ raise ValueError(
99
+ "rerank_model must be specified when use_reranking is True."
100
+ )
101
+
102
+ logger.info(
103
+ f"PineconeAdapter configured for index '{self.index_name}' using external embeddings with dimension {self.embedding_dimensions}."
104
+ )
105
+ if self.use_reranking:
106
+ logger.info(f"Reranking enabled using model '{self.rerank_model}'.")
107
+
108
+ self._init_lock = asyncio.Lock()
109
+ self._initialized = False
110
+
111
+ async def _initialize_async(self):
112
+ """Asynchronously initialize the Pinecone client and get index host."""
113
+ async with self._init_lock:
114
+ if self._initialized:
115
+ return
116
+
117
+ try:
118
+ logger.info("Initializing PineconeAsyncio client...")
119
+ self.pinecone = PineconeAsyncio(api_key=self.api_key)
120
+
121
+ if self.create_index_if_not_exists:
122
+ await self._create_index_if_not_exists_async()
123
+
124
+ logger.info(
125
+ f"Describing Pinecone index '{self.index_name}' to get host..."
126
+ )
127
+ index_description = await self.pinecone.describe_index(self.index_name)
128
+ self.index_host = index_description.host
129
+ if not self.index_host:
130
+ raise RuntimeError(
131
+ f"Could not obtain host for index '{self.index_name}'."
132
+ )
133
+ logger.info(f"Obtained index host: {self.index_host}")
134
+
135
+ # Validate index dimension matches configured dimension
136
+ index_dimension = index_description.dimension
137
+ if (
138
+ index_dimension != 0
139
+ and index_dimension != self.embedding_dimensions
140
+ ):
141
+ # This is a critical mismatch
142
+ raise ValueError(
143
+ f"CRITICAL MISMATCH: Pinecone index dimension ({index_dimension}) "
144
+ f"does not match configured embedding dimension ({self.embedding_dimensions}). "
145
+ f"Ensure the index was created with the correct dimension or update the adapter configuration."
146
+ )
147
+ elif index_dimension == 0:
148
+ logger.warning(
149
+ f"Pinecone index dimension reported as 0. Cannot verify match with configured dimension ({self.embedding_dimensions})."
150
+ )
151
+
152
+ logger.info("Attempting to get index stats...")
153
+ stats = await self.describe_index_stats()
154
+ logger.info(f"Successfully retrieved index stats: {stats}")
155
+
156
+ total_vector_count = stats.get("total_vector_count", 0)
157
+ logger.info(
158
+ f"Current index '{self.index_name}' contains {total_vector_count} vectors."
159
+ )
160
+
161
+ self._initialized = True
162
+ logger.info("Pinecone adapter initialization complete.")
163
+
164
+ except PineconeApiException as e:
165
+ logger.error(
166
+ f"Pinecone API error during async initialization: {e}",
167
+ exc_info=True,
168
+ )
169
+ self.pinecone = None
170
+ self.index_host = None
171
+ raise
172
+ except Exception as e:
173
+ logger.exception(
174
+ f"Failed to initialize Pinecone async adapter for index '{self.index_name}': {e}"
175
+ )
176
+ self.pinecone = None
177
+ self.index_host = None
178
+ raise
179
+
180
+ async def _create_index_if_not_exists_async(self) -> None:
181
+ """Create the Pinecone index asynchronously if it doesn't already exist."""
182
+ if not self.pinecone:
183
+ raise RuntimeError("Pinecone client not initialized before creating index.")
184
+ try:
185
+ indexes_response = await self.pinecone.list_indexes()
186
+ existing_indexes = indexes_response.get("indexes", [])
187
+ existing_names = [idx.get("name") for idx in existing_indexes]
188
+
189
+ if self.index_name not in existing_names:
190
+ logger.info(
191
+ f"Creating Pinecone index '{self.index_name}' with dimension {self.embedding_dimensions}..."
192
+ )
193
+
194
+ spec_data = {"cloud": self.cloud_provider, "region": self.region}
195
+
196
+ create_params = {
197
+ "name": self.index_name,
198
+ "dimension": self.embedding_dimensions, # Use configured dimension
199
+ "metric": self.metric,
200
+ # Assuming serverless, adjust if needed
201
+ "spec": ServerlessSpec(**spec_data),
202
+ }
203
+
204
+ await self.pinecone.create_index(**create_params)
205
+ logger.info(
206
+ f"Successfully initiated creation of Pinecone index '{self.index_name}'. Waiting for it to be ready..."
207
+ )
208
+ # Wait time might need adjustment based on index size/type and cloud provider
209
+ await asyncio.sleep(30) # Increased wait time
210
+ else:
211
+ logger.info(f"Using existing Pinecone index '{self.index_name}'")
212
+ except Exception as e:
213
+ logger.exception(
214
+ f"Error checking or creating Pinecone index asynchronously: {e}"
215
+ )
216
+ raise
217
+
218
+ async def _ensure_initialized(self):
219
+ """Ensure the async client is initialized before use."""
220
+ if not self._initialized:
221
+ await self._initialize_async()
222
+ if not self._initialized or not self.pinecone or not self.index_host:
223
+ raise RuntimeError(
224
+ "Pinecone async client failed to initialize or get index host."
225
+ )
226
+
227
+ # _get_embedding method is removed as embeddings are handled externally
228
+
229
+ async def upsert_text(self, *args, **kwargs): # pragma: no cover
230
+ """Deprecated: Embeddings should be generated externally."""
231
+ raise NotImplementedError(
232
+ "upsert_text is deprecated. Use the generic upsert method with pre-computed vectors."
233
+ )
234
+
235
+ async def upsert(
236
+ self,
237
+ # Expects {"id": str, "values": List[float], "metadata": Optional[Dict]}
238
+ vectors: List[Dict[str, Any]],
239
+ namespace: Optional[str] = None,
240
+ ) -> None: # pragma: no cover
241
+ """Upsert pre-embedded vectors into Pinecone asynchronously."""
242
+ await self._ensure_initialized()
243
+ if not vectors:
244
+ logger.info("Upsert skipped: No vectors provided.")
245
+ return
246
+ try:
247
+ async with self.pinecone.IndexAsyncio(
248
+ host=self.index_host
249
+ ) as index_instance:
250
+ upsert_params = {"vectors": vectors}
251
+ if namespace:
252
+ upsert_params["namespace"] = namespace
253
+ await index_instance.upsert(**upsert_params)
254
+ logger.info(
255
+ f"Successfully upserted {len(vectors)} vectors into namespace '{namespace or 'default'}'."
256
+ )
257
+ except PineconeApiException as e:
258
+ logger.error(f"Pinecone API error during async upsert: {e}", exc_info=True)
259
+ raise
260
+ except Exception as e:
261
+ logger.exception(f"Error during async upsert: {e}")
262
+ raise
263
+
264
+ async def query_text(self, *args, **kwargs): # pragma: no cover
265
+ """Deprecated: Use query() for simple vector search or query_and_rerank() for reranking."""
266
+ raise NotImplementedError(
267
+ "query_text is deprecated. Use query() or query_and_rerank() with a pre-computed vector."
268
+ )
269
+
270
+ async def query_and_rerank(
271
+ self,
272
+ vector: List[float],
273
+ query_text_for_rerank: str, # The original query text is needed for the reranker
274
+ top_k: int = 5,
275
+ namespace: Optional[str] = None,
276
+ filter: Optional[Dict[str, Any]] = None,
277
+ include_values: bool = False,
278
+ include_metadata: bool = True,
279
+ ) -> List[Dict[str, Any]]: # pragma: no cover
280
+ """
281
+ Queries Pinecone with a vector and reranks the results using Pinecone's reranker.
282
+ Requires 'use_reranking' to be True and 'rerank_model' to be set during init.
283
+
284
+ Args:
285
+ vector: The query vector.
286
+ query_text_for_rerank: The original text query used for the reranking model.
287
+ top_k: The final number of results desired after reranking.
288
+ namespace: Optional Pinecone namespace.
289
+ filter: Optional metadata filter for the initial query.
290
+ include_values: Whether to include vector values in the results.
291
+ include_metadata: Whether to include metadata in the results.
292
+
293
+ Returns:
294
+ A list of reranked result dictionaries.
295
+ """
296
+ await self._ensure_initialized()
297
+
298
+ if not self.use_reranking:
299
+ logger.warning(
300
+ "query_and_rerank called but use_reranking is False. Performing standard query."
301
+ )
302
+ return await self.query(
303
+ vector, top_k, namespace, filter, include_values, include_metadata
304
+ )
305
+
306
+ if not self.rerank_model:
307
+ raise ValueError(
308
+ "Cannot rerank: rerank_model was not specified during initialization."
309
+ )
310
+
311
+ # Determine how many results to fetch initially for reranking
312
+ initial_k = top_k * self.initial_query_top_k_multiplier
313
+
314
+ try:
315
+ # 1. Initial Vector Search
316
+ initial_results = await self.query(
317
+ vector=vector,
318
+ top_k=initial_k,
319
+ namespace=namespace,
320
+ filter=filter,
321
+ include_values=include_values, # Include values if requested in final output
322
+ include_metadata=True, # Always need metadata for reranking text field
323
+ )
324
+
325
+ if not initial_results:
326
+ return [] # No results from initial query
327
+
328
+ # 2. Prepare for Reranking
329
+ documents_to_rerank = []
330
+ original_results_map = {}
331
+ for match in initial_results:
332
+ # Ensure metadata exists and contains the rerank text field
333
+ doc_metadata = match.get("metadata")
334
+ if isinstance(doc_metadata, dict):
335
+ doc_text = doc_metadata.get(self.rerank_text_field)
336
+ if doc_text and isinstance(doc_text, str):
337
+ documents_to_rerank.append(doc_text)
338
+ # Store original match keyed by the text for easy lookup after reranking
339
+ original_results_map[doc_text] = match
340
+ else:
341
+ logger.warning(
342
+ f"Skipping result ID {match.get('id')} for reranking - missing or invalid text in field '{self.rerank_text_field}'."
343
+ )
344
+ else:
345
+ logger.warning(
346
+ f"Skipping result ID {match.get('id')} for reranking - metadata is missing or not a dictionary."
347
+ )
348
+
349
+ if not documents_to_rerank:
350
+ logger.warning(
351
+ f"Reranking skipped: No documents found with text in the specified field ('{self.rerank_text_field}'). Returning top {top_k} initial results."
352
+ )
353
+ # Return the originally requested top_k
354
+ return initial_results[:top_k]
355
+
356
+ # 3. Perform Reranking Call
357
+ if not self.pinecone:
358
+ raise RuntimeError("Pinecone client not initialized for reranking.")
359
+
360
+ try:
361
+ logger.info(
362
+ f"Reranking {len(documents_to_rerank)} results using {self.rerank_model} for query: '{query_text_for_rerank[:50]}...'"
363
+ )
364
+ rerank_params = {} # Add model-specific params if needed
365
+
366
+ rerank_request = {
367
+ "query": query_text_for_rerank,
368
+ "documents": documents_to_rerank,
369
+ "model": self.rerank_model,
370
+ "top_n": top_k, # Request the final desired number
371
+ "parameters": rerank_params,
372
+ }
373
+
374
+ rerank_response = await self.pinecone.rerank(**rerank_request)
375
+
376
+ # 4. Process Reranked Results
377
+ reranked_results = []
378
+ if rerank_response and rerank_response.results:
379
+ for result in rerank_response.results:
380
+ # Adjust based on actual rerank response structure (assuming v3+)
381
+ doc_text = result.document.text if result.document else ""
382
+ score = result.relevance_score
383
+ original_match = original_results_map.get(doc_text)
384
+ if original_match:
385
+ # Create a new dict to avoid modifying the original map values
386
+ updated_match = dict(original_match)
387
+ # Update score with relevance score
388
+ updated_match["score"] = score
389
+ reranked_results.append(updated_match)
390
+ else:
391
+ logger.warning(
392
+ f"Reranked document text not found in original results map: '{doc_text[:50]}...'"
393
+ )
394
+
395
+ if reranked_results:
396
+ logger.info(
397
+ f"Reranking complete. Returning {len(reranked_results)} results."
398
+ )
399
+ return reranked_results
400
+ else:
401
+ # Should not happen if rerank_response.results existed, but handle defensively
402
+ logger.warning(
403
+ "No matches found after processing reranking response. Falling back to initial vector search results."
404
+ )
405
+ return initial_results[:top_k]
406
+
407
+ except Exception as rerank_error:
408
+ logger.error(
409
+ f"Error during reranking with {self.rerank_model}: {rerank_error}. Returning initial results.",
410
+ exc_info=True,
411
+ )
412
+ # Fallback to top_k initial results
413
+ return initial_results[:top_k]
414
+
415
+ except Exception as e:
416
+ logger.exception(f"Failed to query or rerank: {e}")
417
+ return []
418
+
419
+ async def query(
420
+ self,
421
+ vector: List[float],
422
+ top_k: int = 5,
423
+ namespace: Optional[str] = None,
424
+ filter: Optional[Dict[str, Any]] = None,
425
+ include_values: bool = False,
426
+ include_metadata: bool = True,
427
+ ) -> List[Dict[str, Any]]: # pragma: no cover
428
+ """
429
+ Query Pinecone for similar vectors asynchronously (no reranking).
430
+
431
+ Args:
432
+ vector: The query vector.
433
+ top_k: The number of results to return.
434
+ namespace: Optional Pinecone namespace.
435
+ filter: Optional metadata filter.
436
+ include_values: Whether to include vector values in the results.
437
+ include_metadata: Whether to include metadata in the results.
438
+
439
+ Returns:
440
+ A list of result dictionaries.
441
+ """
442
+ await self._ensure_initialized()
443
+ try:
444
+ async with self.pinecone.IndexAsyncio(
445
+ host=self.index_host
446
+ ) as index_instance:
447
+ query_params = {
448
+ "vector": vector,
449
+ "top_k": top_k,
450
+ "include_values": include_values,
451
+ "include_metadata": include_metadata,
452
+ }
453
+ if namespace:
454
+ query_params["namespace"] = namespace
455
+ if filter:
456
+ query_params["filter"] = filter
457
+ query_response = await index_instance.query(**query_params)
458
+
459
+ # Ensure response structure is handled safely
460
+ matches = query_response.get("matches", []) if query_response else []
461
+ return matches
462
+
463
+ except PineconeApiException as e:
464
+ logger.error(f"Pinecone API error during async query: {e}", exc_info=True)
465
+ raise # Re-raise API errors
466
+ except Exception as e:
467
+ logger.exception(f"Error during async query: {e}")
468
+ return [] # Return empty list for general errors
469
+
470
+ async def delete(
471
+ self, ids: List[str], namespace: Optional[str] = None
472
+ ) -> None: # pragma: no cover
473
+ """Delete vectors by IDs from Pinecone asynchronously."""
474
+ await self._ensure_initialized()
475
+ if not ids:
476
+ logger.info("Delete skipped: No IDs provided.")
477
+ return
478
+ try:
479
+ async with self.pinecone.IndexAsyncio(
480
+ host=self.index_host
481
+ ) as index_instance:
482
+ delete_params = {"ids": ids}
483
+ if namespace:
484
+ delete_params["namespace"] = namespace
485
+ await index_instance.delete(**delete_params)
486
+ logger.info(
487
+ f"Attempted to delete {len(ids)} vectors from namespace '{namespace or 'default'}'."
488
+ )
489
+ except PineconeApiException as e:
490
+ logger.error(f"Pinecone API error during async delete: {e}", exc_info=True)
491
+ raise
492
+ except Exception as e:
493
+ logger.exception(f"Error during async delete: {e}")
494
+ raise
495
+
496
+ async def describe_index_stats(self) -> Dict[str, Any]: # pragma: no cover
497
+ """Get statistics about the index asynchronously."""
498
+ logger.debug(
499
+ f"describe_index_stats: Entering for host {self.index_host}"
500
+ ) # Changed to debug
501
+ try:
502
+ logger.debug(
503
+ f"describe_index_stats: Getting IndexAsyncio context for host {self.index_host}..."
504
+ ) # Changed to debug
505
+ async with self.pinecone.IndexAsyncio(
506
+ host=self.index_host
507
+ ) as index_instance:
508
+ logger.debug(
509
+ "describe_index_stats: Context acquired. Calling describe_index_stats on index instance..."
510
+ ) # Changed to debug
511
+ stats_response = await index_instance.describe_index_stats()
512
+ logger.debug(
513
+ f"describe_index_stats: Call completed. Response: {stats_response}"
514
+ ) # Changed to debug
515
+
516
+ # Convert response to dict if necessary (handle potential None or different types)
517
+ if hasattr(stats_response, "to_dict"):
518
+ result_dict = stats_response.to_dict()
519
+ elif isinstance(stats_response, dict):
520
+ result_dict = stats_response
521
+ else:
522
+ # Attempt basic conversion or return empty
523
+ try:
524
+ result_dict = dict(stats_response)
525
+ except (TypeError, ValueError):
526
+ logger.warning(
527
+ f"Could not convert stats_response to dict: {stats_response}"
528
+ )
529
+ result_dict = {}
530
+
531
+ logger.debug(
532
+ f"describe_index_stats: Returning stats dict: {result_dict}"
533
+ ) # Changed to debug
534
+ return result_dict
535
+ except PineconeApiException as e:
536
+ logger.error(
537
+ f"Pinecone API error describing index stats asynchronously: {e}",
538
+ exc_info=True,
539
+ )
540
+ raise # Re-raise API errors
541
+ except Exception as e:
542
+ logger.exception(f"Error describing index stats asynchronously: {e}")
543
+ return {} # Return empty dict for general errors
solana_agent/cli.py ADDED
@@ -0,0 +1,128 @@
1
+ from typing import Optional
2
+ import typer
3
+ import asyncio
4
+ import logging
5
+ from typing_extensions import Annotated
6
+ from rich.console import Console
7
+ from rich.live import Live
8
+ from rich.spinner import Spinner
9
+ from rich.prompt import Prompt
10
+
11
+ from solana_agent.client.solana_agent import SolanaAgent
12
+
13
+ # --- Basic Logging Configuration ---
14
+ logging.basicConfig(level=logging.WARNING, format="%(levelname)s:%(name)s:%(message)s")
15
+ # --- End Logging Configuration ---
16
+
17
+ app = typer.Typer()
18
+ console = Console()
19
+
20
+
21
+ async def stream_agent_response(
22
+ agent: SolanaAgent,
23
+ user_id: str,
24
+ message: str,
25
+ prompt: Optional[str] = None,
26
+ ):
27
+ """Helper function to stream and display agent response."""
28
+ full_response = ""
29
+ with Live(console=console, refresh_per_second=10, transient=True) as live:
30
+ live.update(Spinner("dots", "Thinking..."))
31
+ try:
32
+ first_chunk = True
33
+ async for chunk in agent.process(
34
+ user_id=user_id,
35
+ message=message,
36
+ output_format="text",
37
+ prompt=prompt, # Pass prompt override if provided
38
+ ):
39
+ if first_chunk:
40
+ live.update("", refresh=True) # Clear spinner
41
+ first_chunk = False
42
+ full_response += chunk
43
+ live.update(full_response)
44
+
45
+ if first_chunk: # No response received
46
+ live.update("[yellow]Agent did not produce a response.[/yellow]")
47
+
48
+ except Exception as e:
49
+ # Display error within the Live context
50
+ live.update(f"[bold red]\nError during processing:[/bold red] {e}")
51
+ # Keep the error message visible after Live exits by printing it again
52
+ console.print(f"[bold red]Error during processing:[/bold red] {e}")
53
+ full_response = "" # Ensure error message isn't printed as final response
54
+
55
+ # Print the final complete response cleanly after Live context exits
56
+ if full_response:
57
+ console.print(f"[bright_blue]Agent:[/bright_blue] {full_response}")
58
+
59
+
60
+ @app.command()
61
+ def chat(
62
+ user_id: Annotated[
63
+ str, typer.Option(help="The user ID for the conversation.")
64
+ ] = "cli_user",
65
+ config: Annotated[
66
+ str, typer.Option(help="Path to the configuration JSON file.")
67
+ ] = "config.json",
68
+ prompt: Annotated[ # Allow prompt override via option
69
+ str, typer.Option(help="Optional system prompt override for the session.")
70
+ ] = None,
71
+ ):
72
+ """
73
+ Start an interactive chat session with the Solana Agent.
74
+ Type 'exit' or 'quit' to end the session.
75
+ """
76
+ try:
77
+ with console.status("[bold green]Initializing agent...", spinner="dots"):
78
+ agent = SolanaAgent(config_path=config)
79
+ console.print("[green]Agent initialized. Start chatting![/green]")
80
+ console.print("[dim]Type 'exit' or 'quit' to end.[/dim]")
81
+
82
+ except FileNotFoundError:
83
+ console.print(
84
+ f"[bold red]Error:[/bold red] Configuration file not found at '{config}'"
85
+ )
86
+ raise typer.Exit(code=1)
87
+ except ValueError as e:
88
+ console.print(f"[bold red]Error loading configuration:[/bold red] {e}")
89
+ raise typer.Exit(code=1)
90
+ except Exception as e:
91
+ console.print(
92
+ f"[bold red]An unexpected error occurred during initialization:[/bold red] {e}"
93
+ )
94
+ raise typer.Exit(code=1)
95
+
96
+ # --- Main Interaction Loop ---
97
+ while True:
98
+ try:
99
+ # Use Rich's Prompt for better input handling
100
+ user_message = Prompt.ask("[bold green]You[/bold green]")
101
+
102
+ if user_message.lower() in ["exit", "quit"]:
103
+ console.print("[yellow]Exiting chat session.[/yellow]")
104
+ break
105
+
106
+ if not user_message.strip(): # Handle empty input
107
+ continue
108
+
109
+ # Run the async streaming function for the user's message
110
+ # Pass the optional prompt override from the command line option
111
+ asyncio.run(stream_agent_response(agent, user_id, user_message, prompt))
112
+
113
+ except KeyboardInterrupt: # Allow Ctrl+C to exit gracefully
114
+ console.print(
115
+ "\n[yellow]Exiting chat session (KeyboardInterrupt).[/yellow]"
116
+ )
117
+ break
118
+ except Exception as loop_error:
119
+ # Catch errors during the input/processing loop without crashing
120
+ console.print(
121
+ f"[bold red]An error occurred in the chat loop:[/bold red] {loop_error}"
122
+ )
123
+ # Optionally add a small delay or specific error handling here
124
+ # Consider if you want to break the loop on certain errors
125
+
126
+
127
+ if __name__ == "__main__":
128
+ app()