solana-agent 1.4.1__tar.gz → 1.4.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {solana_agent-1.4.1 → solana_agent-1.4.3}/PKG-INFO +1 -1
- {solana_agent-1.4.1 → solana_agent-1.4.3}/pyproject.toml +1 -1
- {solana_agent-1.4.1 → solana_agent-1.4.3}/solana_agent/ai.py +9 -5
- {solana_agent-1.4.1 → solana_agent-1.4.3}/LICENSE +0 -0
- {solana_agent-1.4.1 → solana_agent-1.4.3}/README.md +0 -0
- {solana_agent-1.4.1 → solana_agent-1.4.3}/solana_agent/__init__.py +0 -0
@@ -380,7 +380,7 @@ class AI:
             filename (str): The name of the CSV file
             prompt (str, optional): Custom prompt for summarization. Defaults to "Summarize the markdown table into a report, include important metrics and totals."
             namespace (str, optional): Knowledge base namespace. Defaults to "global".
-            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
+            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
                 Gemini model for summarization. Defaults to "gemini-1.5-pro"

         Example:
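The docstring in this hunk belongs to the CSV-summarization helper on the `AI` class, which takes `filename`, `prompt`, `namespace`, and a Gemini `model`. A rough usage sketch under stated assumptions: the method name `summarize_csv`, its awaitable nature, and the way the `AI` instance is obtained are guesses; only the four documented parameters come from this diff.

```python
# Hypothetical sketch. Only the parameter names and defaults below are taken
# from the docstring above; the method name "summarize_csv" and its async
# nature are assumptions.
from solana_agent.ai import AI


async def report_from_csv(ai: AI) -> str:
    return await ai.summarize_csv(  # assumed method name
        filename="metrics.csv",
        prompt="Summarize the markdown table into a report, include important metrics and totals.",
        namespace="global",
        model="gemini-1.5-pro",  # or "gemini-2.0-flash"
    )
```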
@@ -766,6 +766,7 @@ class AI:
         self,
         user_id: str,
         query: str,
+        prompt: str = "You combine the data with your reasoning to answer the query.",
         use_perplexity: bool = True,
         use_grok: bool = True,
         use_facts: bool = True,
@@ -774,7 +775,7 @@ class AI:
             "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
         ] = "sonar",
         openai_model: Literal["o1", "o3-mini"] = "o3-mini",
-        grok_model: Literal["grok-
+        grok_model: Literal["grok-2-latest"] = "grok-2-latest",
         namespace: str = "global",
     ) -> str:
         """Combine multiple data sources with AI reasoning to answer queries.
@@ -782,6 +783,7 @@ class AI:
         Args:
             user_id (str): Unique identifier for the user
             query (str): The question or query to reason about
+            prompt (str, optional): Prompt for reasoning. Defaults to "You combine the data with your reasoning to answer the query."
             use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
             use_grok (bool, optional): Include X/Twitter search results. Defaults to True
             use_facts (bool, optional): Include stored conversation facts. Defaults to True
@@ -844,7 +846,7 @@ class AI:
                 messages=[
                     {
                         "role": "system",
-                        "content":
+                        "content": prompt,
                     },
                     {
                         "role": "user",
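Taken together, the hunks above let callers override the system prompt used by the multi-source reasoning method: the new `prompt` parameter keeps the previous wording as its default and is now passed as the system message content, while `grok_model` is pinned to the single literal "grok-2-latest". A minimal calling sketch, assuming the method is named `reason` and is awaitable (neither detail is visible in this diff); the keyword arguments are the ones shown in the 1.4.3 signature.

```python
# Sketch only: "reason" as the method name and the use of await are
# assumptions; the keyword arguments mirror the 1.4.3 signature above.
from solana_agent.ai import AI


async def answer(ai: AI, user_id: str, question: str) -> str:
    return await ai.reason(  # assumed method name
        user_id=user_id,
        query=question,
        # New in 1.4.3: the system prompt can be customized per call
        prompt="You combine the data with your reasoning to answer the query.",
        use_perplexity=True,
        use_grok=True,
        use_facts=True,
        openai_model="o3-mini",
        grok_model="grok-2-latest",  # only value the Literal now accepts
        namespace="global",
    )
```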
@@ -857,13 +859,15 @@ class AI:
             return f"Failed to reason. Error: {e}"

     # x search tool - has to be sync
-    def search_x(
+    def search_x(
+        self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest"
+    ) -> str:
         try:
             """Search X (formerly Twitter) using Grok API integration.

             Args:
                 query (str): Search query to find relevant X posts
-                model (Literal["grok-
+                model (Literal["grok-2-latest"], optional): Grok model to use. Defaults to "grok-2-latest"

             Returns:
                 str: Search results from X or error message if search fails
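The last hunk gives `search_x` an explicit, fully typed synchronous signature. A usage sketch based on that signature alone; how the `AI` instance is constructed is not part of this diff.

```python
# search_x is synchronous, per the in-source comment "# x search tool - has
# to be sync"; the signature comes from the 1.4.3 hunk above. Obtaining the
# AI instance is assumed to happen elsewhere in application setup.
from solana_agent.ai import AI


def latest_x_posts(ai: AI) -> str:
    # model defaults to "grok-2-latest", the only value its Literal allows
    return ai.search_x("solana agent kit release notes")
```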
Files without changes: LICENSE, README.md, solana_agent/__init__.py