solana-agent 1.4.2.tar.gz → 1.4.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 1.4.2
+Version: 1.4.3
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "1.4.2"
+version = "1.4.3"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
@@ -380,7 +380,7 @@ class AI:
             filename (str): The name of the CSV file
             prompt (str, optional): Custom prompt for summarization. Defaults to "Summarize the markdown table into a report, include important metrics and totals."
             namespace (str, optional): Knowledge base namespace. Defaults to "global".
-            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
+            model (Literal["gemini-2.0-flash", "gemini-1.5-pro"], optional):
                 Gemini model for summarization. Defaults to "gemini-1.5-pro"
 
         Example:
@@ -766,6 +766,7 @@ class AI:
         self,
         user_id: str,
         query: str,
+        prompt: str = "You combine the data with your reasoning to answer the query.",
         use_perplexity: bool = True,
         use_grok: bool = True,
         use_facts: bool = True,
@@ -782,6 +783,7 @@ class AI:
         Args:
             user_id (str): Unique identifier for the user
             query (str): The question or query to reason about
+            prompt (str, optional): Prompt for reasoning. Defaults to "You combine the data with your reasoning to answer the query."
             use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
             use_grok (bool, optional): Include X/Twitter search results. Defaults to True
             use_facts (bool, optional): Include stored conversation facts. Defaults to True
@@ -844,7 +846,7 @@ class AI:
             messages=[
                 {
                     "role": "system",
-                    "content": "You combine the data with your reasoning to answer the query.",
+                    "content": prompt,
                 },
                 {
                     "role": "user",
@@ -857,7 +859,9 @@ class AI:
             return f"Failed to reason. Error: {e}"
 
     # x search tool - has to be sync
-    def search_x(self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest") -> str:
+    def search_x(
+        self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest"
+    ) -> str:
         try:
             """Search X (formerly Twitter) using Grok API integration.
 
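
The search_x() hunk is a formatting-only change (the signature is re-wrapped across lines); its parameters and behavior are unchanged. A call sketch using the same assumed ai instance as above:

# search_x is synchronous per the comment in the diff; "grok-2-latest" is the
# only value its model Literal currently accepts and is also the default.
posts = ai.search_x("What is the community saying about Solana fees today?")
print(posts)
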