solana-agent 3.0.2__tar.gz → 4.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
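
The most visible API change in the hunks below is a rename of the `AI` constructor's model options: `main_model` becomes `tool_calling_model`, and `tool_formatting_model` / `tool_formatting_instructions` become `reasoning_model` / `reasoning_instructions`. As a minimal, hypothetical sketch of a 4.0.0 call site (the import path is assumed from the package name, the full required argument list is not visible in this diff, and `instructions` / `database` are taken from the constructor body shown further down):

```python
from solana_agent import AI  # module path assumed from the package name

database = ...  # a MongoDatabase instance in real use; elided here

ai = AI(
    instructions="You are a helpful assistant.",
    database=database,
    # 3.0.2 names were: main_model, tool_formatting_model, tool_formatting_instructions
    tool_calling_model="gpt-4o-mini",   # runs the first, tool-selecting completion
    reasoning_model="gpt-4o-mini",      # folds tool results and memory context into the reply
    reasoning_instructions=None,        # optional; appended to the built-in memory instructions
)
```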
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 3.0.2
+Version: 4.0.0
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "3.0.2"
+version = "4.0.0"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
@@ -70,9 +70,9 @@ class AI:
         pinecone_embed_model: Literal["llama-text-embed-v2"] = "llama-text-embed-v2",
         gemini_api_key: str = None,
         openai_base_url: str = None,
-        main_model: str = "gpt-4o-mini",
-        tool_formatting_model: str = "gpt-4o-mini",
-        tool_formatting_instructions: str = None,
+        tool_calling_model: str = "gpt-4o-mini",
+        reasoning_model: str = "gpt-4o-mini",
+        reasoning_instructions: str = None,
     ):
         """Initialize a new AI assistant instance.
 
@@ -89,9 +89,9 @@ class AI:
             pinecone_embed_model (Literal["llama-text-embed-v2"], optional): Pinecone embedding model. Defaults to "llama-text-embed-v2"
             gemini_api_key (str, optional): API key for Gemini search. Defaults to None
             openai_base_url (str, optional): Base URL for OpenAI API. Defaults to None
-            main_model (str, optional): Main OpenAI model for conversation. Defaults to "gpt-4o-mini"
-            tool_formatting_model (str, optional): OpenAI model for tool formatting. Defaults to "gpt-4o-mini"
-            tool_formatting_instructions (str, optional): Instructions for tool formatting
+            tool_calling_model (str, optional): Model for tool calling. Defaults to "gpt-4o-mini"
+            reasoning_model (str, optional): Model for reasoning. Defaults to "gpt-4o-mini"
+            reasoning_instructions (str, optional): Instructions for reasoning. Defaults to None
         Example:
             ```python
             ai = AI(
@@ -124,9 +124,10 @@ class AI:
         You always take the Tool Result over the Memory Context in terms of priority.
         """
         self._instructions = instructions
-        self._tool_formatting_instructions = (
-            tool_formatting_instructions + " " +
-            self._memory_instructions if tool_formatting_instructions else self._memory_instructions
+        self._reasoning_instructions = (
+            reasoning_instructions + " " + self._memory_instructions
+            if reasoning_instructions
+            else self._memory_instructions
         )
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
@@ -152,8 +153,8 @@ class AI:
                 self._pinecone_index_name) if self._pinecone else None
         )
         self._openai_base_url = openai_base_url
-        self._main_model = main_model
-        self._tool_formatting_model = tool_formatting_model
+        self._tool_calling_model = tool_calling_model
+        self._reasoning_model = reasoning_model
         self._tools = []
 
     async def __aenter__(self):
@@ -585,101 +586,6 @@ class AI:
         except Exception as e:
             return f"Failed to search Perplexity. Error: {e}"
 
-    # reason tool - has to be sync
-    def reason(
-        self,
-        user_id: str,
-        query: str,
-        prompt: str = "You combine the data with your reasoning to answer the query.",
-        use_perplexity: bool = True,
-        use_grok: bool = True,
-        use_kb: bool = True,
-        perplexity_model: Literal[
-            "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
-        ] = "sonar",
-        openai_model: Literal["o1", "o3-mini"] = "o3-mini",
-        grok_model: Literal["grok-2-latest"] = "grok-2-latest",
-        namespace: str = "global",
-    ) -> str:
-        """Combine multiple data sources with AI reasoning to answer queries.
-
-        Args:
-            user_id (str): Unique identifier for the user
-            query (str): The question or query to reason about
-            prompt (str, optional): Prompt for reasoning. Defaults to "You combine the data with your reasoning to answer the query."
-            use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
-            use_grok (bool, optional): Include X/Twitter search results. Defaults to True
-            use_kb (bool, optional): Include Pinecone knowledge base search results. Defaults to True
-            perplexity_model (Literal, optional): Perplexity model to use. Defaults to "sonar"
-            openai_model (Literal, optional): OpenAI model for reasoning. Defaults to "o3-mini"
-            grok_model (Literal, optional): Grok model for X search. Defaults to "grok-beta"
-            namespace (str): Namespace of the Pinecone index to search. Defaults to "global"
-
-        Returns:
-            str: Reasoned response combining all enabled data sources or error message
-
-        Example:
-            ```python
-            result = ai.reason(
-                user_id="user123",
-                query="What are the latest AI trends?",
-            )
-            # Returns: "Based on multiple sources: [comprehensive answer]"
-            ```
-
-        Note:
-            This is a synchronous tool method required for OpenAI function calling.
-            Requires configuration of relevant API keys for enabled data sources.
-            Will gracefully handle missing or failed data sources.
-        """
-        try:
-            if use_kb:
-                try:
-                    kb_results = self.search_kb(query, namespace)
-                except Exception:
-                    kb_results = ""
-            else:
-                kb_results = ""
-
-            if use_perplexity:
-                try:
-                    search_results = self.search_internet(
-                        query, perplexity_model)
-                except Exception:
-                    search_results = ""
-            else:
-                search_results = ""
-
-            if use_grok:
-                try:
-                    x_search_results = self.search_x(query, grok_model)
-                except Exception:
-                    x_search_results = ""
-            else:
-                x_search_results = ""
-
-            if self._zep:
-                memory = self._sync_zep.memory.get(session_id=user_id)
-            else:
-                memory = ""
-
-            response = self._client.chat.completions.create(
-                model=openai_model,
-                messages=[
-                    {
-                        "role": "system",
-                        "content": prompt,
-                    },
-                    {
-                        "role": "user",
-                        "content": f"Query: {query}, Memory: {memory}, KB Results: {kb_results}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
-                    },
-                ],
-            )
-            return response.choices[0].message.content
-        except Exception as e:
-            return f"Failed to reason. Error: {e}"
-
     # x search tool - has to be sync
     def search_x(
         self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest"
@@ -801,11 +707,21 @@ class AI:
         final_tool_calls = {}  # Accumulate tool call deltas
         final_response = ""
 
+        if self._zep:
+            messages = [
+                Message(
+                    role="user",
+                    role_type="user",
+                    content=user_text,
+                ),
+            ]
+            await self._zep.memory.add(session_id=user_id, messages=messages)
+
         async def stream_processor():
             memory = self.get_memory_context(user_id)
-            regular_content = ""  # Add this to accumulate regular content
+            regular_content = ""
             response = self._client.chat.completions.create(
-                model=self._main_model,
+                model=self._tool_calling_model,
                 messages=[
                     {
                         "role": "system",
@@ -847,11 +763,11 @@ class AI:
                     # Execute the tool call (synchronously; adjust if async is needed)
                     result = func(**args)
                     response = self._client.chat.completions.create(
-                        model=self._tool_formatting_model,
+                        model=self._reasoning_model,
                         messages=[
                             {
                                 "role": "system",
-                                "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
+                                "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                             },
                         ],
                         stream=True,
@@ -871,22 +787,24 @@ class AI:
 
                 # Process regular response content
                 if delta.content is not None:
-                    regular_content += delta.content  # Accumulate instead of directly sending
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending
 
             # After processing all chunks from the first response
             if regular_content:  # Only if we have regular content
                 # Format the regular content with memory context, similar to tool results
                 response = self._client.chat.completions.create(
-                    model=self._tool_formatting_model,
+                    model=self._reasoning_model,
                     messages=[
                         {
                             "role": "system",
-                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
                         },
                         {
                             "role": "user",
                             "content": user_text,
-                        }
+                        },
                     ],
                     stream=True,
                 )
@@ -920,20 +838,6 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }
         self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=user_text,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)
 
     async def conversation(
         self,
@@ -987,11 +891,21 @@ class AI:
         final_tool_calls = {}  # Accumulate tool call deltas
         final_response = ""
 
+        if self._zep:
+            messages = [
+                Message(
+                    role="user",
+                    role_type="user",
+                    content=transcript,
+                ),
+            ]
+            await self._zep.memory.add(session_id=user_id, messages=messages)
+
         async def stream_processor():
             memory = self.get_memory_context(user_id)
             regular_content = ""  # Add this to accumulate regular content
             response = self._client.chat.completions.create(
-                model=self._main_model,
+                model=self._tool_calling_model,
                 messages=[
                     {
                         "role": "system",
@@ -1033,11 +947,11 @@ class AI:
                     # Execute the tool call (synchronously; adjust if async is needed)
                     result = func(**args)
                     response = self._client.chat.completions.create(
-                        model=self._tool_formatting_model,
+                        model=self._reasoning_model,
                         messages=[
                             {
                                 "role": "system",
-                                "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
+                                "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                             },
                         ],
                         stream=True,
@@ -1057,22 +971,24 @@ class AI:
 
                 # Process regular response content
                 if delta.content is not None:
-                    regular_content += delta.content  # Accumulate instead of directly sending
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending
 
             # After processing all chunks from the first response
             if regular_content:  # Only if we have regular content
                 # Format the regular content with memory context, similar to tool results
                 response = self._client.chat.completions.create(
-                    model=self._tool_formatting_model,
+                    model=self._reasoning_model,
                     messages=[
                         {
                             "role": "system",
-                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
                         },
                         {
                             "role": "user",
                             "content": transcript,
-                        }
+                        },
                     ],
                     stream=True,
                 )
@@ -1106,20 +1022,6 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }
         self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=transcript,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)
 
         # Generate and stream the audio response
         with self._client.audio.speech.with_streaming_response.create(
File without changes
File without changes
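
Taken together, the hunks above change the chat flow in two ways: the user's message is now written to Zep memory before streaming starts (instead of afterwards, together with the assistant reply), and the old `main_model` / `tool_formatting_model` pair becomes a two-stage `tool_calling_model` → `reasoning_model` pipeline inside `stream_processor`. A minimal, non-streaming sketch of that shape follows, with the class plumbing (tool dispatch, queueing, Zep sessions) stripped out; the function name `reply`, the placeholder system prompt, and the synchronous style are illustrative only, not the package's API:

```python
from openai import OpenAI

client = OpenAI()  # stands in for self._client

def reply(user_text: str, memory: str, reasoning_instructions: str,
          tool_calling_model: str = "gpt-4o-mini",
          reasoning_model: str = "gpt-4o-mini") -> str:
    # Stage 1: the tool-calling model produces the first response
    # (in the package this call streams and may emit tool calls).
    first = client.chat.completions.create(
        model=tool_calling_model,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},  # placeholder
            {"role": "user", "content": user_text},
        ],
    )
    regular_content = first.choices[0].message.content or ""

    # Stage 2 (the regular-content branch from the diff): re-ask the reasoning
    # model with the reasoning instructions and memory context as system rules.
    if regular_content:
        second = client.chat.completions.create(
            model=reasoning_model,
            messages=[
                {
                    "role": "system",
                    "content": f"Rules: {reasoning_instructions}, Memory Context: {memory}",
                },
                {"role": "user", "content": user_text},
            ],
        )
        return second.choices[0].message.content or ""
    return regular_content
```

In the tool-call branch the diff makes the same second call, except the system content also carries the tool result and no separate user message is sent.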