solana-agent 3.0.2__tar.gz → 4.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 3.0.2
+Version: 4.0.1
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "3.0.2"
+version = "4.0.1"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
@@ -70,9 +70,8 @@ class AI:
         pinecone_embed_model: Literal["llama-text-embed-v2"] = "llama-text-embed-v2",
         gemini_api_key: str = None,
         openai_base_url: str = None,
-        main_model: str = "gpt-4o-mini",
-        tool_formatting_model: str = "gpt-4o-mini",
-        tool_formatting_instructions: str = None,
+        tool_calling_model: str = "gpt-4o-mini",
+        reasoning_model: str = "gpt-4o-mini",
     ):
         """Initialize a new AI assistant instance.

@@ -89,9 +88,8 @@ class AI:
             pinecone_embed_model (Literal["llama-text-embed-v2"], optional): Pinecone embedding model. Defaults to "llama-text-embed-v2"
             gemini_api_key (str, optional): API key for Gemini search. Defaults to None
             openai_base_url (str, optional): Base URL for OpenAI API. Defaults to None
-            main_model (str, optional): Main OpenAI model for conversation. Defaults to "gpt-4o-mini"
-            tool_formatting_model (str, optional): OpenAI model for tool formatting. Defaults to "gpt-4o-mini"
-            tool_formatting_instructions (str, optional): Instructions for tool formatting
+            tool_calling_model (str, optional): Model for tool calling. Defaults to "gpt-4o-mini"
+            reasoning_model (str, optional): Model for reasoning. Defaults to "gpt-4o-mini"
         Example:
             ```python
             ai = AI(
@@ -124,10 +122,7 @@ class AI:
         You always take the Tool Result over the Memory Context in terms of priority.
         """
         self._instructions = instructions
-        self._tool_formatting_instructions = (
-            tool_formatting_instructions + " " +
-            self._memory_instructions if tool_formatting_instructions else self._memory_instructions
-        )
+        self._reasoning_instructions = self._memory_instructions + " " + instructions
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
         if zep_api_key and not zep_base_url:
@@ -152,8 +147,8 @@ class AI:
                 self._pinecone_index_name) if self._pinecone else None
         )
         self._openai_base_url = openai_base_url
-        self._main_model = main_model
-        self._tool_formatting_model = tool_formatting_model
+        self._tool_calling_model = tool_calling_model
+        self._reasoning_model = reasoning_model
         self._tools = []

     async def __aenter__(self):
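
Taken together, the constructor hunks above amount to a parameter rename plus one removal. A minimal migration sketch follows; the import path and the `instructions` value are illustrative, the call is intentionally incomplete (API keys, database, and other required arguments are omitted), and only the two renamed model parameters and their defaults come from this diff:

```python
# 3.0.2: AI(..., main_model=..., tool_formatting_model=..., tool_formatting_instructions=...)
# 4.0.1: the two model kwargs are renamed and the instructions kwarg is gone;
#        reasoning instructions are now built internally from the memory
#        instructions plus `instructions`.
from solana_agent import AI  # import path assumed, not shown in this diff

ai = AI(
    instructions="You are a helpful agent.",  # illustrative value
    tool_calling_model="gpt-4o-mini",         # was main_model in 3.0.2
    reasoning_model="gpt-4o-mini",            # replaces tool_formatting_model
)
```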
@@ -585,101 +580,6 @@ class AI:
         except Exception as e:
             return f"Failed to search Perplexity. Error: {e}"

-    # reason tool - has to be sync
-    def reason(
-        self,
-        user_id: str,
-        query: str,
-        prompt: str = "You combine the data with your reasoning to answer the query.",
-        use_perplexity: bool = True,
-        use_grok: bool = True,
-        use_kb: bool = True,
-        perplexity_model: Literal[
-            "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
-        ] = "sonar",
-        openai_model: Literal["o1", "o3-mini"] = "o3-mini",
-        grok_model: Literal["grok-2-latest"] = "grok-2-latest",
-        namespace: str = "global",
-    ) -> str:
-        """Combine multiple data sources with AI reasoning to answer queries.
-
-        Args:
-            user_id (str): Unique identifier for the user
-            query (str): The question or query to reason about
-            prompt (str, optional): Prompt for reasoning. Defaults to "You combine the data with your reasoning to answer the query."
-            use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
-            use_grok (bool, optional): Include X/Twitter search results. Defaults to True
-            use_kb (bool, optional): Include Pinecone knowledge base search results. Defaults to True
-            perplexity_model (Literal, optional): Perplexity model to use. Defaults to "sonar"
-            openai_model (Literal, optional): OpenAI model for reasoning. Defaults to "o3-mini"
-            grok_model (Literal, optional): Grok model for X search. Defaults to "grok-beta"
-            namespace (str): Namespace of the Pinecone index to search. Defaults to "global"
-
-        Returns:
-            str: Reasoned response combining all enabled data sources or error message
-
-        Example:
-            ```python
-            result = ai.reason(
-                user_id="user123",
-                query="What are the latest AI trends?",
-            )
-            # Returns: "Based on multiple sources: [comprehensive answer]"
-            ```
-
-        Note:
-            This is a synchronous tool method required for OpenAI function calling.
-            Requires configuration of relevant API keys for enabled data sources.
-            Will gracefully handle missing or failed data sources.
-        """
-        try:
-            if use_kb:
-                try:
-                    kb_results = self.search_kb(query, namespace)
-                except Exception:
-                    kb_results = ""
-            else:
-                kb_results = ""
-
-            if use_perplexity:
-                try:
-                    search_results = self.search_internet(
-                        query, perplexity_model)
-                except Exception:
-                    search_results = ""
-            else:
-                search_results = ""
-
-            if use_grok:
-                try:
-                    x_search_results = self.search_x(query, grok_model)
-                except Exception:
-                    x_search_results = ""
-            else:
-                x_search_results = ""
-
-            if self._zep:
-                memory = self._sync_zep.memory.get(session_id=user_id)
-            else:
-                memory = ""
-
-            response = self._client.chat.completions.create(
-                model=openai_model,
-                messages=[
-                    {
-                        "role": "system",
-                        "content": prompt,
-                    },
-                    {
-                        "role": "user",
-                        "content": f"Query: {query}, Memory: {memory}, KB Results: {kb_results}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
-                    },
-                ],
-            )
-            return response.choices[0].message.content
-        except Exception as e:
-            return f"Failed to reason. Error: {e}"
-
     # x search tool - has to be sync
     def search_x(
         self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest"
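
Version 4.0.1 drops the `reason` tool entirely rather than renaming it. For callers that depended on it, a rough, unofficial sketch of the same flow built from the pieces that remain follows; the `search_kb`, `search_internet`, and `search_x` call signatures are taken from the deleted code above, the plain OpenAI client call mirrors the deleted body, and the Zep memory lookup is omitted. Nothing here is part of the 4.0.1 package:

```python
# Unofficial sketch reproducing the removed reason() flow outside the library.
# `ai` is an initialized solana_agent AI instance and `client` an OpenAI client;
# model choices and the helper name are illustrative.
def reason_like(ai, client, query: str, namespace: str = "global") -> str:
    kb = ai.search_kb(query, namespace)           # Pinecone knowledge base search
    web = ai.search_internet(query, "sonar")      # Perplexity search
    social = ai.search_x(query, "grok-2-latest")  # X/Twitter search via Grok
    response = client.chat.completions.create(
        model="o3-mini",
        messages=[
            {"role": "system",
             "content": "You combine the data with your reasoning to answer the query."},
            {"role": "user",
             "content": f"Query: {query}, KB Results: {kb}, "
                        f"Internet Search Results: {web}, X Search Results: {social}"},
        ],
    )
    return response.choices[0].message.content
```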
@@ -801,11 +701,21 @@ class AI:
         final_tool_calls = {}  # Accumulate tool call deltas
         final_response = ""

+        if self._zep:
+            messages = [
+                Message(
+                    role="user",
+                    role_type="user",
+                    content=user_text,
+                ),
+            ]
+            await self._zep.memory.add(session_id=user_id, messages=messages)
+
         async def stream_processor():
             memory = self.get_memory_context(user_id)
-            regular_content = ""  # Add this to accumulate regular content
+            regular_content = ""
             response = self._client.chat.completions.create(
-                model=self._main_model,
+                model=self._tool_calling_model,
                 messages=[
                     {
                         "role": "system",
@@ -847,11 +757,11 @@ class AI:
                     # Execute the tool call (synchronously; adjust if async is needed)
                     result = func(**args)
                     response = self._client.chat.completions.create(
-                        model=self._tool_formatting_model,
+                        model=self._reasoning_model,
                         messages=[
                             {
                                 "role": "system",
-                                "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
+                                "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                             },
                         ],
                         stream=True,
@@ -871,22 +781,24 @@ class AI:

                 # Process regular response content
                 if delta.content is not None:
-                    regular_content += delta.content  # Accumulate instead of directly sending
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending

             # After processing all chunks from the first response
             if regular_content:  # Only if we have regular content
                 # Format the regular content with memory context, similar to tool results
                 response = self._client.chat.completions.create(
-                    model=self._tool_formatting_model,
+                    model=self._reasoning_model,
                     messages=[
                         {
                             "role": "system",
-                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
                         },
                         {
                             "role": "user",
                             "content": user_text,
-                        }
+                        },
                     ],
                     stream=True,
                 )
@@ -920,20 +832,6 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }
         self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=user_text,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)

     async def conversation(
         self,
987
885
  final_tool_calls = {} # Accumulate tool call deltas
988
886
  final_response = ""
989
887
 
888
+ if self._zep:
889
+ messages = [
890
+ Message(
891
+ role="user",
892
+ role_type="user",
893
+ content=transcript,
894
+ ),
895
+ ]
896
+ await self._zep.memory.add(session_id=user_id, messages=messages)
897
+
990
898
  async def stream_processor():
991
899
  memory = self.get_memory_context(user_id)
992
900
  regular_content = "" # Add this to accumulate regular content
993
901
  response = self._client.chat.completions.create(
994
- model=self._main_model,
902
+ model=self._tool_calling_model,
995
903
  messages=[
996
904
  {
997
905
  "role": "system",
@@ -1033,11 +941,11 @@ class AI:
                     # Execute the tool call (synchronously; adjust if async is needed)
                     result = func(**args)
                     response = self._client.chat.completions.create(
-                        model=self._tool_formatting_model,
+                        model=self._reasoning_model,
                         messages=[
                             {
                                 "role": "system",
-                                "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
+                                "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                             },
                         ],
                         stream=True,
@@ -1057,22 +965,24 @@ class AI:

                 # Process regular response content
                 if delta.content is not None:
-                    regular_content += delta.content  # Accumulate instead of directly sending
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending

             # After processing all chunks from the first response
             if regular_content:  # Only if we have regular content
                 # Format the regular content with memory context, similar to tool results
                 response = self._client.chat.completions.create(
-                    model=self._tool_formatting_model,
+                    model=self._reasoning_model,
                     messages=[
                         {
                             "role": "system",
-                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
                         },
                         {
                             "role": "user",
                             "content": transcript,
-                        }
+                        },
                     ],
                     stream=True,
                 )
@@ -1106,20 +1016,6 @@ class AI:
             "timestamp": datetime.datetime.now(datetime.timezone.utc),
         }
         self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=transcript,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)

         # Generate and stream the audio response
         with self._client.audio.speech.with_streaming_response.create(
File without changes
File without changes
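
One behavioral consequence of the Zep-related hunks above: in 4.0.1 both the text and the audio paths write only the user turn to Zep, and they do so before streaming starts; the assistant turn is no longer persisted by this code. A hedged sketch of restoring that behavior from the caller's side, reusing the exact `Message`/`memory.add` shape from the removed lines (the `zep_cloud` import paths are assumptions, since imports are not shown in this diff):

```python
# Sketch: persist the assistant reply to Zep yourself after a 4.0.1 call,
# since the library no longer does it in these code paths.
from zep_cloud.client import AsyncZep   # import paths assumed
from zep_cloud.types import Message

async def save_assistant_turn(zep: AsyncZep, user_id: str, final_response: str) -> None:
    # Mirrors the removed Message(role="assistant", ...) + memory.add() call.
    await zep.memory.add(
        session_id=user_id,
        messages=[
            Message(
                role="assistant",
                role_type="assistant",
                content=final_response,
            ),
        ],
    )
```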