solana-agent 3.0.0__tar.gz → 3.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
PKG-INFO:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 3.0.0
+Version: 3.0.2
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
pyproject.toml:
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "3.0.0"
+version = "3.0.2"
 description = "Build self-learning AI Agents"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"
Python source (class AI; the file path is not shown in the extracted diff):
@@ -70,7 +70,7 @@ class AI:
         pinecone_embed_model: Literal["llama-text-embed-v2"] = "llama-text-embed-v2",
         gemini_api_key: str = None,
         openai_base_url: str = None,
-        main_model: str = "o3-mini",
+        main_model: str = "gpt-4o-mini",
         tool_formatting_model: str = "gpt-4o-mini",
         tool_formatting_instructions: str = None,
     ):
@@ -89,7 +89,7 @@ class AI:
             pinecone_embed_model (Literal["llama-text-embed-v2"], optional): Pinecone embedding model. Defaults to "llama-text-embed-v2"
             gemini_api_key (str, optional): API key for Gemini search. Defaults to None
             openai_base_url (str, optional): Base URL for OpenAI API. Defaults to None
-            main_model (str, optional): Main OpenAI model for conversation. Defaults to "o3-mini"
+            main_model (str, optional): Main OpenAI model for conversation. Defaults to "gpt-4o-mini"
             tool_formatting_model (str, optional): OpenAI model for tool formatting. Defaults to "gpt-4o-mini"
             tool_formatting_instructions (str, optional): Instructions for tool formatting
         Example:
@@ -108,9 +108,12 @@ class AI:
             - Optional integrations for Perplexity, Pinecone, Gemini, and Grok
             - You must create the Pinecone index in the dashboard before using it
         """
-        self._client = OpenAI(api_key=openai_api_key, base_url=openai_base_url) if openai_base_url else OpenAI(
-            api_key=openai_api_key)
-        memory_instructions = """
+        self._client = (
+            OpenAI(api_key=openai_api_key, base_url=openai_base_url)
+            if openai_base_url
+            else OpenAI(api_key=openai_api_key)
+        )
+        self._memory_instructions = """
         You are a highly intelligent, context-aware conversational AI. When a user sends a query or statement, you should not only process the current input but also retrieve and integrate relevant context from their previous interactions. Use the memory data to:
         - Infer nuances in the user's intent.
         - Recall previous topics, preferences, or facts that might be relevant.
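Note: the client construction above is a formatting-only rewrite of the same conditional. A standalone sketch of the pattern (make_client is a hypothetical helper, not part of the package):

    from openai import OpenAI

    def make_client(api_key: str, base_url: str | None = None) -> OpenAI:
        # Only pass base_url when targeting a custom OpenAI-compatible endpoint.
        if base_url:
            return OpenAI(api_key=api_key, base_url=base_url)
        return OpenAI(api_key=api_key)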
@@ -118,9 +121,13 @@ class AI:
         - Clarify ambiguous queries by relating them to known user history.

         Always be concise and ensure that your response maintains coherence across the conversation while respecting the user's context and previous data.
+        You always take the Tool Result over the Memory Context in terms of priority.
         """
-        self._instructions = instructions + " " + memory_instructions
-        self._tool_formatting_instructions = tool_formatting_instructions
+        self._instructions = instructions
+        self._tool_formatting_instructions = (
+            tool_formatting_instructions + " " +
+            self._memory_instructions if tool_formatting_instructions else self._memory_instructions
+        )
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
         if zep_api_key and not zep_base_url:
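Note: two behavioral changes land in this hunk. The memory prompt gains an explicit priority rule (Tool Result over Memory Context), and the memory instructions move off the main system prompt (self._instructions is now the caller's instructions alone) into self._tool_formatting_instructions. The new conditional concatenation is equivalent to this filter-and-join sketch (combine is a hypothetical helper):

    def combine(tool_formatting_instructions: str | None,
                memory_instructions: str) -> str:
        # Caller-supplied formatting rules come first when present;
        # otherwise the memory instructions stand alone.
        parts = [tool_formatting_instructions, memory_instructions]
        return " ".join(p for p in parts if p)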
@@ -463,7 +470,8 @@ class AI:

         # Get UTC time from NTP response
         utc_dt = datetime.datetime.fromtimestamp(
-            response.tx_time, datetime.timezone.utc)
+            response.tx_time, datetime.timezone.utc
+        )

         # Convert to requested timezone
         try:
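Note: this hunk is formatting only. For context, the surrounding method converts an NTP transmit timestamp to an aware UTC datetime and then to a requested timezone; a minimal standalone sketch (the ntplib usage and pool.ntp.org server are assumptions, only response.tx_time and the fromtimestamp call appear in this diff):

    import datetime

    import ntplib
    from zoneinfo import ZoneInfo

    response = ntplib.NTPClient().request("pool.ntp.org")  # assumed server
    # Get an aware UTC datetime from the NTP transmit timestamp.
    utc_dt = datetime.datetime.fromtimestamp(
        response.tx_time, datetime.timezone.utc
    )
    # Convert to the requested timezone.
    local_dt = utc_dt.astimezone(ZoneInfo("America/New_York"))  # example zone
    print(local_dt.isoformat())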
@@ -739,18 +747,6 @@ class AI:
         except Exception:
             pass

-    async def delete_assistant_thread(self, user_id: str):
-        """Delete stored conversation thread for a user on OpenAI.
-
-        Example:
-        ```python
-        await ai.delete_assistant_thread("user123")
-        # Deletes the assistant conversation thread for a user
-        ```
-        """
-        thread_id = self._database.get_thread_id(user_id)
-        await self._client.beta.threads.delete(thread_id=thread_id)
-
     async def delete_memory(self, user_id: str):
         """Delete memory for a specific user from Zep memory.

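Note: delete_assistant_thread, and with it the only use of the OpenAI Assistants beta threads API shown here, is removed in 3.0.2. Per-user cleanup now appears to go through delete_memory alone, in the docstrings' own usage style:

    await ai.delete_memory("user123")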
@@ -807,13 +803,13 @@ class AI:

         async def stream_processor():
             memory = self.get_memory_context(user_id)
+            regular_content = ""  # Add this to accumulate regular content
             response = self._client.chat.completions.create(
                 model=self._main_model,
                 messages=[
                     {
                         "role": "system",
-                        "content": self._instructions + f" Memory: {memory}",
-
+                        "content": self._instructions,
                     },
                     {
                         "role": "user",
@@ -824,6 +820,7 @@ class AI:
                 stream=True,
             )
             for chunk in response:
+                result = ""
                 delta = chunk.choices[0].delta

                 # Process tool call deltas (if any)
@@ -834,11 +831,13 @@ class AI:
                         # Initialize a new tool call record
                         final_tool_calls[index] = {
                             "name": tool_call.function.name,
-                            "arguments": tool_call.function.arguments or ""
+                            "arguments": tool_call.function.arguments or "",
                         }
                     elif tool_call.function.arguments:
                         # Append additional arguments if provided in subsequent chunks
-                        final_tool_calls[index]["arguments"] += tool_call.function.arguments
+                        final_tool_calls[index]["arguments"] += (
+                            tool_call.function.arguments
+                        )

                     try:
                         args = json.loads(
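Note: trailing-comma and line-wrapping changes only; the accumulation logic is unchanged. Because streamed tool-call arguments arrive as JSON fragments split across chunks, they must be concatenated per call index before parsing. A self-contained sketch with simulated chunks (SimpleNamespace stands in for the OpenAI delta objects):

    import json
    from types import SimpleNamespace as NS

    # Two simulated deltas for one tool call; the JSON is split mid-string.
    deltas = [
        NS(index=0, function=NS(name="get_time", arguments='{"timez')),
        NS(index=0, function=NS(name=None, arguments='one": "UTC"}')),
    ]

    final_tool_calls = {}
    for tool_call in deltas:
        index = tool_call.index
        if index not in final_tool_calls:
            # Initialize a new tool call record
            final_tool_calls[index] = {
                "name": tool_call.function.name,
                "arguments": tool_call.function.arguments or "",
            }
        elif tool_call.function.arguments:
            # Append additional arguments if provided in subsequent chunks
            final_tool_calls[index]["arguments"] += tool_call.function.arguments

    print(json.loads(final_tool_calls[0]["arguments"]))  # {'timezone': 'UTC'}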
@@ -852,11 +851,7 @@ class AI:
                             messages=[
                                 {
                                     "role": "system",
-                                    "content": self._tool_formatting_instructions,
-                                },
-                                {
-                                    "role": "user",
-                                    "content": result,
+                                    "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
                                 },
                             ],
                             stream=True,
@@ -865,7 +860,9 @@ class AI:
                             delta = chunk.choices[0].delta

                             if delta.content is not None:
-                                await self._accumulated_value_queue.put(delta.content)
+                                await self._accumulated_value_queue.put(
+                                    delta.content
+                                )
                         # Remove the cached tool call info so it doesn't block future calls
                         del final_tool_calls[index]
                     except json.JSONDecodeError:
@@ -874,7 +871,29 @@ class AI:

                 # Process regular response content
                 if delta.content is not None:
-                    await self._accumulated_value_queue.put(delta.content)
+                    regular_content += delta.content  # Accumulate instead of directly sending
+
+            # After processing all chunks from the first response
+            if regular_content:  # Only if we have regular content
+                # Format the regular content with memory context, similar to tool results
+                response = self._client.chat.completions.create(
+                    model=self._tool_formatting_model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                        },
+                        {
+                            "role": "user",
+                            "content": user_text,
+                        }
+                    ],
+                    stream=True,
+                )
+                for chunk in response:
+                    delta = chunk.choices[0].delta
+                    if delta.content is not None:
+                        await self._accumulated_value_queue.put(delta.content)

         # Start the stream processor as a background task
         asyncio.create_task(stream_processor())
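Note: this is the biggest behavioral change in 3.0.2. Plain (non-tool) responses are no longer streamed straight to the queue; they are buffered in full and replayed through a second completion on the tool-formatting model with the memory context injected. That buys memory-aware rewriting at the cost of a second model call per reply and a delayed first token. The shape of the new flow, as a sketch (names follow the diff; client wiring omitted):

    def two_pass(client, main_model, formatting_model, instructions,
                 memory_instructions, memory, user_text):
        # Pass 1: stream the draft and buffer it instead of emitting it.
        draft = ""
        for chunk in client.chat.completions.create(
            model=main_model,
            messages=[{"role": "system", "content": instructions},
                      {"role": "user", "content": user_text}],
            stream=True,
        ):
            delta = chunk.choices[0].delta
            if delta.content is not None:
                draft += delta.content
        # Pass 2: rewrite the draft with memory context and stream that instead.
        for chunk in client.chat.completions.create(
            model=formatting_model,
            messages=[{"role": "system",
                       "content": f"Rules: {memory_instructions}, "
                                  f"Regular Content: {draft}, "
                                  f"Memory Context: {memory}"},
                      {"role": "user", "content": user_text}],
            stream=True,
        ):
            delta = chunk.choices[0].delta
            if delta.content is not None:
                yield delta.content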
@@ -882,7 +901,9 @@ class AI:
         # Yield values from the queue as they become available.
         while True:
             try:
-                value = await asyncio.wait_for(self._accumulated_value_queue.get(), timeout=0.1)
+                value = await asyncio.wait_for(
+                    self._accumulated_value_queue.get(), timeout=0.1
+                )
                 if value is not None:
                     final_response += value
                     yield value
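Note: wrapping only. The consumer side drains the shared asyncio.Queue with a 0.1-second timeout; once the producer task goes quiet, the resulting TimeoutError presumably ends the loop (that handler sits outside this hunk). A self-contained sketch of the pattern:

    import asyncio

    async def producer(queue: asyncio.Queue) -> None:
        for part in ("Hello", ", ", "world"):
            await queue.put(part)

    async def main() -> str:
        queue: asyncio.Queue = asyncio.Queue()
        asyncio.create_task(producer(queue))
        final_response = ""
        while True:
            try:
                value = await asyncio.wait_for(queue.get(), timeout=0.1)
            except asyncio.TimeoutError:
                break  # no chunk for 0.1 s: treat the stream as finished
            if value is not None:
                final_response += value
        return final_response

    print(asyncio.run(main()))  # Hello, world

One caveat of this design: any upstream stall longer than the timeout ends the stream early.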
@@ -968,22 +989,24 @@ class AI:

         async def stream_processor():
             memory = self.get_memory_context(user_id)
+            regular_content = ""  # Add this to accumulate regular content
             response = self._client.chat.completions.create(
-                model=self._openai_model,
+                model=self._main_model,
                 messages=[
                     {
                         "role": "system",
-                        "content": self._instructions + f" Memory: {memory}",
+                        "content": self._instructions,
                     },
                     {
                         "role": "user",
-                        "content": transcript,
+                        "content": transcript,
                     },
                 ],
                 tools=self._tools,
                 stream=True,
             )
             for chunk in response:
+                result = ""
                 delta = chunk.choices[0].delta

                 # Process tool call deltas (if any)
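Note: the hunks from @@ -968 onward apply the same restructuring to the transcript-driven (voice) path as to the text path above, and also fix what looks like a stale attribute: model=self._openai_model becomes model=self._main_model, matching the constructor parameter.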
@@ -994,11 +1017,13 @@ class AI:
                         # Initialize a new tool call record
                         final_tool_calls[index] = {
                             "name": tool_call.function.name,
-                            "arguments": tool_call.function.arguments or ""
+                            "arguments": tool_call.function.arguments or "",
                         }
                     elif tool_call.function.arguments:
                         # Append additional arguments if provided in subsequent chunks
-                        final_tool_calls[index]["arguments"] += tool_call.function.arguments
+                        final_tool_calls[index]["arguments"] += (
+                            tool_call.function.arguments
+                        )

                     try:
                         args = json.loads(
@@ -1012,11 +1037,7 @@ class AI:
                             messages=[
                                 {
                                     "role": "system",
-                                    "content": self._tool_formatting_instructions,
-                                },
-                                {
-                                    "role": "user",
-                                    "content": result,
+                                    "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
                                 },
                             ],
                             stream=True,
@@ -1025,7 +1046,9 @@ class AI:
                             delta = chunk.choices[0].delta

                             if delta.content is not None:
-                                await self._accumulated_value_queue.put(delta.content)
+                                await self._accumulated_value_queue.put(
+                                    delta.content
+                                )
                         # Remove the cached tool call info so it doesn't block future calls
                         del final_tool_calls[index]
                     except json.JSONDecodeError:
@@ -1034,7 +1057,29 @@ class AI:

                 # Process regular response content
                 if delta.content is not None:
-                    await self._accumulated_value_queue.put(delta.content)
+                    regular_content += delta.content  # Accumulate instead of directly sending
+
+            # After processing all chunks from the first response
+            if regular_content:  # Only if we have regular content
+                # Format the regular content with memory context, similar to tool results
+                response = self._client.chat.completions.create(
+                    model=self._tool_formatting_model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                        },
+                        {
+                            "role": "user",
+                            "content": transcript,
+                        }
+                    ],
+                    stream=True,
+                )
+                for chunk in response:
+                    delta = chunk.choices[0].delta
+                    if delta.content is not None:
+                        await self._accumulated_value_queue.put(delta.content)

         # Start the stream processor as a background task
         asyncio.create_task(stream_processor())
@@ -1042,7 +1087,9 @@ class AI:
         # Yield values from the queue as they become available.
         while True:
             try:
-                value = await asyncio.wait_for(self._accumulated_value_queue.get(), timeout=0.1)
+                value = await asyncio.wait_for(
+                    self._accumulated_value_queue.get(), timeout=0.1
+                )
                 if value is not None:
                     final_response += value
                     yield value
All remaining files in the package are unchanged between 3.0.0 and 3.0.2.