solana-agent 2.1.3__py3-none-any.whl → 3.0.1__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
solana_agent/ai.py CHANGED
@@ -9,6 +9,7 @@ from pydantic import BaseModel
 from pymongo import MongoClient
 from openai import OpenAI
 import inspect
+import pytz
 import requests
 from zep_cloud.client import AsyncZep as AsyncZepCloud
 from zep_cloud.client import Zep as ZepCloud
@@ -69,7 +70,9 @@ class AI:
         pinecone_embed_model: Literal["llama-text-embed-v2"] = "llama-text-embed-v2",
         gemini_api_key: str = None,
         openai_base_url: str = None,
-        openai_model: str = "gpt-4o-mini",
+        main_model: str = "gpt-4o-mini",
+        tool_formatting_model: str = "gpt-4o-mini",
+        tool_formatting_instructions: str = None,
     ):
         """Initialize a new AI assistant instance.

@@ -86,8 +89,9 @@ class AI:
             pinecone_embed_model (Literal["llama-text-embed-v2"], optional): Pinecone embedding model. Defaults to "llama-text-embed-v2"
             gemini_api_key (str, optional): API key for Gemini search. Defaults to None
             openai_base_url (str, optional): Base URL for OpenAI API. Defaults to None
-            openai_model (str, optional): OpenAI model to use. Defaults to "gpt-4o-mini"
-
+            main_model (str, optional): Main OpenAI model for conversation. Defaults to "gpt-4o-mini"
+            tool_formatting_model (str, optional): OpenAI model for tool formatting. Defaults to "gpt-4o-mini"
+            tool_formatting_instructions (str, optional): Instructions for tool formatting
         Example:
             ```python
             ai = AI(
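The two hunks above change the public constructor: `openai_model` is split into `main_model` (drives the conversation) and `tool_formatting_model` (post-processes tool results), with optional `tool_formatting_instructions`. A minimal migration sketch; the import path, key, and `database` value are placeholders, and constructor parameters not visible in this diff are omitted:

```python
from solana_agent.ai import AI, MongoDatabase  # assumed import path

database = MongoDatabase(...)  # placeholder; construct as in 2.1.3

# 2.1.3:
# ai = AI(openai_api_key="sk-...", instructions="You are helpful.",
#         database=database, openai_model="gpt-4o-mini")

# 3.0.1: one model drives the conversation, a second reformats tool output.
ai = AI(
    openai_api_key="sk-...",  # placeholder key
    instructions="You are helpful.",
    database=database,
    main_model="gpt-4o-mini",
    tool_formatting_model="gpt-4o-mini",
    tool_formatting_instructions="Summarize tool output in plain English.",
)
```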
@@ -104,9 +108,12 @@ class AI:
             - Optional integrations for Perplexity, Pinecone, Gemini, and Grok
             - You must create the Pinecone index in the dashboard before using it
         """
-        self._client = OpenAI(api_key=openai_api_key, base_url=openai_base_url) if openai_base_url else OpenAI(
-            api_key=openai_api_key)
-        memory_instructions = """
+        self._client = (
+            OpenAI(api_key=openai_api_key, base_url=openai_base_url)
+            if openai_base_url
+            else OpenAI(api_key=openai_api_key)
+        )
+        self._memory_instructions = """
         You are a highly intelligent, context-aware conversational AI. When a user sends a query or statement, you should not only process the current input but also retrieve and integrate relevant context from their previous interactions. Use the memory data to:
         - Infer nuances in the user's intent.
         - Recall previous topics, preferences, or facts that might be relevant.
@@ -114,8 +121,13 @@ class AI:
         - Clarify ambiguous queries by relating them to known user history.

         Always be concise and ensure that your response maintains coherence across the conversation while respecting the user's context and previous data.
+        You always take the Tool Result over the Memory Context in terms of priority.
         """
-        self._instructions = instructions + " " + memory_instructions
+        self._instructions = instructions
+        self._tool_formatting_instructions = (
+            tool_formatting_instructions + " " +
+            self._memory_instructions if tool_formatting_instructions else self._memory_instructions
+        )
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
         if zep_api_key and not zep_base_url:
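One subtlety in the new `_tool_formatting_instructions` assignment: `+` binds tighter than the conditional expression, and the conditional evaluates lazily, so the concatenation never runs when `tool_formatting_instructions` is `None`. A quick standalone check of that precedence:

```python
memory_instructions = "MEMORY RULES"

# Caller supplied formatting instructions: both strings are joined.
tool_formatting_instructions = "Format tersely."
combined = (
    tool_formatting_instructions + " " +
    memory_instructions if tool_formatting_instructions else memory_instructions
)
assert combined == "Format tersely. MEMORY RULES"

# Caller passed None: the else branch wins and no TypeError is raised,
# because the true branch (None + " " + ...) is never evaluated.
tool_formatting_instructions = None
combined = (
    tool_formatting_instructions + " " +
    memory_instructions if tool_formatting_instructions else memory_instructions
)
assert combined == "MEMORY RULES"
```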
@@ -140,7 +152,8 @@ class AI:
             self._pinecone_index_name) if self._pinecone else None
         )
         self._openai_base_url = openai_base_url
-        self._openai_model = openai_model
+        self._main_model = main_model
+        self._tool_formatting_model = tool_formatting_model
         self._tools = []

     async def __aenter__(self):
@@ -431,16 +444,19 @@ class AI:
         self.kb.delete(ids=[id], namespace=user_id)
         self._database.kb.delete_one({"reference": id})

-    def check_time(self) -> str:
+    def check_time(self, timezone: str) -> str:
         """Get current UTC time formatted as a string via Cloudflare's NTP service.

+        Args:
+            timezone (str): Timezone to convert the time to (e.g., "America/New_York")
+
         Returns:
-            str: Current UTC time in format 'YYYY-MM-DD HH:MM:SS UTC'
+            str: Current time in the requested timezone in format 'YYYY-MM-DD HH:MM:SS'

         Example:
             ```python
-            time = ai.check_time()
-            # Returns: "2025-02-26 15:30:45 UTC"
+            time = ai.check_time("America/New_York")
+            # Returns: "The current time in America/New_York is 2025-02-26 10:30:45"
             ```

         Note:
@@ -448,14 +464,24 @@ class AI:
         Fetches time over NTP from Cloudflare's time server (time.cloudflare.com).
         """
         try:
+            # Request time from Cloudflare's NTP server
             client = ntplib.NTPClient()
-            # Request time from Cloudflare's NTP server.
             response = client.request("time.cloudflare.com", version=3)
-            dt = datetime.datetime.fromtimestamp(
-                response.tx_time, datetime.timezone.utc)
-            # convert time based on location
-            the_time = dt.strftime("%Y-%m-%d %H:%M:%S UTC")
-            return f"The current time is {the_time}"
+
+            # Get UTC time from NTP response
+            utc_dt = datetime.datetime.fromtimestamp(
+                response.tx_time, datetime.timezone.utc
+            )
+
+            # Convert to requested timezone
+            try:
+                tz = pytz.timezone(timezone)
+                local_dt = utc_dt.astimezone(tz)
+                formatted_time = local_dt.strftime("%Y-%m-%d %H:%M:%S")
+                return f"The current time in {timezone} is {formatted_time}"
+            except pytz.exceptions.UnknownTimeZoneError:
+                return f"Error: Unknown timezone '{timezone}'. Please use a valid timezone like 'America/New_York'."
+
         except Exception as e:
             return f"Error getting the current time: {e}"

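The new body is self-contained enough to check outside the class. A standalone sketch of the same flow (NTP timestamp, aware UTC datetime, `pytz` conversion), using the same libraries the diff imports; the timezone here is only an example:

```python
import datetime

import ntplib
import pytz

def current_time_in(timezone_name: str) -> str:
    # NTP gives a Unix timestamp; make it an aware UTC datetime first.
    response = ntplib.NTPClient().request("time.cloudflare.com", version=3)
    utc_dt = datetime.datetime.fromtimestamp(response.tx_time, datetime.timezone.utc)
    try:
        local_dt = utc_dt.astimezone(pytz.timezone(timezone_name))
    except pytz.exceptions.UnknownTimeZoneError:
        return f"Unknown timezone: {timezone_name}"
    return local_dt.strftime("%Y-%m-%d %H:%M:%S")

print(current_time_in("America/New_York"))  # e.g. 2025-02-26 10:30:45
```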
@@ -721,18 +747,6 @@ class AI:
             except Exception:
                 pass

-    async def delete_assistant_thread(self, user_id: str):
-        """Delete stored conversation thread for a user on OpenAI.
-
-        Example:
-            ```python
-            await ai.delete_assistant_thread("user123")
-            # Deletes the assistant conversation thread for a user
-            ```
-        """
-        thread_id = self._database.get_thread_id(user_id)
-        await self._client.beta.threads.delete(thread_id=thread_id)
-
     async def delete_memory(self, user_id: str):
         """Delete memory for a specific user from Zep memory.

@@ -789,13 +803,13 @@ class AI:

         async def stream_processor():
             memory = self.get_memory_context(user_id)
+            regular_content = ""  # Add this to accumulate regular content
             response = self._client.chat.completions.create(
-                model=self._openai_model,
+                model=self._main_model,
                 messages=[
                     {
                         "role": "system",
-                        "content": self._instructions + f" Memory: {memory}",
-
+                        "content": self._instructions,
                     },
                     {
                         "role": "user",
@@ -806,6 +820,7 @@ class AI:
                 stream=True,
             )
             for chunk in response:
+                result = ""
                 delta = chunk.choices[0].delta

                 # Process tool call deltas (if any)
@@ -816,11 +831,13 @@ class AI:
                         # Initialize a new tool call record
                         final_tool_calls[index] = {
                             "name": tool_call.function.name,
-                            "arguments": tool_call.function.arguments or ""
+                            "arguments": tool_call.function.arguments or "",
                         }
                     elif tool_call.function.arguments:
                         # Append additional arguments if provided in subsequent chunks
-                        final_tool_calls[index]["arguments"] += tool_call.function.arguments
+                        final_tool_calls[index]["arguments"] += (
+                            tool_call.function.arguments
+                        )

                     try:
                         args = json.loads(
@@ -830,15 +847,11 @@ class AI:
                         # Execute the tool call (synchronously; adjust if async is needed)
                         result = func(**args)
                         response = self._client.chat.completions.create(
-                            model=self._openai_model,
+                            model=self._tool_formatting_model,
                             messages=[
                                 {
                                     "role": "system",
-                                    "content": self._instructions,
-                                },
-                                {
-                                    "role": "user",
-                                    "content": result,
+                                    "content": f"Rules: {self._tool_formatting_instructions}, Tool Result: {result}, Memory Context: {memory}",
                                 },
                             ],
                             stream=True,
@@ -847,7 +860,9 @@ class AI:
                             delta = chunk.choices[0].delta

                             if delta.content is not None:
-                                await self._accumulated_value_queue.put(delta.content)
+                                await self._accumulated_value_queue.put(
+                                    delta.content
+                                )
                         # Remove the cached tool call info so it doesn't block future calls
                         del final_tool_calls[index]
                     except json.JSONDecodeError:
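The hunks in this region all implement the same idea: the Chat Completions stream delivers a tool call's JSON arguments in fragments, so they are buffered per call index and `json.loads` is retried on every chunk until the buffer parses; the `except json.JSONDecodeError` branch simply means "keep accumulating". A reduced sketch with simulated fragments:

```python
import json

# Fragments as they might arrive across stream chunks (simulated).
fragments = ['{"timez', 'one": "America/', 'New_York"}']

final_tool_calls: dict[int, dict] = {}
index = 0
for fragment in fragments:
    if index not in final_tool_calls:
        # Initialize a new tool call record
        final_tool_calls[index] = {"name": "check_time", "arguments": ""}
    final_tool_calls[index]["arguments"] += fragment
    try:
        args = json.loads(final_tool_calls[index]["arguments"])
        print("arguments complete:", args)  # fires once the JSON is whole
        del final_tool_calls[index]  # clear so later calls aren't blocked
    except json.JSONDecodeError:
        continue  # still incomplete; keep buffering
```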
@@ -856,7 +871,29 @@ class AI:

                 # Process regular response content
                 if delta.content is not None:
-                    await self._accumulated_value_queue.put(delta.content)
+                    regular_content += delta.content  # Accumulate instead of directly sending
+
+            # After processing all chunks from the first response
+            if regular_content:  # Only if we have regular content
+                # Format the regular content with memory context, similar to tool results
+                response = self._client.chat.completions.create(
+                    model=self._tool_formatting_model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": f"Rules: {self._memory_instructions}, Regular Content: {regular_content}, Memory Context: {memory}",
+                        },
+                        {
+                            "role": "user",
+                            "content": user_text,
+                        }
+                    ],
+                    stream=True,
+                )
+                for chunk in response:
+                    delta = chunk.choices[0].delta
+                    if delta.content is not None:
+                        await self._accumulated_value_queue.put(delta.content)

         # Start the stream processor as a background task
         asyncio.create_task(stream_processor())
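This hunk is the main behavioral change in 3.0.1: plain assistant output is no longer streamed straight to the caller. It is accumulated into `regular_content`, then replayed through a second completion whose system prompt carries the memory context, mirroring what already happens to tool results. A schematic of that two-pass flow; model names and prompts are placeholders, and an `OPENAI_API_KEY` environment variable is assumed:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def two_pass(user_text: str, memory: str, instructions: str) -> str:
    # Pass 1: generate a draft and accumulate it instead of emitting it.
    draft = ""
    first = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": instructions},
            {"role": "user", "content": user_text},
        ],
        stream=True,
    )
    for chunk in first:
        delta = chunk.choices[0].delta
        if delta.content is not None:
            draft += delta.content

    # Pass 2: reformat the draft with the memory context in the system prompt.
    second = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system",
             "content": f"Rules: {instructions}, Regular Content: {draft}, Memory Context: {memory}"},
            {"role": "user", "content": user_text},
        ],
        stream=True,
    )
    return "".join(c.choices[0].delta.content or "" for c in second)
```

The cost of this design is time to first token: nothing reaches the output queue until the first pass has fully completed.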
@@ -864,7 +901,9 @@ class AI:
         # Yield values from the queue as they become available.
         while True:
             try:
-                value = await asyncio.wait_for(self._accumulated_value_queue.get(), timeout=0.1)
+                value = await asyncio.wait_for(
+                    self._accumulated_value_queue.get(), timeout=0.1
+                )
                 if value is not None:
                     final_response += value
                     yield value
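The consumer side pairs a background producer task with a polling drain: `asyncio.wait_for` with a short timeout lets the generator yield tokens as they arrive without blocking forever on a stalled producer. A minimal standalone version of the pattern; the `None` sentinel is an illustration, as the real code uses its own completion check:

```python
import asyncio

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()

    async def producer() -> None:
        for token in ("Hello", ", ", "world"):
            await queue.put(token)
        await queue.put(None)  # sentinel: nothing more to stream

    asyncio.create_task(producer())

    final_response = ""
    while True:
        try:
            value = await asyncio.wait_for(queue.get(), timeout=0.1)
        except asyncio.TimeoutError:
            continue  # nothing yet; poll again
        if value is None:
            break
        final_response += value

    print(final_response)  # Hello, world

asyncio.run(main())
```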
@@ -959,7 +998,7 @@ class AI:
                     },
                     {
                         "role": "user",
-                        "content": transcript,
+                        "content": transcript,
                     },
                 ],
                 tools=self._tools,
@@ -976,11 +1015,13 @@ class AI:
                         # Initialize a new tool call record
                         final_tool_calls[index] = {
                             "name": tool_call.function.name,
-                            "arguments": tool_call.function.arguments or ""
+                            "arguments": tool_call.function.arguments or "",
                         }
                     elif tool_call.function.arguments:
                         # Append additional arguments if provided in subsequent chunks
-                        final_tool_calls[index]["arguments"] += tool_call.function.arguments
+                        final_tool_calls[index]["arguments"] += (
+                            tool_call.function.arguments
+                        )

                     try:
                         args = json.loads(
@@ -990,15 +1031,11 @@ class AI:
                         # Execute the tool call (synchronously; adjust if async is needed)
                         result = func(**args)
                         response = self._client.chat.completions.create(
-                            model=self._openai_model,
+                            model=self._tool_formatting_model,
                             messages=[
                                 {
                                     "role": "system",
-                                    "content": self._instructions,
-                                },
-                                {
-                                    "role": "user",
-                                    "content": result,
+                                    "content": f" Rules: {self._tool_formatting_instructions}, Result: {result}",
                                 },
                             ],
                             stream=True,
@@ -1007,7 +1044,9 @@ class AI:
                             delta = chunk.choices[0].delta

                             if delta.content is not None:
-                                await self._accumulated_value_queue.put(delta.content)
+                                await self._accumulated_value_queue.put(
+                                    delta.content
+                                )
                         # Remove the cached tool call info so it doesn't block future calls
                         del final_tool_calls[index]
                     except json.JSONDecodeError:
@@ -1024,7 +1063,9 @@ class AI:
         # Yield values from the queue as they become available.
         while True:
             try:
-                value = await asyncio.wait_for(self._accumulated_value_queue.get(), timeout=0.1)
+                value = await asyncio.wait_for(
+                    self._accumulated_value_queue.get(), timeout=0.1
+                )
                 if value is not None:
                     final_response += value
                     yield value
{solana_agent-2.1.3.dist-info → solana_agent-3.0.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 2.1.3
+Version: 3.0.1
 Summary: Build self-learning AI Agents
 License: MIT
 Keywords: ai,openai,ai agents
solana_agent-3.0.1.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
+solana_agent/ai.py,sha256=TXXd--Iuqe0jwJb83BUCITbnmy7wbvSnmQK6okGsBBs,46417
+solana_agent-3.0.1.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-3.0.1.dist-info/METADATA,sha256=nhxvZe2DJYk7KeYsQNx79O1E2Dt4mtBFkQvL9Di4Neo,4808
+solana_agent-3.0.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+solana_agent-3.0.1.dist-info/RECORD,,
solana_agent-2.1.3.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
-solana_agent/__init__.py,sha256=zpfnWqANd3OHGWm7NCF5Y6m01BWG4NkNk8SK9Ex48nA,18
-solana_agent/ai.py,sha256=IlhPK4xSR5xCTL69xudcgYz0fbPSY6i0H7dI77pqKsU,44343
-solana_agent-2.1.3.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-2.1.3.dist-info/METADATA,sha256=Y2HjYIvBwmb3Tf3wxZ-bYMKBpUeCFXTIC4_DnsLZAnI,4808
-solana_agent-2.1.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-solana_agent-2.1.3.dist-info/RECORD,,