solana-agent 3.0.1__tar.gz → 4.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {solana_agent-3.0.1 → solana_agent-4.0.0}/PKG-INFO +1 -1
- {solana_agent-3.0.1 → solana_agent-4.0.0}/pyproject.toml +1 -1
- {solana_agent-3.0.1 → solana_agent-4.0.0}/solana_agent/ai.py +73 -147
- {solana_agent-3.0.1 → solana_agent-4.0.0}/LICENSE +0 -0
- {solana_agent-3.0.1 → solana_agent-4.0.0}/README.md +0 -0
- {solana_agent-3.0.1 → solana_agent-4.0.0}/solana_agent/__init__.py +0 -0
@@ -70,9 +70,9 @@ class AI:
         pinecone_embed_model: Literal["llama-text-embed-v2"] = "llama-text-embed-v2",
         gemini_api_key: str = None,
         openai_base_url: str = None,
-
-
-
+        tool_calling_model: str = "gpt-4o-mini",
+        reasoning_model: str = "gpt-4o-mini",
+        reasoning_instructions: str = None,
     ):
         """Initialize a new AI assistant instance.

@@ -89,9 +89,9 @@ class AI:
             pinecone_embed_model (Literal["llama-text-embed-v2"], optional): Pinecone embedding model. Defaults to "llama-text-embed-v2"
             gemini_api_key (str, optional): API key for Gemini search. Defaults to None
             openai_base_url (str, optional): Base URL for OpenAI API. Defaults to None
-
-
-
+            tool_calling_model (str, optional): Model for tool calling. Defaults to "gpt-4o-mini"
+            reasoning_model (str, optional): Model for reasoning. Defaults to "gpt-4o-mini"
+            reasoning_instructions (str, optional): Instructions for reasoning. Defaults to None
         Example:
             ```python
             ai = AI(
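Read together with the signature change above, 4.0.0 splits model selection into a tool-calling pass and a reasoning pass and lets callers extend the reasoning rules. A minimal initialization sketch; only the three new keyword arguments come from this diff, while the remaining argument names and the `AI` import path are assumptions based on the surrounding docstring and package layout:

```python
# Hypothetical 4.0.0 initialization; argument names other than the three new
# parameters are assumed, not confirmed by this diff.
from solana_agent import AI  # assumed export from solana_agent/__init__.py

ai = AI(
    openai_api_key="sk-...",                       # assumed existing parameter
    instructions="You are a helpful Solana assistant.",
    database=mongo_database,                       # MongoDatabase instance (assumed)
    tool_calling_model="gpt-4o-mini",              # new: model that decides which tool to call
    reasoning_model="gpt-4o-mini",                 # new: model that writes the final streamed answer
    reasoning_instructions="Keep answers short.",  # new: prepended to the built-in memory instructions
)
```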
@@ -124,9 +124,10 @@ class AI:
         You always take the Tool Result over the Memory Context in terms of priority.
         """
         self._instructions = instructions
-        self.
-
-
+        self._reasoning_instructions = (
+            reasoning_instructions + " " + self._memory_instructions
+            if reasoning_instructions
+            else self._memory_instructions
         )
         self._database: MongoDatabase = database
         self._accumulated_value_queue = asyncio.Queue()
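The assignment above prepends any caller-supplied `reasoning_instructions` to the class's built-in memory instructions and falls back to the memory instructions alone. A standalone illustration of that fallback (the function name is hypothetical; in ai.py the second argument is `self._memory_instructions`):

```python
def combine_reasoning_instructions(reasoning_instructions, memory_instructions):
    # Custom reasoning rules, when given, are prepended to the memory rules;
    # otherwise only the memory rules are used.
    if reasoning_instructions:
        return reasoning_instructions + " " + memory_instructions
    return memory_instructions

assert combine_reasoning_instructions(None, "Use memory.") == "Use memory."
assert combine_reasoning_instructions("Be brief.", "Use memory.") == "Be brief. Use memory."
```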
@@ -152,8 +153,8 @@ class AI:
             self._pinecone_index_name) if self._pinecone else None
         )
         self._openai_base_url = openai_base_url
-        self.
-        self.
+        self._tool_calling_model = tool_calling_model
+        self._reasoning_model = reasoning_model
         self._tools = []

     async def __aenter__(self):
@@ -585,101 +586,6 @@ class AI:
         except Exception as e:
             return f"Failed to search Perplexity. Error: {e}"

-    # reason tool - has to be sync
-    def reason(
-        self,
-        user_id: str,
-        query: str,
-        prompt: str = "You combine the data with your reasoning to answer the query.",
-        use_perplexity: bool = True,
-        use_grok: bool = True,
-        use_kb: bool = True,
-        perplexity_model: Literal[
-            "sonar", "sonar-pro", "sonar-reasoning-pro", "sonar-reasoning"
-        ] = "sonar",
-        openai_model: Literal["o1", "o3-mini"] = "o3-mini",
-        grok_model: Literal["grok-2-latest"] = "grok-2-latest",
-        namespace: str = "global",
-    ) -> str:
-        """Combine multiple data sources with AI reasoning to answer queries.
-
-        Args:
-            user_id (str): Unique identifier for the user
-            query (str): The question or query to reason about
-            prompt (str, optional): Prompt for reasoning. Defaults to "You combine the data with your reasoning to answer the query."
-            use_perplexity (bool, optional): Include Perplexity search results. Defaults to True
-            use_grok (bool, optional): Include X/Twitter search results. Defaults to True
-            use_kb (bool, optional): Include Pinecone knowledge base search results. Defaults to True
-            perplexity_model (Literal, optional): Perplexity model to use. Defaults to "sonar"
-            openai_model (Literal, optional): OpenAI model for reasoning. Defaults to "o3-mini"
-            grok_model (Literal, optional): Grok model for X search. Defaults to "grok-beta"
-            namespace (str): Namespace of the Pinecone index to search. Defaults to "global"
-
-        Returns:
-            str: Reasoned response combining all enabled data sources or error message
-
-        Example:
-            ```python
-            result = ai.reason(
-                user_id="user123",
-                query="What are the latest AI trends?",
-            )
-            # Returns: "Based on multiple sources: [comprehensive answer]"
-            ```
-
-        Note:
-            This is a synchronous tool method required for OpenAI function calling.
-            Requires configuration of relevant API keys for enabled data sources.
-            Will gracefully handle missing or failed data sources.
-        """
-        try:
-            if use_kb:
-                try:
-                    kb_results = self.search_kb(query, namespace)
-                except Exception:
-                    kb_results = ""
-            else:
-                kb_results = ""
-
-            if use_perplexity:
-                try:
-                    search_results = self.search_internet(
-                        query, perplexity_model)
-                except Exception:
-                    search_results = ""
-            else:
-                search_results = ""
-
-            if use_grok:
-                try:
-                    x_search_results = self.search_x(query, grok_model)
-                except Exception:
-                    x_search_results = ""
-            else:
-                x_search_results = ""
-
-            if self._zep:
-                memory = self._sync_zep.memory.get(session_id=user_id)
-            else:
-                memory = ""
-
-            response = self._client.chat.completions.create(
-                model=openai_model,
-                messages=[
-                    {
-                        "role": "system",
-                        "content": prompt,
-                    },
-                    {
-                        "role": "user",
-                        "content": f"Query: {query}, Memory: {memory}, KB Results: {kb_results}, Internet Search Results: {search_results}, X Search Results: {x_search_results}",
-                    },
-                ],
-            )
-            return response.choices[0].message.content
-        except Exception as e:
-            return f"Failed to reason. Error: {e}"
-
     # x search tool - has to be sync
     def search_x(
         self, query: str, model: Literal["grok-2-latest"] = "grok-2-latest"
@@ -801,11 +707,21 @@ class AI:
         final_tool_calls = {}  # Accumulate tool call deltas
         final_response = ""

+        if self._zep:
+            messages = [
+                Message(
+                    role="user",
+                    role_type="user",
+                    content=user_text,
+                ),
+            ]
+            await self._zep.memory.add(session_id=user_id, messages=messages)
+
         async def stream_processor():
             memory = self.get_memory_context(user_id)
-            regular_content = ""
+            regular_content = ""
             response = self._client.chat.completions.create(
-                model=self.
+                model=self._tool_calling_model,
                 messages=[
                     {
                         "role": "system",
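4.0.0 writes the incoming user message to Zep before the stream processor runs, whereas 3.0.1 wrote both the user and assistant messages only after streaming finished (see the removal hunk further down). A minimal sketch of that call pattern; the zep-cloud import paths are assumptions about what ai.py already imports, while the `Message` fields and `memory.add` call mirror the added lines above:

```python
from zep_cloud.client import AsyncZep   # assumed import path used by ai.py
from zep_cloud.types import Message     # assumed import path used by ai.py

async def record_user_message(zep: AsyncZep, user_id: str, user_text: str) -> None:
    # Store the raw user message in the Zep session before any completion runs,
    # mirroring the lines added in the hunk above.
    messages = [
        Message(
            role="user",
            role_type="user",
            content=user_text,
        ),
    ]
    await zep.memory.add(session_id=user_id, messages=messages)
```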
@@ -847,11 +763,11 @@ class AI:
                        # Execute the tool call (synchronously; adjust if async is needed)
                        result = func(**args)
                        response = self._client.chat.completions.create(
-                            model=self.
+                            model=self._reasoning_model,
                            messages=[
                                {
                                    "role": "system",
-                                    "content": f"Rules: {self.
+                                    "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                                },
                            ],
                            stream=True,
@@ -871,22 +787,24 @@ class AI:

                # Process regular response content
                if delta.content is not None:
-                    regular_content +=
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending

            # After processing all chunks from the first response
            if regular_content:  # Only if we have regular content
                # Format the regular content with memory context, similar to tool results
                response = self._client.chat.completions.create(
-                    model=self.
+                    model=self._reasoning_model,
                    messages=[
                        {
                            "role": "system",
-                            "content": f"Rules: {self.
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
                        },
                        {
                            "role": "user",
                            "content": user_text,
-                        }
+                        },
                    ],
                    stream=True,
                )
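The hunk above changes the non-tool path from streaming deltas straight to the output queue to accumulating them and then running a second, rules-constrained completion over the original user text. A self-contained sketch of that two-pass pattern using the OpenAI SDK; the model name and prompt strings are placeholders rather than the package's exact values:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def two_pass_answer(user_text: str, rules: str, memory: str, model: str = "gpt-4o-mini") -> str:
    # First pass: stream a draft answer and accumulate it instead of emitting it.
    draft = ""
    for chunk in client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_text}],
        stream=True,
    ):
        delta = chunk.choices[0].delta
        if delta.content is not None:
            draft += delta.content

    if not draft:
        return ""

    # Second pass: re-answer the original text under the reasoning rules and
    # memory context, streaming the final wording back to the caller.
    final = ""
    for chunk in client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": f"Rules: {rules}, Memory Context: {memory}"},
            {"role": "user", "content": user_text},
        ],
        stream=True,
    ):
        delta = chunk.choices[0].delta
        if delta.content is not None:
            final += delta.content
    return final
```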
@@ -920,20 +838,6 @@ class AI:
            "timestamp": datetime.datetime.now(datetime.timezone.utc),
        }
        self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=user_text,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)

     async def conversation(
         self,
@@ -987,14 +891,25 @@ class AI:
        final_tool_calls = {}  # Accumulate tool call deltas
        final_response = ""

+        if self._zep:
+            messages = [
+                Message(
+                    role="user",
+                    role_type="user",
+                    content=transcript,
+                ),
+            ]
+            await self._zep.memory.add(session_id=user_id, messages=messages)
+
         async def stream_processor():
             memory = self.get_memory_context(user_id)
+            regular_content = ""  # Add this to accumulate regular content
             response = self._client.chat.completions.create(
-                model=self.
+                model=self._tool_calling_model,
                 messages=[
                     {
                         "role": "system",
-                        "content": self._instructions
+                        "content": self._instructions,
                     },
                     {
                         "role": "user",
@@ -1005,6 +920,7 @@ class AI:
                stream=True,
            )
            for chunk in response:
+                result = ""
                delta = chunk.choices[0].delta

                # Process tool call deltas (if any)
@@ -1031,11 +947,11 @@ class AI:
                        # Execute the tool call (synchronously; adjust if async is needed)
                        result = func(**args)
                        response = self._client.chat.completions.create(
-                            model=self.
+                            model=self._reasoning_model,
                            messages=[
                                {
                                    "role": "system",
-                                    "content": f"
+                                    "content": f"Rules: {self._reasoning_instructions}, Tool Result: {result}, Memory Context: {memory}",
                                },
                            ],
                            stream=True,
@@ -1055,7 +971,31 @@ class AI:

                # Process regular response content
                if delta.content is not None:
-
+                    regular_content += (
+                        delta.content
+                    )  # Accumulate instead of directly sending
+
+            # After processing all chunks from the first response
+            if regular_content:  # Only if we have regular content
+                # Format the regular content with memory context, similar to tool results
+                response = self._client.chat.completions.create(
+                    model=self._reasoning_model,
+                    messages=[
+                        {
+                            "role": "system",
+                            "content": f"Rules: {self._reasoning_instructions}, Memory Context: {memory}",
+                        },
+                        {
+                            "role": "user",
+                            "content": transcript,
+                        },
+                    ],
+                    stream=True,
+                )
+                for chunk in response:
+                    delta = chunk.choices[0].delta
+                    if delta.content is not None:
+                        await self._accumulated_value_queue.put(delta.content)

            # Start the stream processor as a background task
            asyncio.create_task(stream_processor())
@@ -1082,20 +1022,6 @@ class AI:
            "timestamp": datetime.datetime.now(datetime.timezone.utc),
        }
        self._database.save_message(user_id, metadata)
-        if self._zep:
-            messages = [
-                Message(
-                    role="user",
-                    role_type="user",
-                    content=transcript,
-                ),
-                Message(
-                    role="assistant",
-                    role_type="assistant",
-                    content=final_response,
-                ),
-            ]
-            await self._zep.memory.add(session_id=user_id, messages=messages)

         # Generate and stream the audio response
         with self._client.audio.speech.with_streaming_response.create(