solana-agent 21.0.0__py3-none-any.whl → 22.0.0__py3-none-any.whl

This diff compares the contents of two package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
@@ -20,6 +20,7 @@ class OpenAIAdapter(LLMProvider):
  self.client = OpenAI(api_key=api_key)
  self.parse_model = "gpt-4o-mini"
  self.text_model = "gpt-4o-mini"
+ self.internet_search_model = "gpt-4o-mini-search-preview"
  self.transcription_model = "gpt-4o-mini-transcribe"
  self.tts_model = "tts-1"

@@ -105,6 +106,7 @@ class OpenAIAdapter(LLMProvider):
  self,
  prompt: str,
  system_prompt: str = "",
+ internet_search: bool = False,
  ) -> AsyncGenerator[str, None]: # pragma: no cover
  """Generate text from OpenAI models."""
  messages = []
@@ -114,11 +116,15 @@ class OpenAIAdapter(LLMProvider):

  messages.append({"role": "user", "content": prompt})

+ model = self.text_model
+ if internet_search:
+ model = self.internet_search_model
+
  # Prepare request parameters
  request_params = {
  "messages": messages,
  "stream": True,
- "model": self.text_model,
+ "model": model,
  }
  try:
  response = self.client.chat.completions.create(**request_params)
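Net effect of the two adapter hunks above: `generate_text` now routes to a search-enabled model whenever `internet_search` is set. Below is a hedged, standalone sketch of that flow against the OpenAI Python client; the model names mirror the hunk, while the system-prompt handling in the elided lines is an assumption.

```python
from typing import AsyncGenerator
from openai import OpenAI

TEXT_MODEL = "gpt-4o-mini"
INTERNET_SEARCH_MODEL = "gpt-4o-mini-search-preview"

async def generate_text_sketch(
    client: OpenAI, prompt: str, system_prompt: str = "", internet_search: bool = False
) -> AsyncGenerator[str, None]:
    """Illustrative re-creation of the adapter's new model routing (not the package code)."""
    messages = []
    if system_prompt:  # assumed: the elided adapter lines build the system message this way
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})

    # New in 22.0.0: use the regular text model unless internet search is requested.
    model = INTERNET_SEARCH_MODEL if internet_search else TEXT_MODEL

    response = client.chat.completions.create(messages=messages, stream=True, model=model)
    for chunk in response:
        if chunk.choices and chunk.choices[0].delta.content:
            yield chunk.choices[0].delta.content
```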
@@ -56,6 +56,7 @@ class SolanaAgent(SolanaAgentInterface):
  "flac", "mp3", "mp4", "mpeg", "mpga", "m4a", "ogg", "wav", "webm"
  ] = "mp4",
  router: Optional[RoutingInterface] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
  """Process a user message and return the response stream.

@@ -69,6 +70,7 @@ class SolanaAgent(SolanaAgentInterface):
  audio_output_format: Audio output format
  audio_input_format: Audio input format
  router: Optional routing service for processing
+ internet_search: Flag to use OpenAI Internet search
 
  Returns:
  Async generator yielding response chunks (text strings or audio bytes)
@@ -83,6 +85,7 @@ class SolanaAgent(SolanaAgentInterface):
  audio_input_format=audio_input_format,
  prompt=prompt,
  router=router,
+ internet_search=internet_search,
  ):
  yield chunk

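At the client level the new flag is simply forwarded into the query service. A hedged usage sketch follows; the config is abbreviated and hypothetical (a real config also needs an `agents` list, as in the README examples further down).

```python
import asyncio
from solana_agent import SolanaAgent

# Abbreviated, hypothetical config -- see the README examples below for a complete one.
solana_agent = SolanaAgent(config={"openai": {"api_key": "your-openai-api-key"}})

async def main():
    # internet_search defaults to True in 22.0.0; pass False to opt out.
    async for chunk in solana_agent.process(
        "user123", "What are the latest AI developments?", internet_search=True
    ):
        print(chunk, end="")

asyncio.run(main())
```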
@@ -74,17 +74,16 @@ class SolanaAgentFactory:
  if "api_key" not in config["zep"]:
  raise ValueError("Zep API key is required.")
  memory_provider = MemoryRepository(
- db_adapter, config["zep"].get("api_key"), config["zep"].get("base_url"))
+ mongo_adapter=db_adapter, zep_api_key=config["zep"].get("api_key"))

  if "mongo" in config and not "zep" in config:
- memory_provider = MemoryRepository(db_adapter)
+ memory_provider = MemoryRepository(mongo_adapter=db_adapter)

  if "zep" in config and not "mongo" in config:
  if "api_key" not in config["zep"]:
  raise ValueError("Zep API key is required.")
  memory_provider = MemoryRepository(
- zep_api_key=config["zep"].get("api_key"),
- zep_base_url=config["zep"].get("base_url")
+ zep_api_key=config["zep"].get("api_key")
  )

  # Create primary services
@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
  from typing import Any, AsyncGenerator, Dict, Literal, Optional, Union

  from solana_agent.interfaces.plugins.plugins import Tool
+ from solana_agent.interfaces.services.routing import RoutingService as RoutingInterface


  class SolanaAgent(ABC):
@@ -12,6 +13,7 @@ class SolanaAgent(ABC):
  self,
  user_id: str,
  message: Union[str, bytes],
+ prompt: Optional[str] = None,
  output_format: Literal["text", "audio"] = "text",
  audio_voice: Literal["alloy", "ash", "ballad", "coral", "echo",
  "fable", "onyx", "nova", "sage", "shimmer"] = "nova",
@@ -21,7 +23,8 @@ class SolanaAgent(ABC):
  audio_input_format: Literal[
  "flac", "mp3", "mp4", "mpeg", "mpga", "m4a", "ogg", "wav", "webm"
  ] = "mp4",
- prompt: Optional[str] = None,
+ router: Optional[RoutingInterface] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]:
  """Process a user message and return the response stream."""
  pass
@@ -15,6 +15,7 @@ class LLMProvider(ABC):
  self,
  prompt: str,
  system_prompt: str = "",
+ internet_search: bool = False,
  ) -> AsyncGenerator[str, None]:
  """Generate text from the language model."""
  pass
@@ -34,6 +34,7 @@ class AgentService(ABC):
  "flac", "mp3", "mp4", "mpeg", "mpga", "m4a", "ogg", "wav", "webm"
  ] = "mp4",
  prompt: Optional[str] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]:
  """Generate a response from an agent."""
  pass
@@ -20,6 +20,7 @@ class QueryService(ABC):
  "flac", "mp3", "mp4", "mpeg", "mpga", "m4a", "ogg", "wav", "webm"
  ] = "mp4",
  prompt: Optional[str] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]:
  """Process the user request and generate a response."""
  pass
@@ -1,7 +1,7 @@
+ from copy import deepcopy
  from typing import List, Dict, Any, Optional, Tuple
  from datetime import datetime, timezone
  from zep_cloud.client import AsyncZep as AsyncZepCloud
- from zep_python.client import AsyncZep
  from zep_cloud.types import Message
  from solana_agent.interfaces.providers.memory import MemoryProvider
  from solana_agent.adapters.mongodb_adapter import MongoDBAdapter
@@ -14,7 +14,6 @@ class MemoryRepository(MemoryProvider):
  self,
  mongo_adapter: Optional[MongoDBAdapter] = None,
  zep_api_key: Optional[str] = None,
- zep_base_url: Optional[str] = None
  ):
  """Initialize the combined memory provider."""
  if not mongo_adapter:
@@ -33,13 +32,10 @@ class MemoryRepository(MemoryProvider):
  except Exception as e:
  print(f"Error initializing MongoDB: {e}")

+ self.zep = None
  # Initialize Zep
- if zep_api_key and not zep_base_url:
+ if zep_api_key:
  self.zep = AsyncZepCloud(api_key=zep_api_key)
- elif zep_api_key and zep_base_url:
- self.zep = AsyncZep(api_key=zep_api_key, base_url=zep_base_url)
- else:
- self.zep = None

  async def store(self, user_id: str, messages: List[Dict[str, Any]]) -> None:
  """Store messages in both Zep and MongoDB."""
@@ -99,9 +95,10 @@ class MemoryRepository(MemoryProvider):
  zep_messages = []
  for msg in messages:
  if "role" in msg and "content" in msg:
+ content = self._truncate(deepcopy(msg["content"]))
  zep_msg = Message(
  role=msg["role"],
- content=msg["content"],
+ content=content,
  role_type=msg["role"],
  )
  zep_messages.append(zep_msg)
@@ -196,4 +193,4 @@ class MemoryRepository(MemoryProvider):
  return text[:last_period + 1]

  # If no period found, truncate at limit and add ellipsis
- return text[:limit] + "..."
+ return text[:limit-3] + "..."
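The old fallback appended the ellipsis after slicing to `limit`, so the result could be up to `limit + 3` characters; slicing to `limit - 3` keeps it within `limit`. A standalone check of the arithmetic (not the repository code itself):

```python
def truncate_fallback_old(text: str, limit: int) -> str:
    return text[:limit] + "..."        # up to limit + 3 characters

def truncate_fallback_new(text: str, limit: int) -> str:
    return text[:limit - 3] + "..."    # at most limit characters

sample = "x" * 100
assert len(truncate_fallback_old(sample, 50)) == 53   # overshoots the limit
assert len(truncate_fallback_new(sample, 50)) == 50   # stays within the limit
```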
@@ -176,29 +176,15 @@ class AgentService(AgentServiceInterface):
  "flac", "mp3", "mp4", "mpeg", "mpga", "m4a", "ogg", "wav", "webm"
  ] = "mp4",
  prompt: Optional[str] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
- """Generate a response with support for text/audio input/output.
-
- Args:
- agent_name: Agent name
- user_id: User ID
- query: Text query or audio bytes
- memory_context: Optional conversation context
- output_format: Response format ("text" or "audio")
- audio_voice: Voice to use for audio output
- audio_instructions: Optional instructions for audio synthesis
- audio_output_format: Audio output format
- audio_input_format: Audio input format
- prompt: Optional prompt for the agent
-
- Yields:
- Text chunks or audio bytes depending on output_format
- """
+ """Generate a response with support for text/audio input/output."""
  agent = next((a for a in self.agents if a.name == agent_name), None)
  if not agent:
  error_msg = f"Agent '{agent_name}' not found."
  if output_format == "audio":
- async for chunk in self.llm_provider.tts(error_msg, instructions=audio_instructions, response_format=audio_output_format, voice=audio_voice):
+ async for chunk in self.llm_provider.tts(error_msg, instructions=audio_instructions,
+ response_format=audio_output_format, voice=audio_voice):
  yield chunk
  else:
  yield error_msg
@@ -227,19 +213,24 @@ class AgentService(AgentServiceInterface):
  if prompt:
  system_prompt += f"\n\nADDITIONAL PROMPT: {prompt}"

- # Keep track of the complete text response
+ # Variables for tracking the response
  complete_text_response = ""
+
+ # For audio output, we'll collect everything first
+ full_response_buffer = ""
+
+ # Variables for handling JSON processing
  json_buffer = ""
  is_json = False
- text_buffer = ""

  # Generate and stream response
  async for chunk in self.llm_provider.generate_text(
  prompt=query_text,
  system_prompt=system_prompt,
+ internet_search=internet_search,
  ):
- # Check for JSON start
- if chunk.strip().startswith("{") and not is_json:
+ # Check if the chunk is JSON or a tool call
+ if (chunk.strip().startswith("{") or "{\"tool_call\":" in chunk) and not is_json:
  is_json = True
  json_buffer = chunk
  continue
@@ -253,107 +244,105 @@ class AgentService(AgentServiceInterface):

  # Valid JSON found, handle it
  if "tool_call" in data:
- # Process tool call with existing method
  response_text = await self._handle_tool_call(
  agent_name=agent_name,
  json_chunk=json_buffer
  )

- system_prompt = system_prompt + \
+ # Update system prompt to prevent further tool calls
+ tool_system_prompt = system_prompt + \
  "\n DO NOT make any tool calls or return JSON."

+ # Create prompt with tool response
  user_prompt = f"\n USER QUERY: {query_text} \n"
  user_prompt += f"\n TOOL RESPONSE: {response_text} \n"

- # Collect all processed text first
- processed_text = ""
- async for processed_chunk in self.llm_provider.generate_text(
- prompt=user_prompt,
- system_prompt=system_prompt,
- ):
- processed_text += processed_chunk
- # For text output, yield chunks as they come
- if output_format == "text":
+ # For text output, process chunks directly
+ if output_format == "text":
+ # Stream text response for text output
+ async for processed_chunk in self.llm_provider.generate_text(
+ prompt=user_prompt,
+ system_prompt=tool_system_prompt,
+ ):
+ complete_text_response += processed_chunk
  yield processed_chunk
-
- # Add to complete response
- complete_text_response += processed_text
-
- # For audio output, process the complete text
- if output_format == "audio":
- async for audio_chunk in self.llm_provider.tts(
- text=processed_text,
- voice=audio_voice,
- response_format=audio_output_format
+ else:
+ # For audio output, collect the full tool response first
+ tool_response = ""
+ async for processed_chunk in self.llm_provider.generate_text(
+ prompt=user_prompt,
+ system_prompt=tool_system_prompt,
  ):
- yield audio_chunk
+ tool_response += processed_chunk
+
+ # Add to our complete text record and full audio buffer
+ tool_response = self._remove_markdown(
+ tool_response)
+ complete_text_response += tool_response
+ full_response_buffer += tool_response
  else:
  # For non-tool JSON, still capture the text
  complete_text_response += json_buffer

- if output_format == "audio":
- async for audio_chunk in self.llm_provider.tts(
- text=json_buffer,
- voice=audio_voice,
- response_format=audio_output_format
- ):
- yield audio_chunk
- else:
+ if output_format == "text":
  yield json_buffer
+ else:
+ # Add to full response buffer for audio
+ full_response_buffer += json_buffer

  # Reset JSON handling
  is_json = False
  json_buffer = ""

  except json.JSONDecodeError:
+ # JSON not complete yet, continue collecting
  pass
  else:
- # For regular text, always add to the complete response
+ # For regular text
  complete_text_response += chunk

- # Handle audio buffering or direct text output
- if output_format == "audio":
- text_buffer += chunk
- if any(punct in chunk for punct in ".!?"):
- async for audio_chunk in self.llm_provider.tts(
- text=text_buffer,
- voice=audio_voice,
- response_format=audio_output_format
- ):
- yield audio_chunk
- text_buffer = ""
- else:
+ if output_format == "text":
+ # For text output, yield directly
  yield chunk
-
- # Handle any remaining text or incomplete JSON
- remaining_text = ""
- if text_buffer:
- remaining_text += text_buffer
- if is_json and json_buffer:
- remaining_text += json_buffer
-
- if remaining_text:
- # Add remaining text to complete response
- complete_text_response += remaining_text
-
- if output_format == "audio":
- async for audio_chunk in self.llm_provider.tts(
- text=remaining_text,
- voice=audio_voice,
- response_format=audio_output_format
- ):
- yield audio_chunk
+ else:
+ # For audio output, add to the full response buffer
+ full_response_buffer += chunk
+
+ # Handle any leftover JSON buffer
+ if json_buffer:
+ complete_text_response += json_buffer
+ if output_format == "text":
+ yield json_buffer
  else:
- yield remaining_text
-
- # Store the complete text response for the caller to access
- # This needs to be done in the query service using the self.last_text_response
+ full_response_buffer += json_buffer
+
+ # For audio output, now process the complete response
+ if output_format == "audio" and full_response_buffer:
+ # Clean markdown before TTS
+ full_response_buffer = self._remove_markdown(
+ full_response_buffer)
+
+ # Process the entire response with TTS
+ async for audio_chunk in self.llm_provider.tts(
+ text=full_response_buffer,
+ voice=audio_voice,
+ response_format=audio_output_format,
+ instructions=audio_instructions
+ ):
+ yield audio_chunk
+
+ # Store the complete text response
  self.last_text_response = complete_text_response

  except Exception as e:
  error_msg = f"I apologize, but I encountered an error: {str(e)}"
  if output_format == "audio":
- async for chunk in self.llm_provider.tts(error_msg, voice=audio_voice, response_format=audio_output_format):
+ async for chunk in self.llm_provider.tts(
+ error_msg,
+ voice=audio_voice,
+ response_format=audio_output_format,
+ instructions=audio_instructions
+ ):
  yield chunk
  else:
  yield error_msg
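Behaviourally, the biggest change in this hunk is that audio output is no longer synthesized sentence by sentence (the old `text_buffer` flushed on `.`, `!`, `?`); the whole reply is buffered, stripped of Markdown, and sent to TTS in a single pass. A hedged, self-contained sketch of that pattern, with the provider calls abstracted as plain callables rather than the real `llm_provider` API:

```python
from typing import AsyncGenerator, Callable

async def stream_audio_once(
    generate_text: Callable[[], AsyncGenerator[str, None]],
    tts: Callable[[str], AsyncGenerator[bytes, None]],
    remove_markdown: Callable[[str], str],
) -> AsyncGenerator[bytes, None]:
    """Buffer the full text response, strip Markdown, then run TTS exactly once."""
    full_response_buffer = ""
    async for chunk in generate_text():
        # 22.0.0 behaviour: no per-sentence flushing; everything is collected first.
        full_response_buffer += chunk

    full_response_buffer = remove_markdown(full_response_buffer)
    async for audio_chunk in tts(full_response_buffer):
        yield audio_chunk
```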
@@ -376,15 +365,31 @@ class AgentService(AgentServiceInterface):
  parameters = tool_data.get("parameters", {})

  if tool_name:
- result = await self.execute_tool(
- agent_name, tool_name, parameters)
+ # Execute the tool and get the result
+ result = await self.execute_tool(agent_name, tool_name, parameters)
+
  if result.get("status") == "success":
- return result.get("result", "")
+ tool_result = result.get("result", "")
+ return tool_result
  else:
- return f"I apologize, but I encountered an issue: {result.get('message', 'Unknown error')}"
- return json_chunk
- except json.JSONDecodeError:
+ error_message = f"I apologize, but I encountered an issue with the {tool_name} tool: {result.get('message', 'Unknown error')}"
+ print(f"Tool error: {error_message}")
+ return error_message
+ else:
+ return "Tool name was not provided in the tool call."
+ else:
+ print(f"JSON received but no tool_call found: {json_chunk}")
+
+ # If we get here, it wasn't properly handled as a tool
+ return f"The following request was not processed as a valid tool call:\n{json_chunk}"
+ except json.JSONDecodeError as e:
+ print(f"JSON decode error in tool call: {e}")
  return json_chunk
+ except Exception as e:
+ print(f"Unexpected error in tool call handling: {str(e)}")
+ import traceback
+ print(traceback.format_exc())
+ return f"Error processing tool call: {str(e)}"

  def _get_tool_usage_prompt(self, agent_name: str) -> str:
  """Generate JSON-based instructions for tool usage."""
@@ -421,3 +426,47 @@ class AgentService(AgentServiceInterface):
  - No explanation text before or after
  - Use exact tool names as shown in AVAILABLE TOOLS
  """
+
+ def _remove_markdown(self, text: str) -> str:
+ """Remove Markdown formatting and links from text.
+
+ Args:
+ text: Input text with potential Markdown formatting
+
+ Returns:
+ Clean text without Markdown formatting
+ """
+ import re
+
+ if not text:
+ return ""
+
+ # Remove Markdown links - [text](url) -> text
+ text = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text)
+
+ # Remove inline code with backticks
+ text = re.sub(r'`([^`]+)`', r'\1', text)
+
+ # Remove bold formatting - **text** or __text__ -> text
+ text = re.sub(r'(\*\*|__)(.*?)\1', r'\2', text)
+
+ # Remove italic formatting - *text* or _text_ -> text
+ text = re.sub(r'(\*|_)(.*?)\1', r'\2', text)
+
+ # Remove headers - ## Header -> Header
+ text = re.sub(r'^\s*#+\s*(.*?)$', r'\1', text, flags=re.MULTILINE)
+
+ # Remove blockquotes - > Text -> Text
+ text = re.sub(r'^\s*>\s*(.*?)$', r'\1', text, flags=re.MULTILINE)
+
+ # Remove horizontal rules (---, ***, ___)
+ text = re.sub(r'^\s*[-*_]{3,}\s*$', '', text, flags=re.MULTILINE)
+
+ # Remove list markers - * Item or - Item or 1. Item -> Item
+ text = re.sub(r'^\s*[-*+]\s+(.*?)$', r'\1', text, flags=re.MULTILINE)
+ text = re.sub(r'^\s*\d+\.\s+(.*?)$', r'\1', text, flags=re.MULTILINE)
+
+ # Remove multiple consecutive newlines (keep just one)
+ text = re.sub(r'\n{3,}', '\n\n', text)
+
+ return text.strip()
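A quick illustration of what the new cleaner does to a typical Markdown reply before it reaches TTS. This is a standalone snippet applying two of the same rules (links and bold), not the repository method itself:

```python
import re

markdown_reply = "**Solana Agent** now ships [built-in search](https://docs.solana-agent.com)."

# Same link and bold rules as the new _remove_markdown helper.
clean = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', markdown_reply)
clean = re.sub(r'(\*\*|__)(.*?)\1', r'\2', clean)

print(clean)  # Solana Agent now ships built-in search.
```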
@@ -49,6 +49,7 @@ class QueryService(QueryServiceInterface):
  ] = "mp4",
  prompt: Optional[str] = None,
  router: Optional[RoutingServiceInterface] = None,
+ internet_search: bool = True,
  ) -> AsyncGenerator[Union[str, bytes], None]: # pragma: no cover
  """Process the user request with appropriate agent.

@@ -62,6 +63,7 @@ class QueryService(QueryServiceInterface):
  audio_input_format: Audio input format
  prompt: Optional prompt for the agent
  router: Optional routing service for processing
+ internet_search: Flag to use OpenAI Internet search

  Yields:
  Response chunks (text strings or audio bytes)
@@ -119,6 +121,7 @@ class QueryService(QueryServiceInterface):
  audio_output_format=audio_output_format,
  audio_instructions=audio_instructions,
  prompt=prompt,
+ internet_search=internet_search,
  ):
  yield audio_chunk

@@ -137,6 +140,7 @@ class QueryService(QueryServiceInterface):
  memory_context=memory_context,
  output_format="text",
  prompt=prompt,
+ internet_search=internet_search,
  ):
  yield chunk
  full_text_response += chunk
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: solana-agent
- Version: 21.0.0
+ Version: 22.0.0
  Summary: Agentic IQ
  License: MIT
  Keywords: ai,openai,ai agents,agi
@@ -14,11 +14,10 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
- Requires-Dist: openai (>=1.68.2,<2.0.0)
- Requires-Dist: pydantic (>=2.11.1,<3.0.0)
+ Requires-Dist: openai (>=1.70.0,<2.0.0)
+ Requires-Dist: pydantic (>=2.11.2,<3.0.0)
  Requires-Dist: pymongo (>=4.11.3,<5.0.0)
- Requires-Dist: zep-cloud (>=2.8.0,<3.0.0)
- Requires-Dist: zep-python (>=2.0.2,<3.0.0)
+ Requires-Dist: zep-cloud (>=2.9.0,<3.0.0)
  Project-URL: Documentation, https://docs.solana-agent.com
  Project-URL: Repository, https://github.com/truemagic-coder/solana-agent
  Description-Content-Type: text/markdown
@@ -44,6 +43,7 @@ Build your AI business in three lines of code!
  * Multi-Agent Swarm
  * Multi-Modal Streaming
  * Conversational Memory & History
+ * Built-in Internet Search
  * Intelligent Routing
  * Business Alignment
  * Extensible Tooling
@@ -59,6 +59,7 @@ Build your AI business in three lines of code!
  * Designed for a multi-agent swarm
  * Seamless text and audio streaming with real-time multi-modal processing
  * Persistent memory that preserves context across all agent interactions
+ * Quick built-in Internet Search to answer your queries
  * Streamlined message history for all agent interactions
  * Intelligent query routing to agents with optimal domain expertise or your own custom routing
  * Unified value system ensuring brand-aligned agent responses
@@ -72,7 +73,7 @@ Build your AI business in three lines of code!
  * [Python](https://python.org) - Programming Language
  * [OpenAI](https://openai.com) - LLMs
  * [MongoDB](https://mongodb.com) - Conversational History (optional)
- * [Zep](https://getzep.com) - Conversational Memory (optional)
+ * [Zep Cloud](https://getzep.com) - Conversational Memory (optional)

  ## Installation

@@ -238,94 +239,18 @@ config = {
  ```python
  config = {
  "zep": {
- "api_key": "your-zep-api-key",
- "base_url": "your-zep-base-url", # not applicable if using Zep Cloud
+ "api_key": "your-zep-cloud-api-key",
  },
  }
  ```

- ## Plugins
+ ### Disable Internet Searching

- Plugins like Solana Agent Kit (sakit) integrate automatically with Solana Agent.
-
- `pip install sakit`
-
- ### Internet Search
- ```python
- from solana_agent import SolanaAgent
-
- config = {
- "openai": {
- "api_key": "your-openai-api-key",
- },
- "tools": {
- "search_internet": {
- "api_key": "your-perplexity-key", # Required
- "citations": True, # Optional, defaults to True
- "model": "sonar" # Optional, defaults to "sonar"
- },
- },
- "agents": [
- {
- "name": "research_specialist",
- "instructions": "You are an expert researcher who synthesizes complex information clearly.",
- "specialization": "Research and knowledge synthesis",
- "tools": ["search_internet"],
- },
- {
- "name": "customer_support",
- "instructions": "You provide friendly, helpful customer support responses.",
- "specialization": "Customer inquiries",
- }
- ],
- }
-
- solana_agent = SolanaAgent(config=config)
-
- async for response in solana_agent.process("user123", "What are the latest AI developments?"):
- print(response, end="")
- ```
-
- ### MCP
  ```python
- from solana_agent import SolanaAgent
-
- config = {
- "openai": {
- "api_key": "your-openai-api-key",
- },
- "tools": {
- "mcp": {
- "server_urls": [
- "http://mcp-server1.com/mcp",
- "http://mcp-server2.com/mcp",
- "http://mcp-server3.com/mcp"
- ]
- }
- },
- "agents": [
- {
- "name": "research_specialist",
- "instructions": "You are an expert researcher who synthesizes complex information clearly.",
- "specialization": "Research and knowledge synthesis",
- "tools": ["mcp"],
- },
- {
- "name": "customer_support",
- "instructions": "You provide friendly, helpful customer support responses.",
- "specialization": "Customer inquiries",
- }
- ],
- }
-
- solana_agent = SolanaAgent(config=config)
-
- async for response in solana_agent.process("user123", "What are the latest AI developments?"):
+ async for response in solana_agent.process("user123", "Write me a poem.", internet_search=False):
  print(response, end="")
  ```

- To create a plugin like Solana Agent Kit - read the [code](https://github.com/truemagic-coder/solana-agent-kit)
-
  ## Advanced

  ### Custom Inline Tools
@@ -410,6 +335,8 @@ async for response in solana_agent.process("user123", "What are the latest AI de

  ### Custom Prompt Injection at Runtime

+ Useful for Knowledge Base answers and FAQs
+
  ```python
  from solana_agent import SolanaAgent

@@ -1,22 +1,22 @@
  solana_agent/__init__.py,sha256=ceYeUpjIitpln8YK1r0JVJU8mzG6cRPYu-HLny3d-Tw,887
  solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMrF2Cg,204
- solana_agent/adapters/llm_adapter.py,sha256=3_7whVsPSJdlzBVUBlV7RBRCCo2dMXNmlACIrCoQxQ4,5426
+ solana_agent/adapters/llm_adapter.py,sha256=PsSkMrsSqZzXAL3NcQ9Zz7UCtyJYU0USvZ7uTD_I8NI,5629
  solana_agent/adapters/mongodb_adapter.py,sha256=qqEFbY_v1XGyFXBmwd5HSXSSHnA9wWo-Hm1vGEyIG0k,2718
  solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- solana_agent/client/solana_agent.py,sha256=dmrSBHyH7czn6qvitqA9iiHwUiDHtvWyz2Ok6paoJdw,5171
+ solana_agent/client/solana_agent.py,sha256=8mu0OdLBQnZXKS2mFrWvvY_4bykzhT73oyIxs9tmJvY,5318
  solana_agent/domains/__init__.py,sha256=HiC94wVPRy-QDJSSRywCRrhrFfTBeHjfi5z-QfZv46U,168
  solana_agent/domains/agent.py,sha256=WTo-pEc66V6D_35cpDE-kTsw1SJM-dtylPZ7em5em7Q,2659
  solana_agent/domains/routing.py,sha256=UDlgTjUoC9xIBVYu_dnf9-KG_bBgdEXAv_UtDOrYo0w,650
  solana_agent/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- solana_agent/factories/agent_factory.py,sha256=c5TgZpDZ6m_q8F-Bv36Rl4dXq5yWNvAN2q9Nq_PtqqM,5547
+ solana_agent/factories/agent_factory.py,sha256=mJQb1G0-gebizZvSVHm4NAxRMB1kemm2w_BAcYlN15Y,5496
  solana_agent/interfaces/__init__.py,sha256=IQs1WIM1FeKP1-kY2FEfyhol_dB-I-VAe2rD6jrVF6k,355
- solana_agent/interfaces/client/client.py,sha256=TIfuO206gIm7nqBopcSCLl8oR_mPuLfN4EV-yywmU18,1520
+ solana_agent/interfaces/client/client.py,sha256=RbZWS_YaAyav56cODECMNkYL0MJwA_8bFeUreZp26qo,1697
  solana_agent/interfaces/plugins/plugins.py,sha256=T8HPBsekmzVwfU_Rizp-vtzAeYkMlKMYD7U9d0Wjq9c,3338
  solana_agent/interfaces/providers/data_storage.py,sha256=NqGeFvAzhz9rr-liLPRNCGjooB2EIhe-EVsMmX__b0M,1658
- solana_agent/interfaces/providers/llm.py,sha256=f58kDrvESBfIr2XoZJ-VVa8vL56qyuhkZaRnZ2mx8l4,1509
+ solana_agent/interfaces/providers/llm.py,sha256=Fy7_iTI8sez2NVeE9lDed4W5cXx95HRy0ctEpIqr5N0,1548
  solana_agent/interfaces/providers/memory.py,sha256=oNOH8WZXVW8assDigIWZAWiwkxbpDiKupxA2RB6tQvQ,1010
- solana_agent/interfaces/services/agent.py,sha256=34luGrUF5FNXLhF6JXwbfOSuo_SbMOmLMywG310sMDw,2082
- solana_agent/interfaces/services/query.py,sha256=m8Uc0uXT3apSOhX3N1QjLMPk1KdJhj7HDrJjWUpDPBc,1309
+ solana_agent/interfaces/services/agent.py,sha256=7HOGcvvHTxeK-dMlqw460yqKwGd72JokhDIr3kzroVg,2120
+ solana_agent/interfaces/services/query.py,sha256=PGW2w60R615og28Bw6sS1cCcBN_26KkkOsYDYclS1KQ,1347
  solana_agent/interfaces/services/routing.py,sha256=UzJC-z-Q9puTWPFGEo2_CAhIxuxP5IRnze7S66NSrsI,397
  solana_agent/plugins/__init__.py,sha256=coZdgJKq1ExOaj6qB810i3rEhbjdVlrkN76ozt_Ojgo,193
  solana_agent/plugins/manager.py,sha256=Il49hXeqvu0b02pURNNp7mY8kp9_sqpi_vJIWBW5Hc0,5044
@@ -24,12 +24,12 @@ solana_agent/plugins/registry.py,sha256=5S0DlUQKogsg1zLiRUIGMHEmGYHtOovU-S-5W1Mw
  solana_agent/plugins/tools/__init__.py,sha256=c0z7ij42gs94_VJrcn4Y8gUlTxMhsFNY6ahIsNswdLk,231
  solana_agent/plugins/tools/auto_tool.py,sha256=DgES_cZ6xKSf_HJpFINpvJxrjVlk5oeqa7pZRBsR9SM,1575
  solana_agent/repositories/__init__.py,sha256=fP83w83CGzXLnSdq-C5wbw9EhWTYtqE2lQTgp46-X_4,163
- solana_agent/repositories/memory.py,sha256=eecl1P0fr_xFSWFKIJg99q90oCS9--ihPrMLH3G2AzM,7136
+ solana_agent/repositories/memory.py,sha256=mrpmNSQ0D_eLebNY-cBqtecVVpIGXE7s9jCzOWEAuR4,6984
  solana_agent/services/__init__.py,sha256=ab_NXJmwYUCmCrCzuTlZ47bJZINW0Y0F5jfQ9OovidU,163
- solana_agent/services/agent.py,sha256=MdSPIC81JNuP2hfzXNGWOnRfe7OxwYHgDVZAphVCCo8,16450
- solana_agent/services/query.py,sha256=QduAeiltFTwNDlAbC_emu544U4XGNioj-OauRGt9HSY,11070
+ solana_agent/services/agent.py,sha256=uLZvMl8U40H-Lbxsf6PFR4SSfggjByBUoumvB1Afduo,18441
+ solana_agent/services/query.py,sha256=gUIMJaTcGUjn7TuwJHE-CHMjQIdcYUNoxqJ3duE-QUg,11278
  solana_agent/services/routing.py,sha256=PMCSG5m3uLMaHMj3dxNvNfcFZaeaDi7kMr7AEBCzwDE,6499
- solana_agent-21.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
- solana_agent-21.0.0.dist-info/METADATA,sha256=k-idmj9YmU_lhJXaViBwZ563tl68Cl7Rp0b5rGvLHXM,15170
- solana_agent-21.0.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
- solana_agent-21.0.0.dist-info/RECORD,,
+ solana_agent-22.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+ solana_agent-22.0.0.dist-info/METADATA,sha256=sLsz9J8dpuTDP22Ach0YFD46qBzJMKEYD7T3iU1qVb8,13050
+ solana_agent-22.0.0.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+ solana_agent-22.0.0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.1.1
+ Generator: poetry-core 2.1.2
  Root-Is-Purelib: true
  Tag: py3-none-any