solana-agent 17.0.0__py3-none-any.whl → 17.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,9 +19,9 @@ class OpenAIAdapter(LLMProvider):
     def __init__(self, api_key: str):
         self.client = OpenAI(api_key=api_key)
         self.parse_model = "gpt-4o-mini"
-        self.search_model = "gpt-4o-mini-search-preview"
+        self.text_model = "gpt-4o-mini"
         self.transcription_model = "gpt-4o-mini-transcribe"
-        self.tts_model = "gpt-4o-mini-tts"
+        self.tts_model = "tts-1"
 
     async def tts(
         self,
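Two model swaps stand out in this hunk: the chat model drops the search-preview variant in favor of plain `gpt-4o-mini`, and the speech model moves from `gpt-4o-mini-tts` to `tts-1`. Since `tts-1` (unlike `gpt-4o-mini-tts`) does not accept an `instructions` parameter, the `instructions` kwarg is also removed from the tts call in the next hunk. A minimal standalone sketch of a `tts-1` call with the official `openai` Python SDK; the voice, input text, and output path are illustrative, not taken from the package:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Sketch only: synthesize speech with tts-1. Note there is no `instructions` kwarg.
with client.audio.speech.with_streaming_response.create(
    model="tts-1",
    voice="nova",              # illustrative voice choice
    input="Hello from Solana Agent.",
    response_format="aac",     # e.g. "mp3", "aac", "opus", "flac"
) as response:
    response.stream_to_file("reply.aac")
```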
@@ -48,7 +48,6 @@ class OpenAIAdapter(LLMProvider):
             model=self.tts_model,
             voice=voice,
             input=text,
-            instructions=instructions,
             response_format=response_format
         )
 
@@ -107,7 +106,7 @@ class OpenAIAdapter(LLMProvider):
         prompt: str,
         system_prompt: str = "",
     ) -> AsyncGenerator[str, None]: # pragma: no cover
-        """Generate text from OpenAI models with web search capability."""
+        """Generate text from OpenAI models."""
         messages = []
 
         if system_prompt:
@@ -119,7 +118,7 @@ class OpenAIAdapter(LLMProvider):
         request_params = {
             "messages": messages,
             "stream": True,
-            "model": self.search_model,
+            "model": self.text_model,
         }
         try:
             response = self.client.chat.completions.create(**request_params)
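For context, text generation now streams from the plain `gpt-4o-mini` chat model rather than the search-preview variant. A minimal standalone sketch of the same streaming pattern with the official `openai` SDK; the message contents are illustrative:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful agent."},
        {"role": "user", "content": "Summarize what Solana Agent does."},
    ],
    stream=True,
)
for chunk in stream:
    # Each streamed chunk carries an incremental delta of the assistant text.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```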
@@ -247,7 +247,8 @@ class AgentService(AgentServiceInterface):
             if memory_context:
                 system_prompt += f"\n\nMemory Context: {memory_context}"
 
-            # Buffer for collecting text when generating audio
+            json_buffer = ""
+            is_json = False
             text_buffer = ""
 
             # Generate and stream response
@@ -255,40 +256,93 @@ class AgentService(AgentServiceInterface):
                 prompt=query_text,
                 system_prompt=system_prompt,
             ):
-                if chunk.strip().startswith("{"):
-                    # Handle tool calls
-                    result = await self._handle_tool_call(
-                        agent_name, chunk,
-                    )
-                    if output_format == "audio":
-                        async for audio_chunk in self.llm_provider.tts(result, instructions=audio_instructions, response_format=audio_output_format, voice=audio_voice):
-                            yield audio_chunk
-                    else:
-                        yield result
+                # Check for JSON start
+                if chunk.strip().startswith("{") and not is_json:
+                    is_json = True
+                    json_buffer = chunk
+                    continue
+
+                # Collect JSON or handle normal text
+                if is_json:
+                    json_buffer += chunk
+                    try:
+                        # Try to parse complete JSON
+                        data = json.loads(json_buffer)
+
+                        # Valid JSON found, handle it
+                        if "tool_call" in data:
+                            # Process tool call with existing method
+                            response_text = await self._handle_tool_call(
+                                agent_name=agent_name,
+                                json_chunk=json_buffer
+                            )
+
+                            # Output response based on format
+                            if output_format == "audio":
+                                async for audio_chunk in self.llm_provider.tts(
+                                    text=response_text,
+                                    voice=audio_voice,
+                                    response_format=audio_output_format
+                                ):
+                                    yield audio_chunk
+                            else:
+                                yield response_text
+                        else:
+                            # Not a tool call, return as normal text
+                            if output_format == "audio":
+                                async for audio_chunk in self.llm_provider.tts(
+                                    text=json_buffer,
+                                    voice=audio_voice,
+                                    response_format=audio_output_format
+                                ):
+                                    yield audio_chunk
+                            else:
+                                yield json_buffer
+
+                        # Reset JSON handling
+                        is_json = False
+                        json_buffer = ""
+
+                    except json.JSONDecodeError:
+                        # Incomplete JSON, continue collecting
+                        pass
                 else:
+                    # Regular text processing
                     if output_format == "audio":
-                        # Buffer text until we have a complete sentence
                         text_buffer += chunk
                         if any(punct in chunk for punct in ".!?"):
                             async for audio_chunk in self.llm_provider.tts(
-                                text_buffer, instructions=audio_instructions, response_format=audio_output_format, voice=audio_voice
+                                text=text_buffer,
+                                voice=audio_voice,
+                                response_format=audio_output_format
                             ):
                                 yield audio_chunk
                             text_buffer = ""
                     else:
                         yield chunk
 
-            # Handle any remaining text in buffer
-            if output_format == "audio" and text_buffer:
+            # Handle any remaining text or incomplete JSON
+            remaining_text = ""
+            if text_buffer:
+                remaining_text += text_buffer
+            if is_json and json_buffer:
+                # If we have incomplete JSON at the end, yield it as text
+                remaining_text += json_buffer
+
+            if remaining_text and output_format == "audio":
                 async for audio_chunk in self.llm_provider.tts(
-                    text_buffer, instructions=audio_instructions, response_format=audio_output_format, voice=audio_voice
+                    text=remaining_text,
+                    voice=audio_voice,
+                    response_format=audio_output_format
                 ):
                     yield audio_chunk
+            elif remaining_text:
+                yield remaining_text
 
         except Exception as e:
             error_msg = f"I apologize, but I encountered an error: {str(e)}"
             if output_format == "audio":
-                async for chunk in self.llm_provider.tts(error_msg, instructions=audio_instructions, response_format=audio_output_format, voice=audio_voice):
+                async for chunk in self.llm_provider.tts(error_msg, voice=audio_voice, response_format=audio_output_format):
                     yield chunk
             else:
                 yield error_msg
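The rewritten streaming loop above buffers any chunk that opens with `{` until the accumulated text parses as JSON, dispatches it as a tool call when a `tool_call` key is present, otherwise emits it as plain text, and flushes any trailing incomplete JSON as text at the end of the stream. A self-contained sketch of that buffering pattern, with a hypothetical chunk generator standing in for the LLM stream and an illustrative tool-call payload shape:

```python
import asyncio
import json


async def fake_stream():
    # Hypothetical chunk sequence: a tool call split across chunks, then plain text.
    for piece in ['{"tool_call": {"name": ', '"search", "parameters": {}}}', "Done."]:
        yield piece


async def route_chunks(stream):
    """Buffer JSON-looking chunks until they parse, then dispatch; pass text through."""
    json_buffer, is_json = "", False
    async for chunk in stream:
        if chunk.strip().startswith("{") and not is_json:
            is_json, json_buffer = True, chunk
            continue
        if is_json:
            json_buffer += chunk
            try:
                data = json.loads(json_buffer)
            except json.JSONDecodeError:
                continue  # still incomplete, keep collecting
            if "tool_call" in data:
                yield f"[tool call: {data['tool_call']['name']}]"
            else:
                yield json_buffer  # valid JSON but not a tool call
            json_buffer, is_json = "", False
        else:
            yield chunk
    if is_json and json_buffer:
        yield json_buffer  # trailing incomplete JSON falls back to plain text


async def main():
    async for out in route_chunks(fake_stream()):
        print(out)


asyncio.run(main())
```

The sentence-level audio handling in the diff follows the same buffering idea: plain text accumulates until a sentence-ending punctuation mark arrives, then the buffer is handed to TTS and cleared.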
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 17.0.0
+Version: 17.0.2
 Summary: The Future of Work
 License: MIT
 Keywords: ai,openai,ai agents,agi
@@ -37,7 +37,6 @@ Description-Content-Type: text/markdown
 * Multi-modal input-output streaming with text or audio by AI Agents
 * Conversational memory per user shared by all AI Agents
 * Routing based on AI Agent specializations
-* Built-in Internet Search for all AI Agents
 * Organizational mission, values, goals, and guidance for all AI Agents
 * Robust AI Agent tool plugins based on standard python packages
 
@@ -109,11 +108,11 @@ async for response in solana_agent.process("user123", "What are the latest AI de
     print(response, end="")
 ```
 
-## LLMs Used
-* The model used for AI Agents is `gpt-4o-mini-search-preview`
+## Models Used
+* The model used for AI Agents is `gpt-4o-mini`
 * The model used for internal structured outputs is `gpt-4o-mini`
 * The model used for audio_transcription is `gpt-4o-mini-transcribe`
-* The model used for tts is `gpt-4o-mini-tts`
+* The model used for tts is `tts-1`
 
 ## Solana Agent Kit
 
@@ -1,6 +1,6 @@
 solana_agent/__init__.py,sha256=ceYeUpjIitpln8YK1r0JVJU8mzG6cRPYu-HLny3d-Tw,887
 solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMrF2Cg,204
-solana_agent/adapters/llm_adapter.py,sha256=9pnQr_386LK2sX38-BLWFjFqoYMxlmVZ6t-nByrcqN8,6133
+solana_agent/adapters/llm_adapter.py,sha256=hi2JYj6CvhNLJxznIv_7Ef7Y0mk7aztHd_OMYe2scMQ,6034
 solana_agent/adapters/mongodb_adapter.py,sha256=qqEFbY_v1XGyFXBmwd5HSXSSHnA9wWo-Hm1vGEyIG0k,2718
 solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 solana_agent/client/solana_agent.py,sha256=G0m6GD6N8J3tpjfLoKBMzIR3cZU9qs9cDBfY1mngvzI,4207
@@ -28,10 +28,10 @@ solana_agent/repositories/__init__.py,sha256=fP83w83CGzXLnSdq-C5wbw9EhWTYtqE2lQT
 solana_agent/repositories/agent.py,sha256=e1rnsQiigkKwJNLKro86a3b6TBiky3GMfmCRc5b_jPw,3187
 solana_agent/repositories/memory.py,sha256=GABGwaz00thjviHewLvb18NeKE8dkBROxy_stsiiWrE,4722
 solana_agent/services/__init__.py,sha256=ab_NXJmwYUCmCrCzuTlZ47bJZINW0Y0F5jfQ9OovidU,163
-solana_agent/services/agent.py,sha256=5BCzy8h37HAzvxiuOanu-RM_ARxwIdOUNie8xPLGDj0,14219
+solana_agent/services/agent.py,sha256=8pRl3_9SDtv3sN9TH91SqxARRALu_QDXkM2eD3fcUM0,16282
 solana_agent/services/query.py,sha256=N0RMcQm7o4B0MGrbFFrZ_Ar5z9r-UUDgn1xzgoxQtqg,10758
 solana_agent/services/routing.py,sha256=TPJ2Pas4acE93QzMEV6ZP670OtTNrVEPa76fz6urEV4,4996
-solana_agent-17.0.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-17.0.0.dist-info/METADATA,sha256=3zkN6kVcgK6ECfnmM5BBB0Oh7-jl4Tknz54TpKEIKfk,4956
-solana_agent-17.0.0.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-solana_agent-17.0.0.dist-info/RECORD,,
+solana_agent-17.0.2.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-17.0.2.dist-info/METADATA,sha256=8uenEIcctj_hWi4_sle2mOJxoUsQSGi1RmYwQhqrgNs,4888
+solana_agent-17.0.2.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+solana_agent-17.0.2.dist-info/RECORD,,