solana-agent 31.2.5__py3-none-any.whl → 31.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
solana_agent/services/realtime.py

@@ -7,6 +7,7 @@ from typing import Any, AsyncGenerator, Dict, Optional
 from solana_agent.interfaces.providers.realtime import (
     BaseRealtimeSession,
     RealtimeSessionOptions,
+    RealtimeChunk,
 )
 from solana_agent.interfaces.providers.audio import AudioTranscoder
 
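The `RealtimeChunk` type itself is defined in `solana_agent/interfaces/providers/realtime.py` (whose hash changes in the RECORD section below) and is not shown in this diff. Purely as an orientation aid, here is a rough sketch of the shape implied by how chunks are built and consumed later in this diff (`RealtimeChunk(modality=..., data=...)`, `chunk.is_audio`, `chunk.audio_data`, `chunk.is_text`, `chunk.text_data`); the actual class may differ.

```python
# Approximation only, inferred from usage in this diff; not the package's definition.
from dataclasses import dataclass
from typing import Optional, Union


@dataclass
class RealtimeChunk:
    modality: str              # "audio" or "text"
    data: Union[bytes, str]    # audio bytes or transcript text delta

    @property
    def is_audio(self) -> bool:
        return self.modality == "audio"

    @property
    def is_text(self) -> bool:
        return self.modality == "text"

    @property
    def audio_data(self) -> Optional[bytes]:
        return self.data if self.is_audio and isinstance(self.data, bytes) else None

    @property
    def text_data(self) -> Optional[str]:
        return self.data if self.is_text and isinstance(self.data, str) else None
```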
@@ -95,11 +96,18 @@ class RealtimeService:
         }
 
         if output_mime or output_rate_hz is not None or voice is not None:
-            audio_patch["output"] = {
-                "format": "pcm16",  # session is fixed to PCM16 server-side
-                "voice": voice or self._options.voice,
-                "speed": 1.0,
-            }
+            # Only configure audio output if audio is in the output modalities
+            modalities = (
+                self._options.output_modalities
+                if self._options.output_modalities is not None
+                else ["audio"]
+            )
+            if "audio" in modalities:
+                audio_patch["output"] = {
+                    "format": "pcm16",  # session is fixed to PCM16 server-side
+                    "voice": voice or self._options.voice,
+                    "speed": 1.0,
+                }
 
         if audio_patch:
             patch["audio"] = audio_patch
@@ -173,6 +181,12 @@ class RealtimeService:
         await self._session.clear_input()
 
     # --- Out-of-band response (e.g., TTS without new audio) ---
+    async def create_conversation_item(
+        self, item: Dict[str, Any]
+    ) -> None:  # pragma: no cover
+        """Create a conversation item (e.g., for text input)."""
+        await self._session.create_conversation_item(item)
+
     async def create_response(  # pragma: no cover
         self, response_patch: Optional[Dict[str, Any]] = None
     ) -> None:
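The new `create_conversation_item` hook is what lets callers push a text turn into the realtime session before requesting a response. A minimal usage sketch; the helper is hypothetical, and the item dict assumes the OpenAI Realtime conversation-item shape, which may differ from what the session ultimately expects:

```python
from typing import Any, Dict


async def send_text_turn(realtime_service, text: str) -> None:
    # Hypothetical helper: build a user message item, then ask for a response.
    item: Dict[str, Any] = {
        "type": "message",
        "role": "user",
        "content": [{"type": "input_text", "text": text}],
    }
    await realtime_service.create_conversation_item(item)
    await realtime_service.create_response()
```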
@@ -194,8 +208,8 @@ class RealtimeService:
 
     async def iter_output_audio_encoded(
         self,
-    ) -> AsyncGenerator[bytes, None]:  # pragma: no cover
-        """Stream PCM16 audio, tolerating long tool executions by waiting while calls are pending.
+    ) -> AsyncGenerator[RealtimeChunk, None]:  # pragma: no cover
+        """Stream PCM16 audio as RealtimeChunk objects, tolerating long tool executions by waiting while calls are pending.
 
         - If no audio arrives immediately, we keep waiting as long as a function/tool call is pending.
         - Bridge across multiple audio segments (e.g., pre-call and post-call responses).
@@ -261,10 +275,89 @@ class RealtimeService:
             async for out in self._transcoder.stream_from_pcm16(
                 _produce_pcm(), self._client_output_mime, self._options.output_rate_hz
             ):
-                yield out
+                yield RealtimeChunk(modality="audio", data=out)
         else:
             async for chunk in _produce_pcm():
-                yield chunk
+                yield RealtimeChunk(modality="audio", data=chunk)
+
+    async def iter_output_combined(
+        self,
+    ) -> AsyncGenerator[RealtimeChunk, None]:  # pragma: no cover
+        """Stream both audio and text chunks as RealtimeChunk objects.
+
+        This method combines audio and text streams when both modalities are enabled.
+        Audio chunks are yielded as they arrive, and text chunks are yielded as transcript deltas arrive.
+        """
+
+        # Determine which modalities to stream based on session options
+        modalities = (
+            self._options.output_modalities
+            if self._options.output_modalities is not None
+            else ["audio"]
+        )
+        should_stream_audio = "audio" in modalities
+        should_stream_text = "text" in modalities
+
+        if not should_stream_audio and not should_stream_text:
+            return  # No modalities requested
+
+        # Create tasks for both streams if needed
+        tasks = []
+        queues = []
+
+        if should_stream_audio:
+            audio_queue = asyncio.Queue()
+            queues.append(audio_queue)
+
+            async def _collect_audio():
+                try:
+                    async for chunk in self.iter_output_audio_encoded():
+                        await audio_queue.put(chunk)
+                finally:
+                    await audio_queue.put(None)  # Sentinel
+
+            tasks.append(asyncio.create_task(_collect_audio()))
+
+        if should_stream_text:
+            text_queue = asyncio.Queue()
+            queues.append(text_queue)
+
+            async def _collect_text():
+                try:
+                    async for text_chunk in self.iter_output_transcript():
+                        if text_chunk:  # Only yield non-empty text chunks
+                            await text_queue.put(
+                                RealtimeChunk(modality="text", data=text_chunk)
+                            )
+                finally:
+                    await text_queue.put(None)  # Sentinel
+
+            tasks.append(asyncio.create_task(_collect_text()))
+
+        try:
+            # Collect chunks from all queues
+            active_queues = len(queues)
+
+            while active_queues > 0:
+                for queue in queues:
+                    try:
+                        chunk = queue.get_nowait()
+                        if chunk is None:
+                            active_queues -= 1
+                        else:
+                            yield chunk
+                    except asyncio.QueueEmpty:
+                        continue
+
+                # Small delay to prevent busy waiting
+                if active_queues > 0:
+                    await asyncio.sleep(0.01)
+
+        finally:
+            # Cancel all tasks
+            for task in tasks:
+                if not task.done():
+                    task.cancel()
 
     def iter_input_transcript(self) -> AsyncGenerator[str, None]:  # pragma: no cover
         return self._session.iter_input_transcript()
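A minimal sketch of consuming the new combined stream, assuming a configured `RealtimeService` instance; the demuxing simply mirrors the `modality` field used above (hypothetical helper, not part of the package):

```python
from typing import List


async def drain_combined(service) -> None:
    # Hypothetical consumer: split the combined stream back into audio bytes
    # and an incremental transcript using each chunk's modality.
    audio = bytearray()
    transcript: List[str] = []
    async for chunk in service.iter_output_combined():
        if chunk.modality == "audio":
            audio.extend(chunk.data)       # encoded (or PCM16) audio bytes
        elif chunk.modality == "text":
            transcript.append(chunk.data)  # transcript delta
    print(f"audio bytes: {len(audio)}, transcript: {''.join(transcript)!r}")
```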
@@ -368,11 +461,18 @@ class TwinRealtimeService:
             turn_detection = None
         audio_patch["input"] = {"format": "pcm16", "turn_detection": turn_detection}
         if output_rate_hz is not None or output_mime is not None or voice is not None:
-            audio_patch["output"] = {
-                "format": "pcm16",
-                "voice": voice or self._conv_opts.voice,
-                "speed": 1.0,
-            }
+            # Only configure audio output if audio is in the output modalities
+            modalities = (
+                self._conv_opts.output_modalities
+                if self._conv_opts.output_modalities is not None
+                else ["audio"]
+            )
+            if "audio" in modalities:
+                audio_patch["output"] = {
+                    "format": "pcm16",
+                    "voice": voice or self._conv_opts.voice,
+                    "speed": 1.0,
+                }
         if audio_patch:
             patch["audio"] = audio_patch
         if instructions is not None:
@@ -440,6 +540,12 @@ class TwinRealtimeService:
     async def clear_input(self) -> None:  # pragma: no cover
         await asyncio.gather(self._conv.clear_input(), self._trans.clear_input())
 
+    async def create_conversation_item(
+        self, item: Dict[str, Any]
+    ) -> None:  # pragma: no cover
+        """Create a conversation item (e.g., for text input)."""
+        await self._conv.create_conversation_item(item)
+
     async def create_response(
         self, response_patch: Optional[Dict[str, Any]] = None
     ) -> None:  # pragma: no cover
@@ -463,7 +569,7 @@ class TwinRealtimeService:
 
     async def iter_output_audio_encoded(
         self,
-    ) -> AsyncGenerator[bytes, None]:  # pragma: no cover
+    ) -> AsyncGenerator[RealtimeChunk, None]:  # pragma: no cover
         # Reuse the same encoding pipeline as RealtimeService but source from conversation
         pcm_gen = self._conv.iter_output_audio()
 
@@ -494,10 +600,10 @@ class TwinRealtimeService:
             async for out in self._transcoder.stream_from_pcm16(
                 _pcm_iter(), self._client_output_mime, self._conv_opts.output_rate_hz
             ):
-                yield out
+                yield RealtimeChunk(modality="audio", data=out)
         else:
             async for chunk in _pcm_iter():
-                yield chunk
+                yield RealtimeChunk(modality="audio", data=chunk)
 
     def iter_input_transcript(self) -> AsyncGenerator[str, None]:  # pragma: no cover
         return self._trans.iter_input_transcript()
solana_agent/services/routing.py

@@ -1,11 +1,3 @@
-"""
-Routing service implementation.
-
-This service manages query routing to appropriate agents using an LLM-based
-analysis. It defaults to a small, low-cost model for routing to minimize
-overhead while maintaining quality.
-"""
-
 import logging
 from typing import Dict, List, Optional, Any
 from solana_agent.interfaces.services.routing import (
@@ -81,17 +73,18 @@ class RoutingService(RoutingServiceInterface):
 
         USER QUERY: {query}
 
+        ROUTING RULES:
+        - Match the user query to the agent whose specialization best fits the user's intent
+        - Return the EXACT agent name that matches best
+
         INSTRUCTIONS:
-        - Look at the user query and match it to the most appropriate agent from the list above
-        - If the user mentions a specific topic or need that matches an agent's specialization, choose that agent
-        - Return the EXACT agent name (not the specialization description)
-
-        Please determine:
-        1. primary_agent: The exact name of the best matching agent (e.g., "onboarding", "support")
-        2. secondary_agents: Names of other agents that might help (empty list if none)
-        3. complexity_level: 1-5 (5 being most complex)
-        4. topics: Key topics mentioned
-        5. confidence: 0.0-1.0 (how confident you are in this routing decision)
+        - primary_agent: The exact name of the best matching agent (e.g., "onboarding", "event_feedback")
+        - secondary_agents: Other agents that might help (usually empty)
+        - complexity_level: 1-5 (5 being most complex)
+        - topics: Key topics mentioned
+        - confidence: 0.0-1.0 (how confident you are in this routing decision)
+
+        For the query "{query}", which agent should handle it?
         """
 
         try:
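The analysis fields listed in the prompt are parsed into a structured object (the surrounding code reads `analysis.primary_agent`, `analysis.confidence`, and so on). As a rough sketch of the kind of structured-output model this implies; the field names mirror the prompt, not necessarily the actual model in `solana_agent/domains/routing.py`:

```python
from typing import List

from pydantic import BaseModel, Field


class RoutingAnalysisSketch(BaseModel):
    """Hypothetical structured result for the routing prompt above."""

    primary_agent: str = Field(description="Exact name of the best matching agent")
    secondary_agents: List[str] = Field(default_factory=list)
    complexity_level: int = Field(ge=1, le=5)
    topics: List[str] = Field(default_factory=list)
    confidence: float = Field(ge=0.0, le=1.0)
```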
@@ -114,12 +107,14 @@ class RoutingService(RoutingServiceInterface):
                 "confidence": analysis.confidence,
             }
         except Exception as e:
-            logger.error(f"Error analyzing query: {e}")  # Use logger.error
+            logger.error(f"Error analyzing query: {e}")
+            logger.debug(f"Query that failed: {query}")
+            logger.debug(f"Available agents: {list(agents.keys())}")
             # Return default analysis on error
+            first_agent = list(agents.keys())[0] if agents else "general"
+            logger.debug(f"Defaulting to first agent: {first_agent}")
             return {
-                "primary_specialization": list(agents.keys())[0]
-                if agents
-                else "general",
+                "primary_specialization": first_agent,
                 "secondary_specializations": [],
                 "complexity_level": 1,
                 "topics": [],
solana_agent-31.2.5.dist-info/METADATA → solana_agent-31.3.0.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: solana-agent
-Version: 31.2.5
+Version: 31.3.0
 Summary: AI Agents for Solana
 License: MIT
 Keywords: solana,solana ai,solana agent,ai,ai agent,ai agents
@@ -15,7 +15,7 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Dist: instructor (==1.11.3)
-Requires-Dist: llama-index-core (==0.14.0)
+Requires-Dist: llama-index-core (==0.14.1)
 Requires-Dist: llama-index-embeddings-openai (==0.5.1)
 Requires-Dist: logfire (==4.7.0)
 Requires-Dist: openai (==1.107.2)
@@ -98,6 +98,7 @@ Smart workflows are as easy as combining your tools and prompts.
 * Simple agent definition using JSON
 * Designed for a multi-agent swarm
 * Fast multi-modal processing of text, audio, and images
+* Dual modality realtime streaming with simultaneous audio and text output
 * Smart workflows that keep flows simple and smart
 * Interact with the Solana blockchain with many useful tools
 * MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
@@ -132,7 +133,7 @@ Smart workflows are as easy as combining your tools and prompts.
 **OpenAI**
 * [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent & router)
 * [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
-* [gpt-realtime](https://platform.openai.com/docs/models/gpt-realtime) (realtime audio agent)
+* [gpt-realtime](https://platform.openai.com/docs/models/gpt-realtime) (realtime audio agent with dual modality support)
 * [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
 * [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
 
@@ -281,6 +282,7 @@ async for response in solana_agent.process("user123", "What is the latest news o
 ### Audio/Text Streaming
 
 ```python
+## Realtime Usage
 from solana_agent import SolanaAgent
 
 config = {
@@ -311,28 +313,32 @@ async for response in solana_agent.process("user123", audio_content, audio_input
 
 ### Realtime Audio Streaming
 
-If input and/or output is encoded (compressed) like mp4/aac then you must have `ffmpeg` installed.
+If input and/or output is encoded (compressed) like mp4/mp3 then you must have `ffmpeg` installed.
 
 Due to the overhead of the router (API call) - realtime only supports a single agent setup.
 
 Realtime uses MongoDB for memory so Zep is not needed.
 
+By default, when `realtime=True` and you supply raw/encoded audio bytes as input, the system **always skips the HTTP transcription (STT) path** and relies solely on the realtime websocket session for input transcription. If you don't specify `rt_transcription_model`, a sensible default (`gpt-4o-mini-transcribe`) is auto-selected so you still receive input transcript events with minimal latency.
+
+Implications:
+- `llm_provider.transcribe_audio` is never invoked for realtime turns.
+- Lower end-to-end latency (no duplicate network round trip for STT).
+- Unified transcript sourcing from realtime events.
+- If you explicitly want to disable transcription altogether, send text (not audio bytes) or ignore transcript events client-side.
+
 This example will work using expo-audio on Android and iOS.
 
 ```python
 from solana_agent import SolanaAgent
 
 solana_agent = SolanaAgent(config=config)
-
-audio_content = await audio_file.read()
-
-async def generate():
-    async for chunk in solana_agent.process(
-        user_id=user_id,
+        user_id="user123",
         message=audio_content,
         realtime=True,
         rt_encode_input=True,
         rt_encode_output=True,
+        rt_output_modalities=["audio"],
         rt_voice="marin",
         output_format="audio",
         audio_output_format="mp3",
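The realtime STT behaviour described in the hunk above boils down to a simple rule: the HTTP transcription path is never used for realtime turns, and the realtime transcription model falls back to a default when unset. Purely as an illustration of that rule (hypothetical helper, not the package's actual code):

```python
from typing import Optional


def resolve_rt_transcription_model(rt_transcription_model: Optional[str]) -> str:
    # Realtime turns never call llm_provider.transcribe_audio; input transcripts
    # come from the realtime websocket session, so only this model name matters.
    return rt_transcription_model or "gpt-4o-mini-transcribe"
```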
@@ -350,6 +356,106 @@ return StreamingResponse(
         "X-Accel-Buffering": "no",
     },
 )
+```
+
+### Realtime Text Streaming
+
+Due to the overhead of the router (API call) - realtime only supports a single agent setup.
+
+Realtime uses MongoDB for memory so Zep is not needed.
+
+When using realtime with text input, no audio transcription is needed. The same bypass rules apply—HTTP STT is never called in realtime mode.
+
+```python
+from solana_agent import SolanaAgent
+
+solana_agent = SolanaAgent(config=config)
+
+async def generate():
+    async for chunk in solana_agent.process(
+        user_id="user123",
+        message="What is the latest news on Solana?",
+        realtime=True,
+        rt_output_modalities=["text"],
+    ):
+        yield chunk
+```
+
+### Dual Modality Realtime Streaming
+
+Solana Agent supports **dual modality realtime streaming**, allowing you to stream both audio and text simultaneously from a single realtime session. This enables rich conversational experiences where users can receive both voice responses and text transcripts in real-time.
+
+#### Features
+- **Simultaneous Audio & Text**: Stream both modalities from the same conversation
+- **Flexible Output**: Choose audio-only, text-only, or both modalities
+- **Real-time Demuxing**: Automatically separate audio and text streams
+- **Mobile Optimized**: Works seamlessly with compressed audio formats (MP4/AAC)
+- **Memory Efficient**: Smart buffering and streaming for optimal performance
+
+#### Mobile App Integration Example
+
+```python
+from fastapi import UploadFile
+from fastapi.responses import StreamingResponse
+from solana_agent import SolanaAgent
+from solana_agent.interfaces.providers.realtime import RealtimeChunk
+import base64
+
+solana_agent = SolanaAgent(config=config)
+
+@app.post("/realtime/dual")
+async def realtime_dual_endpoint(audio_file: UploadFile):
+    """
+    Dual modality (audio + text) realtime endpoint using Server-Sent Events (SSE).
+    Emits:
+      event: audio (base64 encoded audio frames)
+      event: transcript (incremental text)
+    Notes:
+      - Do NOT set output_format when using both modalities.
+      - If only one modality is requested, plain str (text) or raw audio bytes may be yielded instead of RealtimeChunk.
+    """
+    audio_content = await audio_file.read()
+
+    async def event_stream():
+        async for chunk in solana_agent.process(
+            user_id="mobile_user",
+            message=audio_content,
+            realtime=True,
+            rt_encode_input=True,
+            rt_encode_output=True,
+            rt_output_modalities=["audio", "text"],
+            rt_voice="marin",
+            audio_input_format="mp4",
+            audio_output_format="mp3",
+            # Optionally lock transcription model (otherwise default is auto-selected):
+            # rt_transcription_model="gpt-4o-mini-transcribe",
+        ):
+            if isinstance(chunk, RealtimeChunk):
+                if chunk.is_audio and chunk.audio_data:
+                    b64 = base64.b64encode(chunk.audio_data).decode("ascii")
+                    yield f"event: audio\ndata: {b64}\n\n"
+                elif chunk.is_text and chunk.text_data:
+                    # Incremental transcript (not duplicated at finalize)
+                    yield f"event: transcript\ndata: {chunk.text_data}\n\n"
+                continue
+            # (Defensive) fallback: if something else appears
+            if isinstance(chunk, bytes):
+                b64 = base64.b64encode(chunk).decode("ascii")
+                yield f"event: audio\ndata: {b64}\n\n"
+            elif isinstance(chunk, str):
+                yield f"event: transcript\ndata: {chunk}\n\n"
+
+        yield "event: done\ndata: end\n\n"
+
+    return StreamingResponse(
+        event_stream(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-store",
+            "Access-Control-Allow-Origin": "*",
+        },
+    )
+```
 
 ### Image/Text Streaming
 
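The `/realtime/dual` SSE endpoint in the hunk above emits `audio`, `transcript`, and `done` events. A minimal client sketch under those assumptions (hypothetical consumer code, not part of the package), using `httpx` to read the stream and reassemble the two modalities:

```python
import asyncio
import base64

import httpx


async def consume_dual(url: str, audio_path: str) -> None:
    # Post an audio file and demux the SSE stream into audio bytes + transcript text.
    audio = bytearray()
    transcript: list[str] = []
    with open(audio_path, "rb") as f:
        files = {"audio_file": ("input.mp4", f.read(), "audio/mp4")}

    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", url, files=files) as resp:
            event = None
            async for line in resp.aiter_lines():
                if line.startswith("event: "):
                    event = line[len("event: "):]
                elif line.startswith("data: "):
                    data = line[len("data: "):]
                    if event == "audio":
                        audio.extend(base64.b64decode(data))
                    elif event == "transcript":
                        transcript.append(data)
                    elif event == "done":
                        break

    print(f"received {len(audio)} audio bytes; transcript: {''.join(transcript)!r}")


# asyncio.run(consume_dual("http://localhost:8000/realtime/dual", "question.mp4"))
```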
solana_agent-31.2.5.dist-info/RECORD → solana_agent-31.3.0.dist-info/RECORD

@@ -3,30 +3,31 @@ solana_agent/adapters/__init__.py,sha256=tiEEuuy0NF3ngc_tGEcRTt71zVI58v3dYY9RvMr
 solana_agent/adapters/ffmpeg_transcoder.py,sha256=d2T6hDBZe_beLkZTJiWSKeeOB47U12-dF4o4PXehnKU,12166
 solana_agent/adapters/mongodb_adapter.py,sha256=Hq3S8VzfLmnPjV40z8yJXGqUamOJcX5GbOMd-1nNWO4,3175
 solana_agent/adapters/openai_adapter.py,sha256=U3x6fMRmdvfvNt7M9-RKzV835WtPxNGrV1VRBMiRHV8,26714
-solana_agent/adapters/openai_realtime_ws.py,sha256=ytNanCCkewLlg6Ct37p9-8PFPV7uLPAxuvMlJu1f0j8,77633
+solana_agent/adapters/openai_realtime_ws.py,sha256=z9iJOmAw9skKqrO9d4fvCYl7ZPR-KVBhlYclBrMqQxI,84343
 solana_agent/adapters/pinecone_adapter.py,sha256=XlfOpoKHwzpaU4KZnovO2TnEYbsw-3B53ZKQDtBeDgU,23847
 solana_agent/cli.py,sha256=FGvTIQmKLp6XsQdyKtuhIIfbBtMmcCCXfigNrj4bzMc,4704
 solana_agent/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-solana_agent/client/solana_agent.py,sha256=hLtiR3xD1eFww7XRdg4dTvxlJnTCepilYmEfABn9L7E,10344
+solana_agent/client/solana_agent.py,sha256=MXqoIS5oD9tSdMj76Dqh0__YUqlYoqgkScDE7iYQkjM,10666
 solana_agent/domains/__init__.py,sha256=HiC94wVPRy-QDJSSRywCRrhrFfTBeHjfi5z-QfZv46U,168
 solana_agent/domains/agent.py,sha256=8pAi1-kIgzFNANt3dyQjw-1zbThcNdpEllbAGWi79uI,2841
-solana_agent/domains/routing.py,sha256=QBlNLzhme3zUDePKHOKycR-9LW6bu-QCM5_6L_EbGpg,727
+solana_agent/domains/routing.py,sha256=rb7YyeH4CJmjtuoBMk7f8kUFddcT5tUBZ9Bj8mJ4VV8,845
 solana_agent/factories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 solana_agent/factories/agent_factory.py,sha256=d9VuD5E9khqVXU_Qu67zKU2yVvXHK2EmercDmSZ4stk,14226
 solana_agent/guardrails/pii.py,sha256=FCz1IC3mmkr41QFFf5NaC0fwJrVkwFsxgyOCS2POO5I,4428
 solana_agent/interfaces/__init__.py,sha256=IQs1WIM1FeKP1-kY2FEfyhol_dB-I-VAe2rD6jrVF6k,355
-solana_agent/interfaces/client/client.py,sha256=VWMoxCflhxjwgmaCqDlU5Z9xSWgq0lrHa7ANagfsGVg,3660
+solana_agent/interfaces/client/client.py,sha256=XAp3BmY4N-RlYJHJOfGJl8PBEk9fiy0RsEDmmP7LLRk,3823
 solana_agent/interfaces/guardrails/guardrails.py,sha256=gZCQ1FrirW-mX6s7FoYrbRs6golsp-x269kk4kQiZzc,572
 solana_agent/interfaces/plugins/plugins.py,sha256=Rz52cWBLdotwf4kV-2mC79tRYlN29zHSu1z9-y1HVPk,3329
+solana_agent/interfaces/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 solana_agent/interfaces/providers/audio.py,sha256=CescIuGBEUQZ4XRyxb_1VYrO9x3Q80ilp-sxpYpxAyQ,1213
 solana_agent/interfaces/providers/data_storage.py,sha256=Y92Cq8BtC55VlsYLD7bo3ofqQabNnlg7Q4H1Q6CDsLU,1713
 solana_agent/interfaces/providers/llm.py,sha256=nerYO7QcbdSY44_YFqf_f4lftL0HbwC_G_er6oW80tw,3484
 solana_agent/interfaces/providers/memory.py,sha256=28X1LeS-bEac4yoIXdRPyuRU91oW9Kdt2NZtDmwSTxM,1360
-solana_agent/interfaces/providers/realtime.py,sha256=P0xKgMOWa0Zrp46g_Z9dzbWhjGdzRgnDeman-bc1xyQ,3089
+solana_agent/interfaces/providers/realtime.py,sha256=gbNKuTyfB5L1zbKhxAJaD8QiSaRr__0Xe0mvgrwpxkk,6557
 solana_agent/interfaces/providers/vector_storage.py,sha256=XPYzvoWrlDVFCS9ItBmoqCFWXXWNYY-d9I7_pvP7YYk,1561
 solana_agent/interfaces/services/agent.py,sha256=Hz3ldNb0NDMp8Rm9E3GM0L3kMAO3XLJ6U2HAh6gdPeU,2176
 solana_agent/interfaces/services/knowledge_base.py,sha256=Mu8lCGFXPmI_IW5LRGti7octLoWZIg4k5PmGwPfe7LQ,1479
-solana_agent/interfaces/services/query.py,sha256=jk-k6UeBFfWyZdPUr9imYLmlikTAuHegP0oWg2_ioew,2014
+solana_agent/interfaces/services/query.py,sha256=M_sq9TZl3tzXUe6Yz3RcCqinJStVGbmHsAOnIqgzUaM,2177
 solana_agent/interfaces/services/routing.py,sha256=Qbn3-DQGVSQKaegHDekSFmn_XCklA0H2f0XUx9-o3wA,367
 solana_agent/plugins/__init__.py,sha256=coZdgJKq1ExOaj6qB810i3rEhbjdVlrkN76ozt_Ojgo,193
 solana_agent/plugins/manager.py,sha256=mO_dKSVJ8GToD3wZflMcpKDEBXRoaaMRtY267HENCI0,5542
@@ -38,11 +39,11 @@ solana_agent/repositories/memory.py,sha256=cipt9eC5YApi8ozFXAV5xq7QxQJExJmVdgGjk
 solana_agent/services/__init__.py,sha256=iko0c2MlF8b_SA_nuBGFllr2E3g_JowOrOzGcnU9tkA,162
 solana_agent/services/agent.py,sha256=LWjsdmCeygwmjFoazOCVhrb0hdZHQDEQo_DFWZe57Lk,23133
 solana_agent/services/knowledge_base.py,sha256=ZvOPrSmcNDgUzz4bJIQ4LeRl9vMZiK9hOfs71IpB7Bk,32735
-solana_agent/services/query.py,sha256=oqNFbQsz2FiSswGkt8ZlNaOR8DAz66hgWXD5kHc7c-M,71428
-solana_agent/services/realtime.py,sha256=6_44-JaKN0V4gkizaisGLPsopM5Z8xymQcCbq5V3yEc,21054
-solana_agent/services/routing.py,sha256=V0tO5X7F8DMWl9TpDuRomqWM0fSihmpD4-FS34ckvw8,8735
-solana_agent-31.2.5.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
-solana_agent-31.2.5.dist-info/METADATA,sha256=U_uyayD3YB5uRkHX8yCXucKsSB4C_gh2L6jG3KrP5xw,31168
-solana_agent-31.2.5.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-solana_agent-31.2.5.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
-solana_agent-31.2.5.dist-info/RECORD,,
+solana_agent/services/query.py,sha256=VDiqSmq_JG-0WgJ0vtVex6_Gsf6y3BOtY0WPAFk9HkI,90619
+solana_agent/services/realtime.py,sha256=knjExZZd-a5_gvks9p2Sje9QKcAeZ_1oaLiREKFmsc0,25168
+solana_agent/services/routing.py,sha256=FXVeOwcJiZ77JEcr2Xbd_tJaWCTqcct7KJmwKtyl-io,8602
+solana_agent-31.3.0.dist-info/LICENSE,sha256=BnSRc-NSFuyF2s496l_4EyrwAP6YimvxWcjPiJ0J7g4,1057
+solana_agent-31.3.0.dist-info/METADATA,sha256=4vjIh4IH3UszJaJQMqmHDH0kCJTDQOcfg-qNVDl-BP4,35784
+solana_agent-31.3.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+solana_agent-31.3.0.dist-info/entry_points.txt,sha256=-AuT_mfqk8dlZ0pHuAjx1ouAWpTRjpqvEUa6YV3lmc0,53
+solana_agent-31.3.0.dist-info/RECORD,,