atom-audio-engine 0.1.4-py3-none-any.whl → 0.1.6-py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in the public registry.
Files changed (33)
  1. {atom_audio_engine-0.1.4.dist-info → atom_audio_engine-0.1.6.dist-info}/METADATA +1 -1
  2. atom_audio_engine-0.1.6.dist-info/RECORD +32 -0
  3. audio_engine/__init__.py +6 -2
  4. audio_engine/asr/__init__.py +48 -0
  5. audio_engine/asr/base.py +89 -0
  6. audio_engine/asr/cartesia.py +350 -0
  7. audio_engine/asr/deepgram.py +196 -0
  8. audio_engine/core/__init__.py +13 -0
  9. audio_engine/core/config.py +162 -0
  10. audio_engine/core/pipeline.py +278 -0
  11. audio_engine/core/types.py +87 -0
  12. audio_engine/integrations/__init__.py +5 -0
  13. audio_engine/integrations/geneface.py +297 -0
  14. audio_engine/llm/__init__.py +40 -0
  15. audio_engine/llm/base.py +106 -0
  16. audio_engine/llm/groq.py +208 -0
  17. audio_engine/pipelines/__init__.py +1 -0
  18. audio_engine/pipelines/personaplex/__init__.py +41 -0
  19. audio_engine/pipelines/personaplex/client.py +259 -0
  20. audio_engine/pipelines/personaplex/config.py +69 -0
  21. audio_engine/pipelines/personaplex/pipeline.py +301 -0
  22. audio_engine/pipelines/personaplex/types.py +173 -0
  23. audio_engine/pipelines/personaplex/utils.py +192 -0
  24. audio_engine/streaming/__init__.py +5 -0
  25. audio_engine/streaming/websocket_server.py +333 -0
  26. audio_engine/tts/__init__.py +35 -0
  27. audio_engine/tts/base.py +153 -0
  28. audio_engine/tts/cartesia.py +370 -0
  29. audio_engine/utils/__init__.py +15 -0
  30. audio_engine/utils/audio.py +218 -0
  31. atom_audio_engine-0.1.4.dist-info/RECORD +0 -5
  32. {atom_audio_engine-0.1.4.dist-info → atom_audio_engine-0.1.6.dist-info}/WHEEL +0 -0
  33. {atom_audio_engine-0.1.4.dist-info → atom_audio_engine-0.1.6.dist-info}/top_level.txt +0 -0
audio_engine/pipelines/personaplex/pipeline.py
@@ -0,0 +1,301 @@
+ """Main PersonaPlex pipeline orchestrator."""
+
+ import asyncio
+ import logging
+ from typing import AsyncIterator, Optional, Tuple
+
+ from .config import PersonaPlexConfig
+ from .client import PersonaPlexClient
+ from .types import MessageType, AudioChunk, TextChunk, SessionData
+ from .utils import generate_session_id, get_timestamp_iso, save_transcript
+
+ logger = logging.getLogger(__name__)
+
+
+ class PersonaPlexPipeline:
+     """
+     Full-duplex speech-to-speech pipeline using PersonaPlex.
+
+     This pipeline handles real-time bidirectional communication:
+     - Sends user audio to PersonaPlex
+     - Receives assistant audio and text streaming from PersonaPlex
+     - Maintains conversation transcript
+     - Optionally saves transcripts to disk
+
+     Unlike the audio-engine's sequential ASR→LLM→TTS pipeline, PersonaPlex
+     is truly full-duplex: user can speak while assistant responds simultaneously.
+
+     Approach:
+     1. Create session with UUID and timestamp
+     2. Connect client with system prompt
+     3. Launch concurrent receive task to handle server messages
+     4. Caller sends user audio; pipeline yields received audio/text chunks
+     5. On stop, save transcript and disconnect
+
+     Example:
+         ```python
+         pipeline = PersonaPlexPipeline(
+             system_prompt="You are a helpful AI.",
+             save_transcripts=True
+         )
+         await pipeline.start()
+
+         # Send user audio, receive assistant response
+         async for audio_chunk, text_chunk in pipeline.stream(user_audio_stream):
+             if audio_chunk:
+                 play_audio(audio_chunk)
+             if text_chunk:
+                 print(text_chunk.text, end="", flush=True)
+
+         transcript = await pipeline.stop()
+         ```
+     """
+
+     def __init__(
+         self,
+         config: Optional[PersonaPlexConfig] = None,
+         system_prompt: str = "You are a helpful AI assistant.",
+         save_transcripts: bool = True,
+         debug: bool = False,
+     ):
+         """
+         Initialize PersonaPlex pipeline.
+
+         Args:
+             config: PersonaPlexConfig (uses defaults if None)
+             system_prompt: System prompt for persona control
+             save_transcripts: Whether to save transcript after session
+             debug: Enable debug logging
+         """
+         self.config = config or PersonaPlexConfig()
+         self.config.text_prompt = system_prompt
+         self.config.save_transcripts = save_transcripts
+
+         self.system_prompt = system_prompt
+         self.client = PersonaPlexClient(self.config)
+
+         # Session state
+         self.session_id = generate_session_id()
+         self.session_data = SessionData(
+             session_id=self.session_id,
+             timestamp=get_timestamp_iso(),
+             system_prompt=system_prompt,
+             voice_prompt=self.config.voice_prompt,
+         )
+
+         self._is_running = False
+         self._receive_task: Optional[asyncio.Task] = None
+         self._audio_queue: asyncio.Queue[Optional[AudioChunk]] = asyncio.Queue()
+         self._text_queue: asyncio.Queue[Optional[TextChunk]] = asyncio.Queue()
+
+         if debug:
+             logging.basicConfig(level=logging.DEBUG)
+
+         logger.info(f"PersonaPlexPipeline initialized (session: {self.session_id})")
+
+     async def start(self) -> None:
+         """
+         Connect to PersonaPlex server and start listening for messages.
+
+         Raises:
+             ConnectionError: If connection fails
+         """
+         if self._is_running:
+             logger.warning("Pipeline already running")
+             return
+
+         try:
+             await self.client.connect(self.system_prompt)
+             self._is_running = True
+
+             # Start background task to receive messages
+             self._receive_task = asyncio.create_task(self._receive_loop())
+             logger.info("PersonaPlex pipeline started")
+
+         except Exception as e:
+             logger.error(f"Failed to start pipeline: {e}")
+             raise
+
+     async def stop(self) -> Optional[SessionData]:
+         """
+         Stop the pipeline, close connection, and optionally save transcript.
+
+         Returns:
+             SessionData with transcript if save_transcripts=True, else None
+         """
+         if not self._is_running:
+             logger.warning("Pipeline not running")
+             return None
+
+         try:
+             self._is_running = False
+
+             # Cancel receive task
+             if self._receive_task:
+                 self._receive_task.cancel()
+                 try:
+                     await self._receive_task
+                 except asyncio.CancelledError:
+                     pass
+
+             # Disconnect from server
+             await self.client.disconnect()
+
+             # Save transcript if enabled
+             if self.config.save_transcripts:
+                 transcript_path = save_transcript(
+                     self.session_data,
+                     self.config.transcript_path,
+                 )
+                 logger.info(f"Transcript saved: {transcript_path}")
+
+             logger.info("PersonaPlex pipeline stopped")
+             return self.session_data
+
+         except Exception as e:
+             logger.error(f"Error stopping pipeline: {e}")
+             raise
+
+     async def _receive_loop(self) -> None:
+         """
+         Background task: continuously receive messages from server.
+
+         Puts audio/text chunks into respective queues.
+         """
+         try:
+             async for message in self.client.stream_messages():
+                 if not self._is_running:
+                     break
+
+                 if message.type == MessageType.AUDIO:
+                     chunk = AudioChunk(
+                         data=message.data,  # type: ignore
+                         sample_rate=self.config.sample_rate,
+                     )
+                     await self._audio_queue.put(chunk)
+
+                 elif message.type == MessageType.TEXT:
+                     text = (
+                         message.data.decode("utf-8")
+                         if isinstance(message.data, bytes)
+                         else message.data
+                     )
+                     chunk = TextChunk(text=text)
+                     # Track in transcript
+                     if text and text.strip():
+                         self.session_data.add_message("assistant", text)
+                     await self._text_queue.put(chunk)
+
+                 elif message.type == MessageType.ERROR:
+                     error_msg = (
+                         message.data.decode("utf-8")
+                         if isinstance(message.data, bytes)
+                         else str(message.data)
+                     )
+                     logger.error(f"Server error: {error_msg}")
+
+         except asyncio.CancelledError:
+             logger.debug("Receive loop cancelled")
+         except Exception as e:
+             logger.error(f"Error in receive loop: {e}")
+
+     async def send_audio(self, audio_chunk: bytes) -> None:
+         """
+         Send audio chunk to PersonaPlex server.
+
+         Args:
+             audio_chunk: Raw Opus-encoded audio bytes
+         """
+         if not self._is_running:
+             raise RuntimeError("Pipeline not running")
+
+         try:
+             await self.client.send_audio(audio_chunk)
+             # Track in transcript (user audio sent)
+             # Note: We don't transcribe user audio; PersonaPlex returns text
+         except Exception as e:
+             logger.error(f"Failed to send audio: {e}")
+             raise
+
+     async def stream(
+         self,
+         audio_stream: Optional[AsyncIterator[bytes]] = None,
+     ) -> AsyncIterator[Tuple[Optional[AudioChunk], Optional[TextChunk]]]:
+         """
+         Stream bidirectional audio/text from PersonaPlex.
+
+         This is a generator that yields (audio_chunk, text_chunk) tuples.
+         If audio_stream is provided, sends user audio concurrently.
+
+         Approach:
+         - If audio_stream provided: spawn task to continuously send user audio
+         - Concurrently receive audio and text from server
+         - Yield (audio, text) tuples as they arrive (either can be None)
+
+         Args:
+             audio_stream: Optional async iterator of audio bytes to send
+
+         Yields:
+             Tuple of (AudioChunk or None, TextChunk or None)
+         """
+         if not self._is_running:
+             raise RuntimeError("Pipeline not running")
+
+         # Optional task to send user audio
+         send_task: Optional[asyncio.Task] = None
+
+         if audio_stream:
+
+             async def send_user_audio():
+                 """Background task: send audio from user stream."""
+                 try:
+                     async for audio_chunk in audio_stream:
+                         if not self._is_running:
+                             break
+                         await self.send_audio(audio_chunk)
+                 except asyncio.CancelledError:
+                     logger.debug("Send task cancelled")
+                 except Exception as e:
+                     logger.error(f"Error sending audio: {e}")
+
+             send_task = asyncio.create_task(send_user_audio())
+
+         try:
+             while self._is_running:
+                 # Wait for either audio or text (non-blocking)
+                 try:
+                     # Try to get audio (non-blocking)
+                     audio_chunk = self._audio_queue.get_nowait()
+                 except asyncio.QueueEmpty:
+                     audio_chunk = None
+
+                 try:
+                     # Try to get text (non-blocking)
+                     text_chunk = self._text_queue.get_nowait()
+                 except asyncio.QueueEmpty:
+                     text_chunk = None
+
+                 # If we got something, yield it
+                 if audio_chunk or text_chunk:
+                     yield (audio_chunk, text_chunk)
+                 else:
+                     # Nothing available, wait a bit before polling again
+                     await asyncio.sleep(0.01)
+
+         finally:
+             # Clean up send task
+             if send_task:
+                 send_task.cancel()
+                 try:
+                     await send_task
+                 except asyncio.CancelledError:
+                     pass
+
+     async def __aenter__(self) -> "PersonaPlexPipeline":
+         """Async context manager entry."""
+         await self.start()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Async context manager exit."""
+         await self.stop()
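A minimal usage sketch of the new pipeline module, using the async context manager it defines. `microphone_chunks()` and `play_audio()` are hypothetical placeholders for the caller's Opus-encoded capture source and playback sink; they are not part of this package.

```python
import asyncio

from audio_engine.pipelines.personaplex.pipeline import PersonaPlexPipeline


async def main():
    # __aenter__ calls start(); __aexit__ calls stop() and saves the transcript.
    async with PersonaPlexPipeline(system_prompt="You are a helpful AI.") as pipeline:
        # microphone_chunks(): placeholder async generator yielding Opus-encoded bytes.
        async for audio_chunk, text_chunk in pipeline.stream(microphone_chunks()):
            if audio_chunk:
                play_audio(audio_chunk.data)  # placeholder playback helper
            if text_chunk:
                print(text_chunk.text, end="", flush=True)
            # Note: stream() loops until stop() is called or the process is interrupted.


asyncio.run(main())
```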
audio_engine/pipelines/personaplex/types.py
@@ -0,0 +1,173 @@
+ """Data types for PersonaPlex speech-to-speech pipeline."""
+
+ from dataclasses import dataclass, field
+ from typing import Optional
+ from enum import Enum
+ from datetime import UTC, datetime
+
+
+ class MessageType(Enum):
+     """WebSocket message types for PersonaPlex protocol."""
+
+     HANDSHAKE = 0x00
+     AUDIO = 0x01
+     TEXT = 0x02
+     CONTROL = 0x03
+     METADATA = 0x04
+     ERROR = 0x05
+     PING = 0x06
+
+
+ @dataclass
+ class PersonaPlexMessage:
+     """
+     A message in the PersonaPlex WebSocket protocol.
+
+     Attributes:
+         type: Message type (audio, text, handshake, etc.)
+         data: Message payload (bytes for audio, str for text)
+         timestamp_ms: Optional timestamp in milliseconds
+     """
+
+     type: MessageType
+     data: bytes | str
+     timestamp_ms: Optional[int] = None
+
+     def encode(self) -> bytes:
+         """Encode message to binary format for transmission."""
+         type_byte = bytes([self.type.value])
+         if isinstance(self.data, bytes):
+             return type_byte + self.data
+         else:
+             return type_byte + self.data.encode("utf-8")
+
+     @classmethod
+     def decode(cls, data: bytes) -> "PersonaPlexMessage":
+         """Decode binary message from WebSocket."""
+         if len(data) < 1:
+             raise ValueError("Message too short")
+
+         msg_type = MessageType(data[0])
+         payload = data[1:]
+
+         # Text messages are UTF-8 decoded
+         if msg_type == MessageType.TEXT:
+             text_data = payload.decode("utf-8")
+             return cls(type=msg_type, data=text_data)
+         else:
+             return cls(type=msg_type, data=payload)
+
+
+ @dataclass
+ class TranscriptMessage:
+     """
+     A single message in the conversation transcript.
+
+     Attributes:
+         role: "user" or "assistant"
+         text: The message content
+         timestamp: ISO 8601 timestamp when message was generated
+     """
+
+     role: str
+     text: str
+     timestamp: str
+
+
+ @dataclass
+ class SessionData:
+     """
+     Metadata and transcript for a PersonaPlex session.
+
+     Attributes:
+         session_id: Unique session identifier (UUID)
+         timestamp: Session start time (ISO 8601)
+         system_prompt: System prompt used for the session
+         voice_prompt: Voice preset used (e.g., "NATF0.pt")
+         messages: List of transcript messages (user + assistant)
+     """
+
+     session_id: str
+     timestamp: str
+     system_prompt: str
+     voice_prompt: str
+     messages: list[TranscriptMessage] = field(default_factory=list)
+
+     def add_message(self, role: str, text: str) -> None:
+         """Add a message to the transcript."""
+         msg = TranscriptMessage(
+             role=role,
+             text=text,
+             timestamp=datetime.now(UTC).isoformat().replace("+00:00", "Z"),
+         )
+         self.messages.append(msg)
+
+     def to_dict(self) -> dict:
+         """Convert session data to dictionary for JSON serialization."""
+         return {
+             "session_id": self.session_id,
+             "timestamp": self.timestamp,
+             "system_prompt": self.system_prompt,
+             "voice_prompt": self.voice_prompt,
+             "messages": [
+                 {
+                     "role": msg.role,
+                     "text": msg.text,
+                     "timestamp": msg.timestamp,
+                 }
+                 for msg in self.messages
+             ],
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict) -> "SessionData":
+         """Create SessionData from dictionary."""
+         messages = [
+             TranscriptMessage(
+                 role=msg["role"],
+                 text=msg["text"],
+                 timestamp=msg.get("timestamp", ""),
+             )
+             for msg in data.get("messages", [])
+         ]
+         return cls(
+             session_id=data["session_id"],
+             timestamp=data["timestamp"],
+             system_prompt=data["system_prompt"],
+             voice_prompt=data["voice_prompt"],
+             messages=messages,
+         )
+
+
+ @dataclass
+ class AudioChunk:
+     """
+     A chunk of audio data from PersonaPlex.
+
+     Attributes:
+         data: Raw Opus-encoded audio bytes
+         sample_rate: Sample rate in Hz (typically 48000)
+         timestamp_ms: When this chunk was generated
+         is_final: Whether this is the last chunk in a sequence
+     """
+
+     data: bytes
+     sample_rate: int = 48000
+     timestamp_ms: Optional[int] = None
+     is_final: bool = False
+
+
+ @dataclass
+ class TextChunk:
+     """
+     A text token from PersonaPlex LLM output.
+
+     Attributes:
+         text: Text content (partial or complete word)
+         timestamp_ms: When this token was generated
+         is_final: Whether this is the last token in a sequence
+     """
+
+     text: str
+     timestamp_ms: Optional[int] = None
+     is_final: bool = False
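A short sketch of the wire framing implied by `PersonaPlexMessage.encode()`/`decode()` above: each frame is one type byte (the `MessageType` value) followed by the raw payload, with TEXT payloads carried as UTF-8. Illustrative round trip only, based on the dataclass as published.

```python
from audio_engine.pipelines.personaplex.types import MessageType, PersonaPlexMessage

# A text message: 1-byte type tag (0x02) followed by the UTF-8 payload.
msg = PersonaPlexMessage(type=MessageType.TEXT, data="hello")
frame = msg.encode()
assert frame == b"\x02hello"

# Decoding reverses it; TEXT payloads come back as str, everything else as bytes.
decoded = PersonaPlexMessage.decode(frame)
assert decoded.type is MessageType.TEXT and decoded.data == "hello"

# Audio frames keep their payload as raw bytes behind the 0x01 tag.
audio_frame = PersonaPlexMessage(type=MessageType.AUDIO, data=b"\x01\x02\x03").encode()
assert audio_frame[0] == 0x01 and audio_frame[1:] == b"\x01\x02\x03"
```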
audio_engine/pipelines/personaplex/utils.py
@@ -0,0 +1,192 @@
+ """Utility functions for PersonaPlex pipeline."""
+
+ import json
+ import logging
+ import uuid
+ from pathlib import Path
+ from datetime import datetime, UTC
+ from typing import Optional
+
+ from .types import SessionData
+
+ logger = logging.getLogger(__name__)
+
+
+ def generate_session_id() -> str:
+     """
+     Generate a unique session identifier.
+
+     Returns:
+         UUID4 string for this session
+     """
+     return str(uuid.uuid4())
+
+
+ def get_timestamp_iso() -> str:
+     """
+     Get current timestamp in ISO 8601 format with Z suffix.
+
+     Returns:
+         Timestamp string (e.g., "2026-02-03T10:30:45.123456Z")
+     """
+     return datetime.now(UTC).isoformat().replace("+00:00", "Z")
+
+
+ def save_transcript(
+     session_data: SessionData,
+     output_path: Optional[str] = None,
+ ) -> Path:
+     """
+     Save session transcript to JSON file.
+
+     Approach:
+     1. Convert SessionData to dictionary
+     2. Write as formatted JSON
+     3. Return path for verification
+
+     Args:
+         session_data: SessionData object with transcript
+         output_path: Directory to save transcript (default: ./transcripts/)
+
+     Returns:
+         Path to saved JSON file
+
+     Raises:
+         IOError: If file write fails
+     """
+     if output_path is None:
+         output_path = "./transcripts/"
+
+     output_dir = Path(output_path)
+     output_dir.mkdir(parents=True, exist_ok=True)
+
+     # Filename: session_id_YYYY-MM-DD.json
+     timestamp_str = session_data.timestamp.split("T")[0]  # Extract date
+     filename = f"{session_data.session_id}_{timestamp_str}.json"
+     filepath = output_dir / filename
+
+     try:
+         with open(filepath, "w") as f:
+             json.dump(session_data.to_dict(), f, indent=2)
+         logger.info(f"Transcript saved to {filepath}")
+         return filepath
+     except IOError as e:
+         logger.error(f"Failed to save transcript: {e}")
+         raise
+
+
+ def load_transcript(filepath: str | Path) -> SessionData:
+     """
+     Load session transcript from JSON file.
+
+     Args:
+         filepath: Path to transcript JSON file
+
+     Returns:
+         SessionData object
+
+     Raises:
+         FileNotFoundError: If file doesn't exist
+         json.JSONDecodeError: If JSON is invalid
+     """
+     filepath = Path(filepath)
+
+     try:
+         with open(filepath, "r") as f:
+             data = json.load(f)
+         logger.info(f"Loaded transcript from {filepath}")
+         return SessionData.from_dict(data)
+     except FileNotFoundError:
+         logger.error(f"Transcript file not found: {filepath}")
+         raise
+     except json.JSONDecodeError as e:
+         logger.error(f"Invalid JSON in transcript file: {e}")
+         raise
+
+
+ def list_transcripts(directory: str | Path = "./transcripts/") -> list[Path]:
+     """
+     List all transcript files in a directory.
+
+     Args:
+         directory: Path to transcripts directory
+
+     Returns:
+         List of Path objects for .json files, sorted by modification time (newest first)
+     """
+     dir_path = Path(directory)
+     if not dir_path.exists():
+         logger.warning(f"Transcripts directory does not exist: {directory}")
+         return []
+
+     transcripts = sorted(
+         dir_path.glob("*.json"),
+         key=lambda p: p.stat().st_mtime,
+         reverse=True,
+     )
+     return transcripts
+
+
+ def format_transcript_for_display(session_data: SessionData) -> str:
+     """
+     Format transcript as human-readable text.
+
+     Args:
+         session_data: SessionData object
+
+     Returns:
+         Formatted text with speaker labels and messages
+     """
+     lines = [
+         f"=== PersonaPlex Session ===",
+         f"Session ID: {session_data.session_id}",
+         f"Started: {session_data.timestamp}",
+         f"Voice: {session_data.voice_prompt}",
+         f"Prompt: {session_data.system_prompt}",
+         f"",
+         "--- Transcript ---",
+     ]
+
+     for msg in session_data.messages:
+         speaker = msg.role.upper()
+         lines.append(f"{speaker}: {msg.text}")
+         lines.append("")
+
+     return "\n".join(lines)
+
+
+ def cleanup_old_transcripts(
+     directory: str | Path = "./transcripts/",
+     max_age_days: int = 30,
+ ) -> int:
+     """
+     Delete transcripts older than specified number of days.
+
+     Args:
+         directory: Path to transcripts directory
+         max_age_days: Delete files older than this many days
+
+     Returns:
+         Number of files deleted
+     """
+     from datetime import timedelta
+     import time
+
+     dir_path = Path(directory)
+     if not dir_path.exists():
+         return 0
+
+     cutoff_time = time.time() - (max_age_days * 24 * 60 * 60)
+     deleted_count = 0
+
+     for transcript_file in dir_path.glob("*.json"):
+         if transcript_file.stat().st_mtime < cutoff_time:
+             try:
+                 transcript_file.unlink()
+                 logger.info(f"Deleted old transcript: {transcript_file}")
+                 deleted_count += 1
+             except OSError as e:
+                 logger.error(f"Failed to delete {transcript_file}: {e}")
+
+     logger.info(f"Cleaned up {deleted_count} old transcripts")
+     return deleted_count
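A brief round-trip sketch of the transcript helpers above: build a `SessionData`, save it, and load it back. The prompt and voice strings are illustrative values only (the "NATF0.pt" preset name is taken from the docstring example in types.py).

```python
from audio_engine.pipelines.personaplex.types import SessionData
from audio_engine.pipelines.personaplex.utils import (
    generate_session_id,
    get_timestamp_iso,
    save_transcript,
    load_transcript,
)

session = SessionData(
    session_id=generate_session_id(),
    timestamp=get_timestamp_iso(),
    system_prompt="You are a helpful AI.",
    voice_prompt="NATF0.pt",
)
session.add_message("user", "Hello there.")
session.add_message("assistant", "Hi! How can I help?")

# Writes ./transcripts/<session_id>_<YYYY-MM-DD>.json and returns the Path.
path = save_transcript(session)
restored = load_transcript(path)
assert restored.session_id == session.session_id
assert [m.text for m in restored.messages] == [m.text for m in session.messages]
```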
audio_engine/streaming/__init__.py
@@ -0,0 +1,5 @@
+ """Streaming and WebSocket server components."""
+
+ from .websocket_server import WebSocketServer
+
+ __all__ = ["WebSocketServer"]