egrobots-sa-client 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,17 @@
1
+ """
2
+ Egrobots SA Client
3
+
4
+ Python client for Arabic Speech Analysis service.
5
+ """
6
+
7
+ from .client import EgrobotsSSAClient
8
+
9
+ __all__ = ["EgrobotsSSAClient"]
10
+
11
+ # Read version from VERSION file
12
+ try:
13
+ from pathlib import Path
14
+ _version_file = Path(__file__).parent.parent / "VERSION"
15
+ __version__ = _version_file.read_text().strip()
16
+ except Exception:
17
+ __version__ = "1.0.0"
@@ -0,0 +1,693 @@
1
+ """
2
+ Egrobots SSA Client
3
+
4
+ A Python client for the Egrobots Speech Sentiment Analysis service.
5
+ Supports both real-time streaming and batch processing modes.
6
+ """
7
+ import asyncio
8
+ import websockets
9
+ import json
10
+ import base64
11
+ import time
12
+ from pathlib import Path
13
+ from pydub import AudioSegment
14
+ import io
15
+ from typing import Dict, Optional, Callable, Union
16
+ from dotenv import load_dotenv
17
+ import os
18
+
19
+ # Load environment variables from .env file
20
+ # Try multiple locations to support both local and Docker deployments
21
+ dotenv_paths = [
22
+ Path(__file__).resolve().parents[2] / '.env', # Project root (local dev)
23
+ Path(__file__).resolve().parent / '.env', # Same dir as client (Docker)
24
+ Path.cwd() / '.env', # Current working directory
25
+ ]
26
+
27
+ for dotenv_path in dotenv_paths:
28
+ if dotenv_path.exists():
29
+ load_dotenv(dotenv_path=dotenv_path)
30
+ break
31
+
32
+
33
class EgrobotsSSAClient:
    """
    Client for Egrobots SSA service.

    Supports two modes:
    1. Real-time streaming: Stream audio chunks as they arrive (e.g., from microphone)
    2. Batch processing: Uploads complete audio file via HTTP POST

    Streaming endpoints:
    - /ws/stream-sentiment: For sentiment analysis
    - /ws/stream-conversation: For conversation analysis (with diarization)
    """

    def __init__(self):
        """
        Initialize the Egrobots SSA client.

        Reads WEBSOCKET_URL and BASE_URL from the environment (typically
        populated from a .env file at module import time).

        Raises:
            ValueError: If either WEBSOCKET_URL or BASE_URL is unset/empty.
        """
        self.websocket_url = os.getenv("WEBSOCKET_URL")
        self.http_url = os.getenv("BASE_URL")
        if not self.websocket_url or not self.http_url:
            raise ValueError("WEBSOCKET_URL and BASE_URL must be set in the .env file.")
        # Per-session streaming state; managed by start/end_streaming_session().
        self.websocket = None
        self.session_id = None
        self.is_streaming = False
        self.chunk_count = 0
        self._current_endpoint = "sentiment"  # Track which endpoint is being used
60
+
61
+ def _get_streaming_url(self, endpoint: str = "sentiment") -> str:
62
+ """
63
+ Get the appropriate WebSocket URL for the given endpoint.
64
+
65
+ Args:
66
+ endpoint: "sentiment" or "conversation"
67
+
68
+ Returns:
69
+ WebSocket URL for the specified endpoint
70
+ """
71
+ base_ws_url = self.websocket_url.rsplit('/ws/', 1)[0]
72
+ if endpoint == "conversation":
73
+ return f"{base_ws_url}/ws/stream-conversation"
74
+ return f"{base_ws_url}/ws/stream-sentiment"
75
+
76
async def start_streaming_session(
    self,
    endpoint: str = "sentiment",
    enable_translation: bool = False,
    progress_callback: Optional[Callable] = None
) -> bool:
    """
    Start a new streaming session.

    Connects to the endpoint's WebSocket, sends the "start" action and
    waits for the server to answer with status "ready".

    Args:
        endpoint: Analysis endpoint - "sentiment" or "conversation"
        enable_translation: Enable bilingual response (EN + AR) for conversation
        progress_callback: Optional callback for progress updates

    Returns:
        True if session started successfully, False otherwise.
    """
    try:
        self._current_endpoint = endpoint
        ws_url = self._get_streaming_url(endpoint)

        # Connect to WebSocket (client-side keepalive pings disabled).
        self.websocket = await websockets.connect(
            ws_url,
            ping_interval=None,
            close_timeout=10
        )

        if progress_callback:
            progress_callback('connected', {'url': ws_url, 'endpoint': endpoint})

        # Start session with options
        start_message = {"action": "start"}
        if endpoint == "conversation":
            start_message["enable_translation"] = enable_translation

        await self.websocket.send(json.dumps(start_message))
        response_data = json.loads(await self.websocket.recv())

        if response_data.get("status") != "ready":
            # BUGFIX: close the socket on a rejected handshake instead of
            # leaking the open connection.
            await self.websocket.close()
            self.websocket = None
            if progress_callback:
                progress_callback('error', {'message': str(response_data)})
            return False

        self.session_id = response_data.get("session_id")
        self.is_streaming = True
        self.chunk_count = 0

        if progress_callback:
            progress_callback('session_started', {
                'session_id': self.session_id,
                'mode': f'real-time streaming ({endpoint})',
                'endpoint': endpoint
            })

        return True

    except Exception as e:
        # BUGFIX: best-effort teardown so a half-open socket is not leaked
        # when connect/handshake fails partway through.
        if self.websocket is not None:
            try:
                await self.websocket.close()
            except Exception:
                pass
            self.websocket = None
        if progress_callback:
            progress_callback('error', {'message': str(e)})
        return False
138
+
139
async def send_audio_chunk(
    self,
    audio_data: bytes,
    audio_format: str = "mp3",
    progress_callback: Optional[Callable] = None
) -> bool:
    """
    Send one audio chunk to the active streaming session.

    Core primitive for real-time streaming - invoke it for every new chunk
    produced by your microphone or audio source.

    Args:
        audio_data: Raw audio bytes (in the specified format)
        audio_format: Audio format (mp3, wav, etc.)
        progress_callback: Optional callback for progress updates

    Returns:
        True if the chunk was sent and acknowledged, False otherwise.
    """
    if not (self.is_streaming and self.websocket):
        if progress_callback:
            progress_callback('error', {'message': 'No active streaming session'})
        return False

    try:
        payload = {
            "action": "chunk",
            "data": base64.b64encode(audio_data).decode('utf-8'),
            "format": audio_format,
        }
        await self.websocket.send(json.dumps(payload))

        # The server acknowledges every chunk; block until it does.
        ack = json.loads(await self.websocket.recv())

        self.chunk_count += 1

        if progress_callback:
            progress_callback('chunk_sent', {
                'chunk_number': self.chunk_count,
                'elapsed_seconds': ack.get('elapsed_time_seconds', 0),
                'chunk_size_mb': len(audio_data) / (1024 * 1024)
            })

        return True

    except Exception as e:
        if progress_callback:
            progress_callback('error', {'message': str(e)})
        return False
194
+
195
async def end_streaming_session(
    self,
    progress_callback: Optional[Callable] = None
) -> Dict:
    """
    End the streaming session and collect the analysis results.

    Sends the "end" action, waits for the intermediate "processing"
    message and then the final result, and always closes the WebSocket.

    Args:
        progress_callback: Optional callback for progress updates

    Returns:
        Dict containing the analysis result (plus processing_time_seconds,
        session_id and total_chunks), or {'error': ...} on failure.
    """
    if not self.is_streaming or not self.websocket:
        return {'error': 'No active streaming session'}

    try:
        processing_start = time.time()

        if progress_callback:
            progress_callback('streaming_complete', {
                'total_chunks': self.chunk_count
            })

        # Tell the server we are done sending chunks.
        await self.websocket.send(json.dumps({"action": "end"}))

        # First reply: a "processing" status message.
        processing_msg = json.loads(await self.websocket.recv())
        if progress_callback:
            progress_callback('processing', {
                'message': processing_msg.get('message', 'Processing...')
            })

        # Second reply: the final analysis result.
        result_data = json.loads(await self.websocket.recv())
        processing_time = time.time() - processing_start

        if result_data.get("status") == "complete":
            result = result_data.get("result", {})
            result['processing_time_seconds'] = processing_time
            result['session_id'] = self.session_id
            result['total_chunks'] = self.chunk_count
            if progress_callback:
                progress_callback('complete', result)
            return result

        if progress_callback:
            progress_callback('error', result_data)
        return {'error': result_data}

    except Exception as e:
        error = {'error': str(e)}
        if progress_callback:
            progress_callback('error', error)
        return error
    finally:
        # BUGFIX: always close the socket and clear the streaming flag —
        # previously an exception during send/recv left the connection
        # open and is_streaming stuck at True.
        self.is_streaming = False
        if self.websocket is not None:
            try:
                await self.websocket.close()
            except Exception:
                pass
261
+
262
async def stream_from_chunks(
    self,
    chunk_generator,
    endpoint: str = "sentiment",
    enable_translation: bool = False,
    progress_callback: Optional[Callable] = None
) -> Dict:
    """
    Stream audio chunks from an async generator (for real-time audio sources).

    Recommended entry point for TRUE real-time streaming where chunks
    arrive over time (e.g., from a microphone or live audio stream).

    Args:
        chunk_generator: An async generator that yields (audio_bytes, format) tuples
        endpoint: Analysis endpoint - "sentiment" or "conversation"
        enable_translation: Enable bilingual response (EN + AR) for conversation
        progress_callback: Optional callback for progress updates

    Returns:
        Dict containing analysis results

    Example:
        async def microphone_chunks():
            while recording:
                chunk = await get_audio_from_mic()  # Your audio capture logic
                yield chunk, "mp3"

        result = await client.stream_from_chunks(microphone_chunks())
    """
    # Start session with specified endpoint
    if not await self.start_streaming_session(endpoint, enable_translation, progress_callback):
        return {'error': 'Failed to start streaming session'}

    # Stream chunks as they arrive
    try:
        async for audio_data, audio_format in chunk_generator:
            sent = await self.send_audio_chunk(audio_data, audio_format, progress_callback)
            if not sent:
                # BUGFIX: tear the failed session down — previously the
                # websocket stayed open and is_streaming stayed True,
                # blocking any subsequent session.
                self.is_streaming = False
                if self.websocket is not None:
                    try:
                        await self.websocket.close()
                    except Exception:
                        pass
                return {'error': 'Failed to send audio chunk'}
    except Exception as e:
        # BUGFIX: same teardown when the chunk generator itself fails.
        self.is_streaming = False
        if self.websocket is not None:
            try:
                await self.websocket.close()
            except Exception:
                pass
        if progress_callback:
            progress_callback('error', {'message': f'Chunk generation error: {str(e)}'})
        return {'error': str(e)}

    # End session and get results
    return await self.end_streaming_session(progress_callback)
323
+
324
def _process_sentiment_analysis(
    self,
    audio_path: str,
    enable_translation: bool = False,
    progress_callback: Optional[Callable] = None
) -> Dict:
    """
    Run batch sentiment analysis by uploading a complete audio file.

    Args:
        audio_path: Path to audio file
        enable_translation: Enable bilingual response (EN + AR)
        progress_callback: Optional callback for progress updates

    Returns:
        Dict containing sentiment analysis results, or {'error': ...}.
    """
    import requests

    audio_file_path = Path(audio_path)

    if not audio_file_path.exists():
        error = {'error': f'File not found: {audio_path}'}
        if progress_callback:
            progress_callback('error', error)
        return error

    file_size_mb = audio_file_path.stat().st_size / (1024 * 1024)

    if progress_callback:
        progress_callback('loading', {
            'audio_path': audio_path,
            'file_size_mb': file_size_mb,
            'mode': 'batch_sentiment'
        })

    try:
        endpoint_url = f"{self.http_url}/analyze-sentiment"
        upload_start = time.time()

        with open(audio_file_path, 'rb') as audio_file:
            if progress_callback:
                progress_callback('uploading', {
                    'url': endpoint_url,
                    'file_size_mb': file_size_mb,
                    'endpoint': 'sentiment'
                })

            # Long timeout: large uploads plus server-side processing.
            response = requests.post(
                endpoint_url,
                files={'audio_file': audio_file},
                data={'enable_translation': str(enable_translation).lower()},
                timeout=3600
            )

        upload_time = time.time() - upload_start

        if response.status_code != 200:
            error = {
                'error': f'HTTP {response.status_code}',
                'message': response.text
            }
            if progress_callback:
                progress_callback('error', error)
            return error

        result = response.json()
        result['upload_time_seconds'] = upload_time
        result['file_size_mb'] = file_size_mb

        if progress_callback:
            progress_callback('complete', result)
        return result

    except Exception as e:
        error = {'error': str(e)}
        if progress_callback:
            progress_callback('error', error)
        return error
407
+
408
def _process_conversation_analysis(
    self,
    audio: Union[str, bytes],
    file_name: str = "audio.wav",
    enable_translation: bool = False,
    progress_callback: Optional[Callable] = None,
    max_retries: int = 3,
    retry_delay: int = 2
) -> Dict:
    """
    Run batch conversation analysis with retry logic.

    Args:
        audio: Path to the audio file or raw audio bytes.
        file_name: Name reported to the server when ``audio`` is bytes.
        enable_translation: Enable bilingual response (EN + AR). Default: False for faster processing.
        progress_callback: Optional callback for progress updates.
        max_retries: Maximum number of retry attempts for failed requests. Default: 3.
        retry_delay: Delay in seconds between retry attempts. Default: 2.

    Returns:
        A dictionary containing the conversation analysis results, or an
        {'error': ...} dictionary on failure.
    """
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    file_size_mb = 0
    mode = 'batch_conversation'

    # Validate/measure the input and report the 'loading' stage.
    if isinstance(audio, str):
        audio_file_path = Path(audio)
        if not audio_file_path.exists():
            error = {'error': f'File not found: {audio}'}
            if progress_callback:
                progress_callback('error', error)
            return error
        file_size_mb = audio_file_path.stat().st_size / (1024 * 1024)
        if progress_callback:
            progress_callback('loading', {'audio_path': audio, 'file_size_mb': file_size_mb, 'mode': mode})
    elif isinstance(audio, bytes):
        file_size_mb = len(audio) / (1024 * 1024)
        if progress_callback:
            progress_callback('loading', {'audio_path': file_name, 'file_size_mb': file_size_mb, 'mode': mode})

    # Transport-level (HTTP status) retries; connection-level errors are
    # additionally retried by the manual loop below.
    retry_strategy = Retry(
        total=max_retries,
        backoff_factor=retry_delay,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["POST"]
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    session = requests.Session()
    session.mount("https://", adapter)
    session.mount("http://", adapter)

    try:
        for attempt in range(max_retries + 1):
            try:
                upload_start = time.time()
                endpoint = f"{self.http_url}/analyze-conversation"

                if progress_callback:
                    retry_msg = f" (attempt {attempt + 1}/{max_retries + 1})" if attempt > 0 else ""
                    progress_callback('uploading', {
                        'url': endpoint,
                        'file_size_mb': file_size_mb,
                        'attempt': attempt + 1,
                        'message': f'Uploading{retry_msg}...'
                    })

                data = {'enable_translation': str(enable_translation).lower()}
                if isinstance(audio, str):
                    with open(audio, 'rb') as f:
                        response = session.post(
                            endpoint,
                            files={'audio_file': f},
                            data=data,
                            timeout=3600
                        )
                else:
                    response = session.post(
                        endpoint,
                        files={'audio_file': (file_name, audio, 'audio/wav')},
                        data=data,
                        timeout=3600
                    )

                upload_time = time.time() - upload_start

                if response.status_code == 200:
                    result = response.json()
                    result['upload_time_seconds'] = upload_time
                    result['file_size_mb'] = file_size_mb
                    if progress_callback:
                        progress_callback('complete', result)
                    return result

                error = {'error': f'HTTP {response.status_code}', 'message': response.text}
                if progress_callback:
                    progress_callback('error', error)
                return error

            except requests.exceptions.SSLError as e:
                error_msg = str(e)
                if attempt < max_retries:
                    if progress_callback:
                        progress_callback('retrying', {
                            'attempt': attempt + 1,
                            'max_retries': max_retries,
                            'error': 'SSL connection error',
                            'retry_delay': retry_delay
                        })
                    time.sleep(retry_delay)
                    continue
                error = {
                    'error': 'SSL Connection Error',
                    'message': 'Failed to establish secure connection with the server. Please check your internet connection and try again.',
                    'technical_details': error_msg
                }
                if progress_callback:
                    progress_callback('error', error)
                return error

            except requests.exceptions.ConnectionError as e:
                error_msg = str(e)
                if attempt < max_retries:
                    if progress_callback:
                        progress_callback('retrying', {
                            'attempt': attempt + 1,
                            'max_retries': max_retries,
                            'error': 'Connection error',
                            'retry_delay': retry_delay
                        })
                    time.sleep(retry_delay)
                    continue
                error = {
                    'error': 'Connection Error',
                    'message': 'Unable to connect to the server. Please check your internet connection and try again.',
                    'technical_details': error_msg
                }
                if progress_callback:
                    progress_callback('error', error)
                return error

            except requests.exceptions.Timeout as e:
                error = {
                    'error': 'Request Timeout',
                    'message': 'The request took too long to complete. The audio file may be too large.',
                    'technical_details': str(e)
                }
                if progress_callback:
                    progress_callback('error', error)
                return error

            except Exception as e:
                error_msg = str(e)
                # Heuristic: only transport-looking failures are retried.
                if attempt < max_retries and ('SSL' in error_msg or 'Connection' in error_msg):
                    if progress_callback:
                        progress_callback('retrying', {
                            'attempt': attempt + 1,
                            'max_retries': max_retries,
                            'error': error_msg,
                            'retry_delay': retry_delay
                        })
                    time.sleep(retry_delay)
                    continue
                error = {
                    'error': 'Unexpected Error',
                    'message': error_msg
                }
                if progress_callback:
                    progress_callback('error', error)
                return error

        # Defensive: every loop iteration returns or continues, but keep a
        # terminal error in case that invariant ever breaks.
        return {
            'error': 'Max retries exceeded',
            'message': 'Failed to complete request after multiple attempts'
        }
    finally:
        # BUGFIX: close the Session so pooled connections/adapters are
        # released; the original leaked the session on every call.
        session.close()
596
+
597
def batch_process(
    self,
    audio_path: str,
    endpoint: str = "sentiment",
    enable_translation: bool = False,
    progress_callback: Optional[Callable] = None
) -> Dict:
    """
    Gateway method for batch processing audio files.

    Routes the request to the sentiment or conversation analysis method
    depending on ``endpoint``.

    Args:
        audio_path: Path to audio file
        endpoint: Analysis endpoint - 'sentiment' or 'conversation'. Default: 'sentiment'
        enable_translation: Enable bilingual response (EN + AR). Default: False for faster processing.
        progress_callback: Optional callback function for progress updates

    Returns:
        Dict containing analysis results (sentiment or conversation based on endpoint)
    """
    # Reject anything but the two known endpoints up front.
    valid_endpoints = ['sentiment', 'conversation']
    if endpoint not in valid_endpoints:
        error = {'error': f'Invalid endpoint: {endpoint}. Must be one of {valid_endpoints}'}
        if progress_callback:
            progress_callback('error', error)
        return error

    if endpoint == 'conversation':
        return self._process_conversation_analysis(
            audio=audio_path,
            enable_translation=enable_translation,
            progress_callback=progress_callback
        )

    return self._process_sentiment_analysis(
        audio_path=audio_path,
        enable_translation=enable_translation,
        progress_callback=progress_callback
    )
638
+
639
async def stream_realtime_async(
    self,
    audio_path: str,
    chunk_duration_seconds: int = 30,
    progress_callback: Optional[Callable] = None
) -> Dict:
    """
    Stream an audio file to the sentiment endpoint in fixed-duration chunks.

    BUGFIX: the original delegated to ``self.stream_realtime``, which is
    not defined anywhere in this class, so every call raised
    AttributeError.  The method now slices the file with pydub and feeds
    the pieces through stream_from_chunks().

    Args:
        audio_path: Path to the audio file to stream.
        chunk_duration_seconds: Duration of each streamed chunk.
        progress_callback: Optional callback for progress updates.

    Returns:
        Dict containing the analysis results (or {'error': ...}).
    """
    # AudioSegment/io come from this module's top-level imports.
    segment = AudioSegment.from_file(audio_path)
    step_ms = max(1, int(chunk_duration_seconds * 1000))

    async def _chunks():
        # Export each slice as mp3 bytes, matching the streaming protocol.
        for start_ms in range(0, len(segment), step_ms):
            buffer = io.BytesIO()
            segment[start_ms:start_ms + step_ms].export(buffer, format="mp3")
            yield buffer.getvalue(), "mp3"

    return await self.stream_from_chunks(_chunks(), progress_callback=progress_callback)
647
+
648
def get_session_info(self) -> Dict:
    """Return a snapshot of the current streaming-session state."""
    info = {}
    info['session_id'] = self.session_id
    info['chunk_count'] = self.chunk_count
    info['is_streaming'] = self.is_streaming
    return info
655
+
656
@staticmethod
def extract_language_version(result: Dict, language: str = 'EN') -> Dict:
    """
    Extract one language's view from a bilingual (EN/AR) response.

    Args:
        result: Analysis result (may be bilingual with EN/AR keys)
        language: Language to extract ('EN' or 'AR'; case-insensitive)

    Returns:
        Dict with that language's content plus any non-language metadata
        keys; non-bilingual results are returned unchanged.
    """
    lang_key = language.upper()
    if lang_key not in result:
        # Not a bilingual response — nothing to extract.
        return result

    # Copy so the caller's result dict is never mutated.
    extracted = dict(result[lang_key])
    # Carry over metadata/technical fields that sit beside EN/AR.
    extracted.update(
        {key: value for key, value in result.items() if key not in ('EN', 'AR')}
    )
    return extracted
681
+
682
@staticmethod
def is_bilingual_response(result: Dict) -> bool:
    """
    Report whether a response is in bilingual (EN + AR) format.

    Args:
        result: Analysis result dict.

    Returns:
        True when both 'EN' and 'AR' keys are present.
    """
    return all(key in result for key in ('EN', 'AR'))
@@ -0,0 +1,73 @@
1
+ Metadata-Version: 2.4
2
+ Name: egrobots-sa-client
3
+ Version: 1.1.0
4
+ Summary: Python client for Egrobots Arabic Speech Sentiment Analysis service
5
+ Author-email: Egrobots <moamen.moustafa@egrobots.com>
6
+ License-Expression: MIT
7
+ Project-URL: Homepage, https://github.com/Mox301/egrobots-audio-analysis-agent-demo
8
+ Project-URL: Documentation, https://github.com/Mox301/egrobots-audio-analysis-agent-demo/blob/main/backend_client/CLIENT_USAGE.md
9
+ Project-URL: Repository, https://github.com/Mox301/egrobots-audio-analysis-agent-demo
10
+ Project-URL: Issues, https://github.com/Mox301/egrobots-audio-analysis-agent-demo/issues
11
+ Keywords: arabic,audio,sentiment,analysis,speech,nlp
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Classifier: Topic :: Multimedia :: Sound/Audio :: Analysis
21
+ Requires-Python: >=3.9
22
+ Description-Content-Type: text/markdown
23
+ Requires-Dist: websockets>=10.0
24
+ Requires-Dist: pydub>=0.25.0
25
+ Requires-Dist: python-dotenv>=1.0.0
26
+ Requires-Dist: requests>=2.28.0
27
+ Provides-Extra: dev
28
+ Requires-Dist: pytest>=7.0; extra == "dev"
29
+ Requires-Dist: pytest-asyncio>=0.21; extra == "dev"
30
+ Requires-Dist: build>=0.10; extra == "dev"
31
+ Requires-Dist: twine>=4.0; extra == "dev"
32
+
33
+ # Egrobots SA Client
34
+
35
+ Python client for Egrobots Arabic Speech Sentiment Analysis service.
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ pip install egrobots-sa-client
41
+ ```
42
+
43
+ ## Quick Start
44
+
45
+ ```python
46
+ from egrobots_sa_client import EgrobotsSSAClient
47
+
48
+ client = EgrobotsSSAClient()
49
+
50
+ # Batch processing
51
+ result = client.batch_process(
52
+ audio_path="audio.mp3",
53
+ endpoint="sentiment", # or "conversation"
54
+ enable_translation=False
55
+ )
56
+ ```
57
+
58
+ ## Configuration
59
+
60
+ Create a `.env` file:
61
+
62
+ ```env
63
+ WEBSOCKET_URL=wss://your-service.com/ws/stream-sentiment
64
+ BASE_URL=https://your-service.com
65
+ ```
66
+
67
+ ## Documentation
68
+
69
+ See [CLIENT_USAGE.md](CLIENT_USAGE.md) for full documentation.
70
+
71
+ ## License
72
+
73
+ MIT
@@ -0,0 +1,6 @@
1
+ egrobots_sa_client/__init__.py,sha256=JOXccT3tHRZKLRY6pNDY-e5i8BaeBr4mP2xakyXluWE,375
2
+ egrobots_sa_client/client.py,sha256=qw9IXxXkcdvk4PiS-ejQWwHQb1htbcz6qKOUY_jwLKc,26034
3
+ egrobots_sa_client-1.1.0.dist-info/METADATA,sha256=EhCH-eNhC9se279VpXW28QV2Eaa0cJMIWHxbHdw_4rI,2182
4
+ egrobots_sa_client-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
5
+ egrobots_sa_client-1.1.0.dist-info/top_level.txt,sha256=3U_N8FvB679RE7S7rGKdqmUdcVnTeOpqk0Sx8B_YEsI,19
6
+ egrobots_sa_client-1.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.9.0)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ egrobots_sa_client