sarvamai 0.1.22a3__py3-none-any.whl → 0.1.22a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. sarvamai/__init__.py +0 -6
  2. sarvamai/core/client_wrapper.py +2 -2
  3. sarvamai/requests/speech_to_text_transcription_data.py +0 -6
  4. sarvamai/requests/speech_to_text_translate_transcription_data.py +0 -6
  5. sarvamai/speech_to_text_job/job.py +100 -2
  6. sarvamai/speech_to_text_job/raw_client.py +14 -10
  7. sarvamai/speech_to_text_streaming/__init__.py +0 -2
  8. sarvamai/speech_to_text_streaming/client.py +0 -31
  9. sarvamai/speech_to_text_streaming/raw_client.py +0 -31
  10. sarvamai/speech_to_text_streaming/types/__init__.py +0 -2
  11. sarvamai/speech_to_text_translate_job/job.py +100 -2
  12. sarvamai/speech_to_text_translate_job/raw_client.py +14 -10
  13. sarvamai/speech_to_text_translate_streaming/__init__.py +0 -2
  14. sarvamai/speech_to_text_translate_streaming/client.py +0 -31
  15. sarvamai/speech_to_text_translate_streaming/raw_client.py +0 -31
  16. sarvamai/speech_to_text_translate_streaming/types/__init__.py +0 -4
  17. sarvamai/types/__init__.py +0 -2
  18. sarvamai/types/completion_event_flag.py +3 -1
  19. sarvamai/types/speech_to_text_transcription_data.py +0 -6
  20. sarvamai/types/speech_to_text_translate_transcription_data.py +0 -6
  21. {sarvamai-0.1.22a3.dist-info → sarvamai-0.1.22a4.dist-info}/METADATA +1 -1
  22. {sarvamai-0.1.22a3.dist-info → sarvamai-0.1.22a4.dist-info}/RECORD +23 -26
  23. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_stream_ongoing_speech_results.py +0 -5
  24. sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_stream_ongoing_speech_results.py +0 -5
  25. sarvamai/types/response_speech_state.py +0 -7
  26. {sarvamai-0.1.22a3.dist-info → sarvamai-0.1.22a4.dist-info}/WHEEL +0 -0
sarvamai/__init__.py CHANGED
@@ -56,7 +56,6 @@ from .types import (
56
56
  NumeralsFormat,
57
57
  PingSignal,
58
58
  ReasoningEffort,
59
- ResponseSpeechState,
60
59
  ResponseType,
61
60
  Role,
62
61
  SarvamModelIds,
@@ -190,14 +189,12 @@ from .speech_to_text_streaming import (
190
189
  SpeechToTextStreamingHighVadSensitivity,
191
190
  SpeechToTextStreamingInputAudioCodec,
192
191
  SpeechToTextStreamingLanguageCode,
193
- SpeechToTextStreamingStreamOngoingSpeechResults,
194
192
  SpeechToTextStreamingVadSignals,
195
193
  )
196
194
  from .speech_to_text_translate_streaming import (
197
195
  SpeechToTextTranslateStreamingFlushSignal,
198
196
  SpeechToTextTranslateStreamingHighVadSensitivity,
199
197
  SpeechToTextTranslateStreamingInputAudioCodec,
200
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults,
201
198
  SpeechToTextTranslateStreamingVadSignals,
202
199
  )
203
200
  from .text_to_speech_streaming import TextToSpeechStreamingSendCompletionEvent
@@ -300,7 +297,6 @@ __all__ = [
300
297
  "PingSignal",
301
298
  "PingSignalParams",
302
299
  "ReasoningEffort",
303
- "ResponseSpeechState",
304
300
  "ResponseType",
305
301
  "Role",
306
302
  "SarvamAI",
@@ -326,7 +322,6 @@ __all__ = [
326
322
  "SpeechToTextStreamingLanguageCode",
327
323
  "SpeechToTextStreamingResponse",
328
324
  "SpeechToTextStreamingResponseParams",
329
- "SpeechToTextStreamingStreamOngoingSpeechResults",
330
325
  "SpeechToTextStreamingVadSignals",
331
326
  "SpeechToTextTranscriptionData",
332
327
  "SpeechToTextTranscriptionDataParams",
@@ -343,7 +338,6 @@ __all__ = [
343
338
  "SpeechToTextTranslateStreamingInputAudioCodec",
344
339
  "SpeechToTextTranslateStreamingResponse",
345
340
  "SpeechToTextTranslateStreamingResponseParams",
346
- "SpeechToTextTranslateStreamingStreamOngoingSpeechResults",
347
341
  "SpeechToTextTranslateStreamingVadSignals",
348
342
  "SpeechToTextTranslateTranscriptionData",
349
343
  "SpeechToTextTranslateTranscriptionDataParams",
sarvamai/core/client_wrapper.py CHANGED
@@ -23,10 +23,10 @@ class BaseClientWrapper:
23
23
 
24
24
  def get_headers(self) -> typing.Dict[str, str]:
25
25
  headers: typing.Dict[str, str] = {
26
- "User-Agent": "sarvamai/0.1.22a3",
26
+ "User-Agent": "sarvamai/0.1.22a4",
27
27
  "X-Fern-Language": "Python",
28
28
  "X-Fern-SDK-Name": "sarvamai",
29
- "X-Fern-SDK-Version": "0.1.22a3",
29
+ "X-Fern-SDK-Version": "0.1.22a4",
30
30
  **(self.get_custom_headers() or {}),
31
31
  }
32
32
  headers["api-subscription-key"] = self.api_subscription_key
sarvamai/requests/speech_to_text_transcription_data.py CHANGED
@@ -3,7 +3,6 @@
3
3
  import typing
4
4
 
5
5
  import typing_extensions
6
- from ..types.response_speech_state import ResponseSpeechState
7
6
  from .transcription_metrics import TranscriptionMetricsParams
8
7
 
9
8
 
@@ -33,9 +32,4 @@ class SpeechToTextTranscriptionDataParams(typing_extensions.TypedDict):
33
32
  BCP-47 code of detected language
34
33
  """
35
34
 
36
- response_speech_state: typing_extensions.NotRequired[ResponseSpeechState]
37
- """
38
- Current state of speech detection and processing
39
- """
40
-
41
35
  metrics: TranscriptionMetricsParams
sarvamai/requests/speech_to_text_translate_transcription_data.py CHANGED
@@ -1,7 +1,6 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
3
  import typing_extensions
4
- from ..types.response_speech_state import ResponseSpeechState
5
4
  from .transcription_metrics import TranscriptionMetricsParams
6
5
 
7
6
 
@@ -21,9 +20,4 @@ class SpeechToTextTranslateTranscriptionDataParams(typing_extensions.TypedDict):
21
20
  BCP-47 code of detected source language (null when language detection is in progress)
22
21
  """
23
22
 
24
- response_speech_state: typing_extensions.NotRequired[ResponseSpeechState]
25
- """
26
- Current state of speech detection and processing
27
- """
28
-
29
23
  metrics: TranscriptionMetricsParams
sarvamai/speech_to_text_job/job.py CHANGED
@@ -146,9 +146,58 @@ class AsyncSpeechToTextJob:
146
146
  "output_file": detail.outputs[0].file_name,
147
147
  }
148
148
  for detail in (job_status.job_details or [])
149
- if detail.inputs and detail.outputs
149
+ if detail.inputs and detail.outputs and detail.state == "Success"
150
150
  ]
151
151
 
152
+ async def get_file_results(
153
+ self,
154
+ ) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]]:
155
+ """
156
+ Get detailed results for each file in the batch job.
157
+
158
+ Returns
159
+ -------
160
+ Dict[str, List[Dict[str, Any]]]
161
+ Dictionary with 'successful' and 'failed' keys, each containing a list of file details.
162
+ Each file detail includes:
163
+ - 'file_name': Name of the input file
164
+ - 'status': Status of processing ('Success' or 'Failed')
165
+ - 'error_message': Error message if failed (None if successful)
166
+ - 'output_file': Name of output file if successful (None if failed)
167
+ """
168
+ job_status = await self.get_status()
169
+ results: typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]] = {
170
+ "successful": [],
171
+ "failed": [],
172
+ }
173
+
174
+ for detail in job_status.job_details or []:
175
+ # Check for empty lists explicitly
176
+ if not detail.inputs or len(detail.inputs) == 0:
177
+ continue
178
+
179
+ try:
180
+ file_info = {
181
+ "file_name": detail.inputs[0].file_name,
182
+ "status": detail.state,
183
+ "error_message": detail.error_message,
184
+ "output_file": (
185
+ detail.outputs[0].file_name
186
+ if detail.outputs and len(detail.outputs) > 0
187
+ else None
188
+ ),
189
+ }
190
+
191
+ if detail.state == "Success":
192
+ results["successful"].append(file_info)
193
+ else:
194
+ results["failed"].append(file_info)
195
+ except (IndexError, AttributeError):
196
+ # Skip malformed job details
197
+ continue
198
+
199
+ return results
200
+
152
201
  async def download_outputs(self, output_dir: str) -> bool:
153
202
  """
154
203
  Download output files to the specified directory.
@@ -387,9 +436,58 @@ class SpeechToTextJob:
387
436
  "output_file": detail.outputs[0].file_name,
388
437
  }
389
438
  for detail in (job_status.job_details or [])
390
- if detail.inputs and detail.outputs
439
+ if detail.inputs and detail.outputs and detail.state == "Success"
391
440
  ]
392
441
 
442
+ def get_file_results(
443
+ self,
444
+ ) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]]:
445
+ """
446
+ Get detailed results for each file in the batch job.
447
+
448
+ Returns
449
+ -------
450
+ Dict[str, List[Dict[str, Any]]]
451
+ Dictionary with 'successful' and 'failed' keys, each containing a list of file details.
452
+ Each file detail includes:
453
+ - 'file_name': Name of the input file
454
+ - 'status': Status of processing ('Success' or 'Failed')
455
+ - 'error_message': Error message if failed (None if successful)
456
+ - 'output_file': Name of output file if successful (None if failed)
457
+ """
458
+ job_status = self.get_status()
459
+ results: typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]] = {
460
+ "successful": [],
461
+ "failed": [],
462
+ }
463
+
464
+ for detail in job_status.job_details or []:
465
+ # Check for empty lists explicitly
466
+ if not detail.inputs or len(detail.inputs) == 0:
467
+ continue
468
+
469
+ try:
470
+ file_info = {
471
+ "file_name": detail.inputs[0].file_name,
472
+ "status": detail.state,
473
+ "error_message": detail.error_message,
474
+ "output_file": (
475
+ detail.outputs[0].file_name
476
+ if detail.outputs and len(detail.outputs) > 0
477
+ else None
478
+ ),
479
+ }
480
+
481
+ if detail.state == "Success":
482
+ results["successful"].append(file_info)
483
+ else:
484
+ results["failed"].append(file_info)
485
+ except (IndexError, AttributeError):
486
+ # Skip malformed job details
487
+ continue
488
+
489
+ return results
490
+
393
491
  def download_outputs(self, output_dir: str) -> bool:
394
492
  """
395
493
  Download output files to the specified directory.
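The new get_file_results() helper shown above splits a batch job's files into successful and failed buckets. Below is a minimal consumption sketch, assuming a SpeechToTextJob handle has already been obtained; how the job is created is outside this diff, so the create_job(...) call is a hypothetical placeholder, and the SarvamAI constructor arguments are assumed. On AsyncSpeechToTextJob the same calls are awaited.

from sarvamai import SarvamAI

client = SarvamAI(api_subscription_key="YOUR_KEY")   # constructor arguments assumed
job = client.speech_to_text_job.create_job(...)      # hypothetical: obtain a SpeechToTextJob handle

results = job.get_file_results()  # {"successful": [...], "failed": [...]}

for item in results["successful"]:
    print(f"{item['file_name']} -> {item['output_file']}")

for item in results["failed"]:
    print(f"{item['file_name']} failed: {item['error_message']}")

if results["successful"]:
    job.download_outputs("./outputs")  # downloads the output files; returns True on success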
sarvamai/speech_to_text_job/raw_client.py CHANGED
@@ -39,7 +39,7 @@ class RawSpeechToTextJobClient:
39
39
  request_options: typing.Optional[RequestOptions] = None,
40
40
  ) -> HttpResponse[BulkJobInitResponseV1]:
41
41
  """
42
- Get a job uuid, and storage folder details for speech to text bulk job v1
42
+ Create a new speech to text bulk job and receive a job UUID and storage folder details for processing multiple audio files
43
43
 
44
44
  Parameters
45
45
  ----------
@@ -160,7 +160,9 @@ class RawSpeechToTextJobClient:
160
160
  self, job_id: str, *, request_options: typing.Optional[RequestOptions] = None
161
161
  ) -> HttpResponse[JobStatusV1Response]:
162
162
  """
163
- Get the status of a speech to text bulk job V1
163
+ Retrieve the current status and details of a speech to text bulk job, including progress and file-level information.
164
+
165
+ **Rate Limiting Best Practice:** To prevent rate limit errors and ensure optimal server performance, we recommend implementing a minimum 5-millisecond delay between consecutive status polling requests. This helps maintain system stability while still providing timely status updates.
164
166
 
165
167
  Parameters
166
168
  ----------
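The rate-limiting note added above recommends at least a 5-millisecond gap between consecutive status polls. A small polling helper that follows that guidance, reusing the job handle from the previous sketch; the terminal-state check is passed in by the caller because the exact completion field of JobStatusV1Response is not visible in this diff.

import time

def poll_job_status(job, is_done, max_attempts=10_000, delay_s=0.005):
    # Poll job.get_status() until is_done(status) is truthy, sleeping >= 5 ms
    # between requests as recommended above.
    for _ in range(max_attempts):
        status = job.get_status()
        if is_done(status):
            return status
        time.sleep(delay_s)
    raise TimeoutError("job did not reach a terminal state within max_attempts polls")

# Example predicate: every file-level entry has reached a terminal state.
final_status = poll_job_status(
    job,
    is_done=lambda s: bool(s.job_details)
    and all(d.state in ("Success", "Failed") for d in s.job_details),
)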
@@ -270,7 +272,7 @@ class RawSpeechToTextJobClient:
270
272
  request_options: typing.Optional[RequestOptions] = None,
271
273
  ) -> HttpResponse[JobStatusV1Response]:
272
274
  """
273
- Start a speech to text bulk job V1
275
+ Start processing a speech to text bulk job after all audio files have been uploaded
274
276
 
275
277
  Parameters
276
278
  ----------
@@ -381,7 +383,7 @@ class RawSpeechToTextJobClient:
381
383
  self, *, job_id: str, files: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
382
384
  ) -> HttpResponse[FilesUploadResponse]:
383
385
  """
384
- Start a speech to text bulk job V1
386
+ Generate presigned upload URLs for audio files that will be processed in a speech to text bulk job
385
387
 
386
388
  Parameters
387
389
  ----------
@@ -496,7 +498,7 @@ class RawSpeechToTextJobClient:
496
498
  self, *, job_id: str, files: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
497
499
  ) -> HttpResponse[FilesDownloadResponse]:
498
500
  """
499
- Start a speech to text bulk job V1
501
+ Generate presigned download URLs for the transcription output files of a completed speech to text bulk job
500
502
 
501
503
  Parameters
502
504
  ----------
@@ -620,7 +622,7 @@ class AsyncRawSpeechToTextJobClient:
620
622
  request_options: typing.Optional[RequestOptions] = None,
621
623
  ) -> AsyncHttpResponse[BulkJobInitResponseV1]:
622
624
  """
623
- Get a job uuid, and storage folder details for speech to text bulk job v1
625
+ Create a new speech to text bulk job and receive a job UUID and storage folder details for processing multiple audio files
624
626
 
625
627
  Parameters
626
628
  ----------
@@ -741,7 +743,9 @@ class AsyncRawSpeechToTextJobClient:
741
743
  self, job_id: str, *, request_options: typing.Optional[RequestOptions] = None
742
744
  ) -> AsyncHttpResponse[JobStatusV1Response]:
743
745
  """
744
- Get the status of a speech to text bulk job V1
746
+ Retrieve the current status and details of a speech to text bulk job, including progress and file-level information.
747
+
748
+ **Rate Limiting Best Practice:** To prevent rate limit errors and ensure optimal server performance, we recommend implementing a minimum 5-millisecond delay between consecutive status polling requests. This helps maintain system stability while still providing timely status updates.
745
749
 
746
750
  Parameters
747
751
  ----------
@@ -851,7 +855,7 @@ class AsyncRawSpeechToTextJobClient:
851
855
  request_options: typing.Optional[RequestOptions] = None,
852
856
  ) -> AsyncHttpResponse[JobStatusV1Response]:
853
857
  """
854
- Start a speech to text bulk job V1
858
+ Start processing a speech to text bulk job after all audio files have been uploaded
855
859
 
856
860
  Parameters
857
861
  ----------
@@ -962,7 +966,7 @@ class AsyncRawSpeechToTextJobClient:
962
966
  self, *, job_id: str, files: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
963
967
  ) -> AsyncHttpResponse[FilesUploadResponse]:
964
968
  """
965
- Start a speech to text bulk job V1
969
+ Generate presigned upload URLs for audio files that will be processed in a speech to text bulk job
966
970
 
967
971
  Parameters
968
972
  ----------
@@ -1077,7 +1081,7 @@ class AsyncRawSpeechToTextJobClient:
1077
1081
  self, *, job_id: str, files: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None
1078
1082
  ) -> AsyncHttpResponse[FilesDownloadResponse]:
1079
1083
  """
1080
- Start a speech to text bulk job V1
1084
+ Generate presigned download URLs for the transcription output files of a completed speech to text bulk job
1081
1085
 
1082
1086
  Parameters
1083
1087
  ----------
sarvamai/speech_to_text_streaming/__init__.py CHANGED
@@ -7,7 +7,6 @@ from .types import (
7
7
  SpeechToTextStreamingHighVadSensitivity,
8
8
  SpeechToTextStreamingInputAudioCodec,
9
9
  SpeechToTextStreamingLanguageCode,
10
- SpeechToTextStreamingStreamOngoingSpeechResults,
11
10
  SpeechToTextStreamingVadSignals,
12
11
  )
13
12
 
@@ -16,6 +15,5 @@ __all__ = [
16
15
  "SpeechToTextStreamingHighVadSensitivity",
17
16
  "SpeechToTextStreamingInputAudioCodec",
18
17
  "SpeechToTextStreamingLanguageCode",
19
- "SpeechToTextStreamingStreamOngoingSpeechResults",
20
18
  "SpeechToTextStreamingVadSignals",
21
19
  ]
sarvamai/speech_to_text_streaming/client.py CHANGED
@@ -15,9 +15,6 @@ from .types.speech_to_text_streaming_flush_signal import SpeechToTextStreamingFl
15
15
  from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
16
16
  from .types.speech_to_text_streaming_input_audio_codec import SpeechToTextStreamingInputAudioCodec
17
17
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
18
- from .types.speech_to_text_streaming_stream_ongoing_speech_results import (
19
- SpeechToTextStreamingStreamOngoingSpeechResults,
20
- )
21
18
  from .types.speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
22
19
 
23
20
  try:
@@ -52,8 +49,6 @@ class SpeechToTextStreamingClient:
52
49
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
53
50
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
54
51
  flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
55
- stream_ongoing_speech_results: typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults] = None,
56
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
57
52
  api_subscription_key: typing.Optional[str] = None,
58
53
  request_options: typing.Optional[RequestOptions] = None,
59
54
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -87,12 +82,6 @@ class SpeechToTextStreamingClient:
87
82
  flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
88
83
  Signal to flush the audio buffer and finalize transcription
89
84
 
90
- stream_ongoing_speech_results : typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults]
91
- Enable streaming of ongoing speech results during active speech
92
-
93
- streaming_ongoing_requests_frame_size : typing.Optional[str]
94
- Frame size for streaming ongoing speech results (1-100)
95
-
96
85
  api_subscription_key : typing.Optional[str]
97
86
  API subscription key for authentication
98
87
 
@@ -119,12 +108,6 @@ class SpeechToTextStreamingClient:
119
108
  query_params = query_params.add("vad_signals", vad_signals)
120
109
  if flush_signal is not None:
121
110
  query_params = query_params.add("flush_signal", flush_signal)
122
- if stream_ongoing_speech_results is not None:
123
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
124
- if streaming_ongoing_requests_frame_size is not None:
125
- query_params = query_params.add(
126
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
127
- )
128
111
  ws_url = ws_url + f"?{query_params}"
129
112
  headers = self._raw_client._client_wrapper.get_headers()
130
113
  if api_subscription_key is not None:
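With stream_ongoing_speech_results and streaming_ongoing_requests_frame_size removed from connect(), callers that still pass them will now fail with an unexpected-keyword-argument TypeError. A hedged sketch of the trimmed call follows; it assumes the streaming module is reached via client.speech_to_text_streaming and that connect is used as a context manager (consistent with its Iterator[SpeechToTextStreamingSocketClient] return type), and the parameter values are illustrative only.

with client.speech_to_text_streaming.connect(
    language_code="en-IN",        # a SpeechToTextStreamingLanguageCode value (illustrative)
    high_vad_sensitivity="true",  # optional, unchanged by this release (value form assumed)
    vad_signals="true",           # optional, unchanged by this release (value form assumed)
) as socket:
    # send audio frames / read transcripts via the SpeechToTextStreamingSocketClient API
    ...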
@@ -175,8 +158,6 @@ class AsyncSpeechToTextStreamingClient:
175
158
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
176
159
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
177
160
  flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
178
- stream_ongoing_speech_results: typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults] = None,
179
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
180
161
  api_subscription_key: typing.Optional[str] = None,
181
162
  request_options: typing.Optional[RequestOptions] = None,
182
163
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -210,12 +191,6 @@ class AsyncSpeechToTextStreamingClient:
210
191
  flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
211
192
  Signal to flush the audio buffer and finalize transcription
212
193
 
213
- stream_ongoing_speech_results : typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults]
214
- Enable streaming of ongoing speech results during active speech
215
-
216
- streaming_ongoing_requests_frame_size : typing.Optional[str]
217
- Frame size for streaming ongoing speech results (1-100)
218
-
219
194
  api_subscription_key : typing.Optional[str]
220
195
  API subscription key for authentication
221
196
 
@@ -242,12 +217,6 @@ class AsyncSpeechToTextStreamingClient:
242
217
  query_params = query_params.add("vad_signals", vad_signals)
243
218
  if flush_signal is not None:
244
219
  query_params = query_params.add("flush_signal", flush_signal)
245
- if stream_ongoing_speech_results is not None:
246
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
247
- if streaming_ongoing_requests_frame_size is not None:
248
- query_params = query_params.add(
249
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
250
- )
251
220
  ws_url = ws_url + f"?{query_params}"
252
221
  headers = self._raw_client._client_wrapper.get_headers()
253
222
  if api_subscription_key is not None:
sarvamai/speech_to_text_streaming/raw_client.py CHANGED
@@ -14,9 +14,6 @@ from .types.speech_to_text_streaming_flush_signal import SpeechToTextStreamingFl
14
14
  from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
15
15
  from .types.speech_to_text_streaming_input_audio_codec import SpeechToTextStreamingInputAudioCodec
16
16
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
17
- from .types.speech_to_text_streaming_stream_ongoing_speech_results import (
18
- SpeechToTextStreamingStreamOngoingSpeechResults,
19
- )
20
17
  from .types.speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
21
18
 
22
19
  try:
@@ -40,8 +37,6 @@ class RawSpeechToTextStreamingClient:
40
37
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
41
38
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
42
39
  flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
43
- stream_ongoing_speech_results: typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults] = None,
44
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
45
40
  api_subscription_key: typing.Optional[str] = None,
46
41
  request_options: typing.Optional[RequestOptions] = None,
47
42
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -75,12 +70,6 @@ class RawSpeechToTextStreamingClient:
75
70
  flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
76
71
  Signal to flush the audio buffer and finalize transcription
77
72
 
78
- stream_ongoing_speech_results : typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults]
79
- Enable streaming of ongoing speech results during active speech
80
-
81
- streaming_ongoing_requests_frame_size : typing.Optional[str]
82
- Frame size for streaming ongoing speech results (1-100)
83
-
84
73
  api_subscription_key : typing.Optional[str]
85
74
  API subscription key for authentication
86
75
 
@@ -107,12 +96,6 @@ class RawSpeechToTextStreamingClient:
107
96
  query_params = query_params.add("vad_signals", vad_signals)
108
97
  if flush_signal is not None:
109
98
  query_params = query_params.add("flush_signal", flush_signal)
110
- if stream_ongoing_speech_results is not None:
111
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
112
- if streaming_ongoing_requests_frame_size is not None:
113
- query_params = query_params.add(
114
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
115
- )
116
99
  ws_url = ws_url + f"?{query_params}"
117
100
  headers = self._client_wrapper.get_headers()
118
101
  if api_subscription_key is not None:
@@ -152,8 +135,6 @@ class AsyncRawSpeechToTextStreamingClient:
152
135
  high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
153
136
  vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
154
137
  flush_signal: typing.Optional[SpeechToTextStreamingFlushSignal] = None,
155
- stream_ongoing_speech_results: typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults] = None,
156
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
157
138
  api_subscription_key: typing.Optional[str] = None,
158
139
  request_options: typing.Optional[RequestOptions] = None,
159
140
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -187,12 +168,6 @@ class AsyncRawSpeechToTextStreamingClient:
187
168
  flush_signal : typing.Optional[SpeechToTextStreamingFlushSignal]
188
169
  Signal to flush the audio buffer and finalize transcription
189
170
 
190
- stream_ongoing_speech_results : typing.Optional[SpeechToTextStreamingStreamOngoingSpeechResults]
191
- Enable streaming of ongoing speech results during active speech
192
-
193
- streaming_ongoing_requests_frame_size : typing.Optional[str]
194
- Frame size for streaming ongoing speech results (1-100)
195
-
196
171
  api_subscription_key : typing.Optional[str]
197
172
  API subscription key for authentication
198
173
 
@@ -219,12 +194,6 @@ class AsyncRawSpeechToTextStreamingClient:
219
194
  query_params = query_params.add("vad_signals", vad_signals)
220
195
  if flush_signal is not None:
221
196
  query_params = query_params.add("flush_signal", flush_signal)
222
- if stream_ongoing_speech_results is not None:
223
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
224
- if streaming_ongoing_requests_frame_size is not None:
225
- query_params = query_params.add(
226
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
227
- )
228
197
  ws_url = ws_url + f"?{query_params}"
229
198
  headers = self._client_wrapper.get_headers()
230
199
  if api_subscription_key is not None:
sarvamai/speech_to_text_streaming/types/__init__.py CHANGED
@@ -6,7 +6,6 @@ from .speech_to_text_streaming_flush_signal import SpeechToTextStreamingFlushSig
6
6
  from .speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
7
7
  from .speech_to_text_streaming_input_audio_codec import SpeechToTextStreamingInputAudioCodec
8
8
  from .speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
9
- from .speech_to_text_streaming_stream_ongoing_speech_results import SpeechToTextStreamingStreamOngoingSpeechResults
10
9
  from .speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
11
10
 
12
11
  __all__ = [
@@ -14,6 +13,5 @@ __all__ = [
14
13
  "SpeechToTextStreamingHighVadSensitivity",
15
14
  "SpeechToTextStreamingInputAudioCodec",
16
15
  "SpeechToTextStreamingLanguageCode",
17
- "SpeechToTextStreamingStreamOngoingSpeechResults",
18
16
  "SpeechToTextStreamingVadSignals",
19
17
  ]
sarvamai/speech_to_text_translate_job/job.py CHANGED
@@ -150,9 +150,58 @@ class AsyncSpeechToTextTranslateJob:
150
150
  "output_file": detail.outputs[0].file_name,
151
151
  }
152
152
  for detail in (job_status.job_details or [])
153
- if detail.inputs and detail.outputs
153
+ if detail.inputs and detail.outputs and detail.state == "Success"
154
154
  ]
155
155
 
156
+ async def get_file_results(
157
+ self,
158
+ ) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]]:
159
+ """
160
+ Get detailed results for each file in the batch job.
161
+
162
+ Returns
163
+ -------
164
+ Dict[str, List[Dict[str, Any]]]
165
+ Dictionary with 'successful' and 'failed' keys, each containing a list of file details.
166
+ Each file detail includes:
167
+ - 'file_name': Name of the input file
168
+ - 'status': Status of processing ('Success' or 'Failed')
169
+ - 'error_message': Error message if failed (None if successful)
170
+ - 'output_file': Name of output file if successful (None if failed)
171
+ """
172
+ job_status = await self.get_status()
173
+ results: typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]] = {
174
+ "successful": [],
175
+ "failed": [],
176
+ }
177
+
178
+ for detail in job_status.job_details or []:
179
+ # Check for empty lists explicitly
180
+ if not detail.inputs or len(detail.inputs) == 0:
181
+ continue
182
+
183
+ try:
184
+ file_info = {
185
+ "file_name": detail.inputs[0].file_name,
186
+ "status": detail.state,
187
+ "error_message": detail.error_message,
188
+ "output_file": (
189
+ detail.outputs[0].file_name
190
+ if detail.outputs and len(detail.outputs) > 0
191
+ else None
192
+ ),
193
+ }
194
+
195
+ if detail.state == "Success":
196
+ results["successful"].append(file_info)
197
+ else:
198
+ results["failed"].append(file_info)
199
+ except (IndexError, AttributeError):
200
+ # Skip malformed job details
201
+ continue
202
+
203
+ return results
204
+
156
205
  async def download_outputs(self, output_dir: str) -> bool:
157
206
  """
158
207
  Download output files to the specified directory.
@@ -395,9 +444,58 @@ class SpeechToTextTranslateJob:
395
444
  "output_file": detail.outputs[0].file_name,
396
445
  }
397
446
  for detail in (job_status.job_details or [])
398
- if detail.inputs and detail.outputs
447
+ if detail.inputs and detail.outputs and detail.state == "Success"
399
448
  ]
400
449
 
450
+ def get_file_results(
451
+ self,
452
+ ) -> typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]]:
453
+ """
454
+ Get detailed results for each file in the batch job.
455
+
456
+ Returns
457
+ -------
458
+ Dict[str, List[Dict[str, Any]]]
459
+ Dictionary with 'successful' and 'failed' keys, each containing a list of file details.
460
+ Each file detail includes:
461
+ - 'file_name': Name of the input file
462
+ - 'status': Status of processing ('Success' or 'Failed')
463
+ - 'error_message': Error message if failed (None if successful)
464
+ - 'output_file': Name of output file if successful (None if failed)
465
+ """
466
+ job_status = self.get_status()
467
+ results: typing.Dict[str, typing.List[typing.Dict[str, typing.Any]]] = {
468
+ "successful": [],
469
+ "failed": [],
470
+ }
471
+
472
+ for detail in job_status.job_details or []:
473
+ # Check for empty lists explicitly
474
+ if not detail.inputs or len(detail.inputs) == 0:
475
+ continue
476
+
477
+ try:
478
+ file_info = {
479
+ "file_name": detail.inputs[0].file_name,
480
+ "status": detail.state,
481
+ "error_message": detail.error_message,
482
+ "output_file": (
483
+ detail.outputs[0].file_name
484
+ if detail.outputs and len(detail.outputs) > 0
485
+ else None
486
+ ),
487
+ }
488
+
489
+ if detail.state == "Success":
490
+ results["successful"].append(file_info)
491
+ else:
492
+ results["failed"].append(file_info)
493
+ except (IndexError, AttributeError):
494
+ # Skip malformed job details
495
+ continue
496
+
497
+ return results
498
+
401
499
  def download_outputs(self, output_dir: str) -> bool:
402
500
  """
403
501
  Download output files to the specified directory.
sarvamai/speech_to_text_translate_job/raw_client.py CHANGED
@@ -40,7 +40,7 @@ class RawSpeechToTextTranslateJobClient:
40
40
  request_options: typing.Optional[RequestOptions] = None,
41
41
  ) -> HttpResponse[BulkJobInitResponseV1]:
42
42
  """
43
- Get a job uuid, and storage folder details for speech to text tranlsate bulk job v1
43
+ Create a new speech to text translate bulk job and receive a job UUID and storage folder details for processing multiple audio files with translation
44
44
 
45
45
  Parameters
46
46
  ----------
@@ -166,7 +166,9 @@ class RawSpeechToTextTranslateJobClient:
166
166
  self, job_id: str, *, request_options: typing.Optional[RequestOptions] = None
167
167
  ) -> HttpResponse[JobStatusV1Response]:
168
168
  """
169
- Get the status of a speech to text translate bulk job V1
169
+ Retrieve the current status and details of a speech to text translate bulk job, including progress and file-level information.
170
+
171
+ **Rate Limiting Best Practice:** To prevent rate limit errors and ensure optimal server performance, we recommend implementing a minimum 5-millisecond delay between consecutive status polling requests. This helps maintain system stability while still providing timely status updates.
170
172
 
171
173
  Parameters
172
174
  ----------
@@ -276,7 +278,7 @@ class RawSpeechToTextTranslateJobClient:
276
278
  request_options: typing.Optional[RequestOptions] = None,
277
279
  ) -> HttpResponse[JobStatusV1Response]:
278
280
  """
279
- Start a speech to text translate bulk job V1
281
+ Start processing a speech to text translate bulk job after all audio files have been uploaded
280
282
 
281
283
  Parameters
282
284
  ----------
@@ -392,7 +394,7 @@ class RawSpeechToTextTranslateJobClient:
392
394
  request_options: typing.Optional[RequestOptions] = None,
393
395
  ) -> HttpResponse[FilesUploadResponse]:
394
396
  """
395
- Start a speech to text bulk job V1
397
+ Generate presigned upload URLs for audio files that will be processed in a speech to text translate bulk job
396
398
 
397
399
  Parameters
398
400
  ----------
@@ -517,7 +519,7 @@ class RawSpeechToTextTranslateJobClient:
517
519
  request_options: typing.Optional[RequestOptions] = None,
518
520
  ) -> HttpResponse[FilesDownloadResponse]:
519
521
  """
520
- Start a speech to text bulk job V1
522
+ Generate presigned download URLs for the translated transcription output files of a completed speech to text translate bulk job
521
523
 
522
524
  Parameters
523
525
  ----------
@@ -647,7 +649,7 @@ class AsyncRawSpeechToTextTranslateJobClient:
647
649
  request_options: typing.Optional[RequestOptions] = None,
648
650
  ) -> AsyncHttpResponse[BulkJobInitResponseV1]:
649
651
  """
650
- Get a job uuid, and storage folder details for speech to text tranlsate bulk job v1
652
+ Create a new speech to text translate bulk job and receive a job UUID and storage folder details for processing multiple audio files with translation
651
653
 
652
654
  Parameters
653
655
  ----------
@@ -773,7 +775,9 @@ class AsyncRawSpeechToTextTranslateJobClient:
773
775
  self, job_id: str, *, request_options: typing.Optional[RequestOptions] = None
774
776
  ) -> AsyncHttpResponse[JobStatusV1Response]:
775
777
  """
776
- Get the status of a speech to text translate bulk job V1
778
+ Retrieve the current status and details of a speech to text translate bulk job, including progress and file-level information.
779
+
780
+ **Rate Limiting Best Practice:** To prevent rate limit errors and ensure optimal server performance, we recommend implementing a minimum 5-millisecond delay between consecutive status polling requests. This helps maintain system stability while still providing timely status updates.
777
781
 
778
782
  Parameters
779
783
  ----------
@@ -883,7 +887,7 @@ class AsyncRawSpeechToTextTranslateJobClient:
883
887
  request_options: typing.Optional[RequestOptions] = None,
884
888
  ) -> AsyncHttpResponse[JobStatusV1Response]:
885
889
  """
886
- Start a speech to text translate bulk job V1
890
+ Start processing a speech to text translate bulk job after all audio files have been uploaded
887
891
 
888
892
  Parameters
889
893
  ----------
@@ -999,7 +1003,7 @@ class AsyncRawSpeechToTextTranslateJobClient:
999
1003
  request_options: typing.Optional[RequestOptions] = None,
1000
1004
  ) -> AsyncHttpResponse[FilesUploadResponse]:
1001
1005
  """
1002
- Start a speech to text bulk job V1
1006
+ Generate presigned upload URLs for audio files that will be processed in a speech to text translate bulk job
1003
1007
 
1004
1008
  Parameters
1005
1009
  ----------
@@ -1124,7 +1128,7 @@ class AsyncRawSpeechToTextTranslateJobClient:
1124
1128
  request_options: typing.Optional[RequestOptions] = None,
1125
1129
  ) -> AsyncHttpResponse[FilesDownloadResponse]:
1126
1130
  """
1127
- Start a speech to text bulk job V1
1131
+ Generate presigned download URLs for the translated transcription output files of a completed speech to text translate bulk job
1128
1132
 
1129
1133
  Parameters
1130
1134
  ----------
sarvamai/speech_to_text_translate_streaming/__init__.py CHANGED
@@ -6,7 +6,6 @@ from .types import (
6
6
  SpeechToTextTranslateStreamingFlushSignal,
7
7
  SpeechToTextTranslateStreamingHighVadSensitivity,
8
8
  SpeechToTextTranslateStreamingInputAudioCodec,
9
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults,
10
9
  SpeechToTextTranslateStreamingVadSignals,
11
10
  )
12
11
 
@@ -14,6 +13,5 @@ __all__ = [
14
13
  "SpeechToTextTranslateStreamingFlushSignal",
15
14
  "SpeechToTextTranslateStreamingHighVadSensitivity",
16
15
  "SpeechToTextTranslateStreamingInputAudioCodec",
17
- "SpeechToTextTranslateStreamingStreamOngoingSpeechResults",
18
16
  "SpeechToTextTranslateStreamingVadSignals",
19
17
  ]
sarvamai/speech_to_text_translate_streaming/client.py CHANGED
@@ -16,9 +16,6 @@ from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
16
16
  SpeechToTextTranslateStreamingHighVadSensitivity,
17
17
  )
18
18
  from .types.speech_to_text_translate_streaming_input_audio_codec import SpeechToTextTranslateStreamingInputAudioCodec
19
- from .types.speech_to_text_translate_streaming_stream_ongoing_speech_results import (
20
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults,
21
- )
22
19
  from .types.speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
23
20
 
24
21
  try:
@@ -52,8 +49,6 @@ class SpeechToTextTranslateStreamingClient:
52
49
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
53
50
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
54
51
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
55
- stream_ongoing_speech_results: typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults] = None,
56
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
57
52
  api_subscription_key: typing.Optional[str] = None,
58
53
  request_options: typing.Optional[RequestOptions] = None,
59
54
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -84,12 +79,6 @@ class SpeechToTextTranslateStreamingClient:
84
79
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
85
80
  Signal to flush the audio buffer and finalize transcription and translation
86
81
 
87
- stream_ongoing_speech_results : typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults]
88
- Enable streaming of ongoing speech results during active speech
89
-
90
- streaming_ongoing_requests_frame_size : typing.Optional[str]
91
- Frame size for streaming ongoing speech results (1-100)
92
-
93
82
  api_subscription_key : typing.Optional[str]
94
83
  API subscription key for authentication
95
84
 
@@ -114,12 +103,6 @@ class SpeechToTextTranslateStreamingClient:
114
103
  query_params = query_params.add("vad_signals", vad_signals)
115
104
  if flush_signal is not None:
116
105
  query_params = query_params.add("flush_signal", flush_signal)
117
- if stream_ongoing_speech_results is not None:
118
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
119
- if streaming_ongoing_requests_frame_size is not None:
120
- query_params = query_params.add(
121
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
122
- )
123
106
  ws_url = ws_url + f"?{query_params}"
124
107
  headers = self._raw_client._client_wrapper.get_headers()
125
108
  if api_subscription_key is not None:
@@ -169,8 +152,6 @@ class AsyncSpeechToTextTranslateStreamingClient:
169
152
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
170
153
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
171
154
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
172
- stream_ongoing_speech_results: typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults] = None,
173
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
174
155
  api_subscription_key: typing.Optional[str] = None,
175
156
  request_options: typing.Optional[RequestOptions] = None,
176
157
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -201,12 +182,6 @@ class AsyncSpeechToTextTranslateStreamingClient:
201
182
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
202
183
  Signal to flush the audio buffer and finalize transcription and translation
203
184
 
204
- stream_ongoing_speech_results : typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults]
205
- Enable streaming of ongoing speech results during active speech
206
-
207
- streaming_ongoing_requests_frame_size : typing.Optional[str]
208
- Frame size for streaming ongoing speech results (1-100)
209
-
210
185
  api_subscription_key : typing.Optional[str]
211
186
  API subscription key for authentication
212
187
 
@@ -231,12 +206,6 @@ class AsyncSpeechToTextTranslateStreamingClient:
231
206
  query_params = query_params.add("vad_signals", vad_signals)
232
207
  if flush_signal is not None:
233
208
  query_params = query_params.add("flush_signal", flush_signal)
234
- if stream_ongoing_speech_results is not None:
235
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
236
- if streaming_ongoing_requests_frame_size is not None:
237
- query_params = query_params.add(
238
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
239
- )
240
209
  ws_url = ws_url + f"?{query_params}"
241
210
  headers = self._raw_client._client_wrapper.get_headers()
242
211
  if api_subscription_key is not None:
sarvamai/speech_to_text_translate_streaming/raw_client.py CHANGED
@@ -15,9 +15,6 @@ from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
15
15
  SpeechToTextTranslateStreamingHighVadSensitivity,
16
16
  )
17
17
  from .types.speech_to_text_translate_streaming_input_audio_codec import SpeechToTextTranslateStreamingInputAudioCodec
18
- from .types.speech_to_text_translate_streaming_stream_ongoing_speech_results import (
19
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults,
20
- )
21
18
  from .types.speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
22
19
 
23
20
  try:
@@ -40,8 +37,6 @@ class RawSpeechToTextTranslateStreamingClient:
40
37
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
41
38
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
42
39
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
43
- stream_ongoing_speech_results: typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults] = None,
44
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
45
40
  api_subscription_key: typing.Optional[str] = None,
46
41
  request_options: typing.Optional[RequestOptions] = None,
47
42
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -72,12 +67,6 @@ class RawSpeechToTextTranslateStreamingClient:
72
67
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
73
68
  Signal to flush the audio buffer and finalize transcription and translation
74
69
 
75
- stream_ongoing_speech_results : typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults]
76
- Enable streaming of ongoing speech results during active speech
77
-
78
- streaming_ongoing_requests_frame_size : typing.Optional[str]
79
- Frame size for streaming ongoing speech results (1-100)
80
-
81
70
  api_subscription_key : typing.Optional[str]
82
71
  API subscription key for authentication
83
72
 
@@ -102,12 +91,6 @@ class RawSpeechToTextTranslateStreamingClient:
102
91
  query_params = query_params.add("vad_signals", vad_signals)
103
92
  if flush_signal is not None:
104
93
  query_params = query_params.add("flush_signal", flush_signal)
105
- if stream_ongoing_speech_results is not None:
106
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
107
- if streaming_ongoing_requests_frame_size is not None:
108
- query_params = query_params.add(
109
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
110
- )
111
94
  ws_url = ws_url + f"?{query_params}"
112
95
  headers = self._client_wrapper.get_headers()
113
96
  if api_subscription_key is not None:
@@ -146,8 +129,6 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
146
129
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
147
130
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
148
131
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
149
- stream_ongoing_speech_results: typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults] = None,
150
- streaming_ongoing_requests_frame_size: typing.Optional[str] = None,
151
132
  api_subscription_key: typing.Optional[str] = None,
152
133
  request_options: typing.Optional[RequestOptions] = None,
153
134
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -178,12 +159,6 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
178
159
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
179
160
  Signal to flush the audio buffer and finalize transcription and translation
180
161
 
181
- stream_ongoing_speech_results : typing.Optional[SpeechToTextTranslateStreamingStreamOngoingSpeechResults]
182
- Enable streaming of ongoing speech results during active speech
183
-
184
- streaming_ongoing_requests_frame_size : typing.Optional[str]
185
- Frame size for streaming ongoing speech results (1-100)
186
-
187
162
  api_subscription_key : typing.Optional[str]
188
163
  API subscription key for authentication
189
164
 
@@ -208,12 +183,6 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
208
183
  query_params = query_params.add("vad_signals", vad_signals)
209
184
  if flush_signal is not None:
210
185
  query_params = query_params.add("flush_signal", flush_signal)
211
- if stream_ongoing_speech_results is not None:
212
- query_params = query_params.add("stream_ongoing_speech_results", stream_ongoing_speech_results)
213
- if streaming_ongoing_requests_frame_size is not None:
214
- query_params = query_params.add(
215
- "streaming_ongoing_requests_frame_size", streaming_ongoing_requests_frame_size
216
- )
217
186
  ws_url = ws_url + f"?{query_params}"
218
187
  headers = self._client_wrapper.get_headers()
219
188
  if api_subscription_key is not None:
sarvamai/speech_to_text_translate_streaming/types/__init__.py CHANGED
@@ -5,15 +5,11 @@
5
5
  from .speech_to_text_translate_streaming_flush_signal import SpeechToTextTranslateStreamingFlushSignal
6
6
  from .speech_to_text_translate_streaming_high_vad_sensitivity import SpeechToTextTranslateStreamingHighVadSensitivity
7
7
  from .speech_to_text_translate_streaming_input_audio_codec import SpeechToTextTranslateStreamingInputAudioCodec
8
- from .speech_to_text_translate_streaming_stream_ongoing_speech_results import (
9
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults,
10
- )
11
8
  from .speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
12
9
 
13
10
  __all__ = [
14
11
  "SpeechToTextTranslateStreamingFlushSignal",
15
12
  "SpeechToTextTranslateStreamingHighVadSensitivity",
16
13
  "SpeechToTextTranslateStreamingInputAudioCodec",
17
- "SpeechToTextTranslateStreamingStreamOngoingSpeechResults",
18
14
  "SpeechToTextTranslateStreamingVadSignals",
19
15
  ]
sarvamai/types/__init__.py CHANGED
@@ -57,7 +57,6 @@ from .language_identification_response import LanguageIdentificationResponse
57
57
  from .numerals_format import NumeralsFormat
58
58
  from .ping_signal import PingSignal
59
59
  from .reasoning_effort import ReasoningEffort
60
- from .response_speech_state import ResponseSpeechState
61
60
  from .response_type import ResponseType
62
61
  from .role import Role
63
62
  from .sarvam_model_ids import SarvamModelIds
@@ -157,7 +156,6 @@ __all__ = [
157
156
  "NumeralsFormat",
158
157
  "PingSignal",
159
158
  "ReasoningEffort",
160
- "ResponseSpeechState",
161
159
  "ResponseType",
162
160
  "Role",
163
161
  "SarvamModelIds",
sarvamai/types/completion_event_flag.py CHANGED
@@ -1,3 +1,5 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- CompletionEventFlag = bool
3
+ import typing
4
+
5
+ CompletionEventFlag = typing.Union[typing.Literal["true", "false"], typing.Any]
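CompletionEventFlag is no longer a plain bool alias; it is now a "true"/"false" string-literal union, so callers that passed Python booleans for this flag need the string form. A small compatibility shim, with a hypothetical helper name:

def to_completion_event_flag(value: bool) -> str:
    # Map a Python bool onto the new "true"/"false" literal form of CompletionEventFlag.
    return "true" if value else "false"

send_completion_event = to_completion_event_flag(True)  # -> "true"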
sarvamai/types/speech_to_text_transcription_data.py CHANGED
@@ -4,7 +4,6 @@ import typing
4
4
 
5
5
  import pydantic
6
6
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
7
- from .response_speech_state import ResponseSpeechState
8
7
  from .transcription_metrics import TranscriptionMetrics
9
8
 
10
9
 
@@ -34,11 +33,6 @@ class SpeechToTextTranscriptionData(UniversalBaseModel):
34
33
  BCP-47 code of detected language
35
34
  """
36
35
 
37
- response_speech_state: typing.Optional[ResponseSpeechState] = pydantic.Field(default=None)
38
- """
39
- Current state of speech detection and processing
40
- """
41
-
42
36
  metrics: TranscriptionMetrics
43
37
 
44
38
  if IS_PYDANTIC_V2:
sarvamai/types/speech_to_text_translate_transcription_data.py CHANGED
@@ -4,7 +4,6 @@ import typing
4
4
 
5
5
  import pydantic
6
6
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
7
- from .response_speech_state import ResponseSpeechState
8
7
  from .transcription_metrics import TranscriptionMetrics
9
8
 
10
9
 
@@ -24,11 +23,6 @@ class SpeechToTextTranslateTranscriptionData(UniversalBaseModel):
24
23
  BCP-47 code of detected source language (null when language detection is in progress)
25
24
  """
26
25
 
27
- response_speech_state: typing.Optional[ResponseSpeechState] = pydantic.Field(default=None)
28
- """
29
- Current state of speech detection and processing
30
- """
31
-
32
26
  metrics: TranscriptionMetrics
33
27
 
34
28
  if IS_PYDANTIC_V2:
{sarvamai-0.1.22a3.dist-info → sarvamai-0.1.22a4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sarvamai
3
- Version: 0.1.22a3
3
+ Version: 0.1.22a4
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
{sarvamai-0.1.22a3.dist-info → sarvamai-0.1.22a4.dist-info}/RECORD CHANGED
@@ -1,11 +1,11 @@
1
- sarvamai/__init__.py,sha256=ST9aZ-zVv6Vh_IrhELC1WSqATPg-Gm7nbrRdK6ZEN_g,11785
1
+ sarvamai/__init__.py,sha256=vI_MiA00kkNnsVNJmIFkf1o_-vLIp8SeNy4xdzjQ_wY,11499
2
2
  sarvamai/chat/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
3
3
  sarvamai/chat/client.py,sha256=xOSj83Gr6Q7eY2qUeATiuXYQqBqWqSCQlIEopK5fKus,11022
4
4
  sarvamai/chat/raw_client.py,sha256=A2kRuZcVWlJhyYCD7YKgqNkZEp3cYa1731KhRkhirU0,17885
5
5
  sarvamai/client.py,sha256=J30X_os1lPf8Wml0KDFEf6p8VGHhgF_lf3nw1T2D3qo,8207
6
6
  sarvamai/core/__init__.py,sha256=YE2CtXeASe1RAbaI39twKWYKCuT4tW5is9HWHhJjR_g,1653
7
7
  sarvamai/core/api_error.py,sha256=44vPoTyWN59gonCIZMdzw7M1uspygiLnr3GNFOoVL2Q,614
8
- sarvamai/core/client_wrapper.py,sha256=METOzaadK7zwtrbukCi36XZ59jVQR35bQGPUzSKmwH8,2570
8
+ sarvamai/core/client_wrapper.py,sha256=4xbcx3NSJ2H-l5jU2jidpdocEhri8wqKQnNFN1Peah0,2570
9
9
  sarvamai/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
10
10
  sarvamai/core/events.py,sha256=HvKBdSoYcFetk7cgNXb7FxuY-FtY8NtUhZIN7mGVx8U,1159
11
11
  sarvamai/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
@@ -71,12 +71,12 @@ sarvamai/requests/speech_to_text_job_parameters.py,sha256=F3V6XvdUQlWlB4-5XAcRsF
71
71
  sarvamai/requests/speech_to_text_response.py,sha256=GS3jNmHDOxqNZ7cvftD62khUMSBIQUu6zEPdCqk8zJk,1041
72
72
  sarvamai/requests/speech_to_text_response_data.py,sha256=69fYRdL0tCKpgKQqwzcM4T4Nf_lRxJFh-VCFe_tN964,364
73
73
  sarvamai/requests/speech_to_text_streaming_response.py,sha256=cN5tKE9wOWuyBna4wmrf-0LfkOULMpRaJ7qjLuu76V0,348
74
- sarvamai/requests/speech_to_text_transcription_data.py,sha256=ZbrUy4SBEx8OoeOW1PwTbTq8171Z2z2H1Mh_ZZxrREI,1087
74
+ sarvamai/requests/speech_to_text_transcription_data.py,sha256=Vc65hXDq65d14cP-fDJm151bi7XEKgPItNGt1UL6cOY,877
75
75
  sarvamai/requests/speech_to_text_translate_job_parameters.py,sha256=Cco38i8IhX00S2eW3MhLekqUFMS7hZW2AwbpWyCAgpU,990
76
76
  sarvamai/requests/speech_to_text_translate_response.py,sha256=xLV2F37PkGR0erRDfTBEPWvywR8eVSL9JbH5a0C9wkY,893
77
77
  sarvamai/requests/speech_to_text_translate_response_data.py,sha256=OmjunP9R2xertJKn4fmpyzjDdj1_B_Yh6ZjH1eOlR-Q,407
78
78
  sarvamai/requests/speech_to_text_translate_streaming_response.py,sha256=KTjYZ0_oLapuM5Iiq7UwejMsrL1TGgFAW4k5l17TkZs,385
79
- sarvamai/requests/speech_to_text_translate_transcription_data.py,sha256=xBVSuFtYz-tNiE69OMFpfBnxbEG3ZMl7YMFDWoJyhWU,805
79
+ sarvamai/requests/speech_to_text_translate_transcription_data.py,sha256=oAmW5ihTd301IJYN2u2KrZxB0j3EMacFBfvIhtOSjFI,595
80
80
  sarvamai/requests/stop_configuration.py,sha256=Xmp8zyUpnN65pH5A7NqefckB8wk53_BBzOUrgRm2gXs,146
81
81
  sarvamai/requests/stt_flush_signal.py,sha256=Gb-SoPPAyVKFVPZKxebLgV4bAv21NjVgvfCl5cqcxrY,360
82
82
  sarvamai/requests/task_detail_v_1.py,sha256=2rehl7dSDSgzaw13b9bODamhiN2uB-IK4cOksq8Vmqc,582
@@ -91,32 +91,30 @@ sarvamai/speech_to_text/client.py,sha256=JZ-3ZenTTcJkSAIb7Hkj8zBS4r2TV_jlynl4Ljx
91
91
  sarvamai/speech_to_text/raw_client.py,sha256=oCGHyVtVBpXGRSWtAvR_r2j3tEumc9VEDjOYqMVxv5w,26677
92
92
  sarvamai/speech_to_text_job/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
93
93
  sarvamai/speech_to_text_job/client.py,sha256=WSGBJxYcNxl77Zd1X6VVWjg4zshqecXf6WCyhfLXVlI,18007
94
- sarvamai/speech_to_text_job/job.py,sha256=K8HOmwrYd6l82-MZfWDBmNkZeeERyg9YOihnFfvl-Js,15021
95
- sarvamai/speech_to_text_job/raw_client.py,sha256=OZTPzMhAn-ckE_xKzfZ9QLsEX5EZVOJS0Pf-PBa19jM,48200
96
- sarvamai/speech_to_text_streaming/__init__.py,sha256=6xLJUFTGQOnZEeUSV_lL9T2zpQCJ0WP_O-imYFVhrIE,643
97
- sarvamai/speech_to_text_streaming/client.py,sha256=wGTtFHGxVmpk6F9mpyzgHTcEKlTZChfbG2ytEeb9b3E,13635
98
- sarvamai/speech_to_text_streaming/raw_client.py,sha256=xlbIKnDrGggXsKubYbpbg5g7hFZ_FHGsozXT2jyxhrM,12816
94
+ sarvamai/speech_to_text_job/job.py,sha256=9AfVSp5nzrl-Cx_1n2AJZqTMzp6Dkz2cvmbdq78fCgM,18751
95
+ sarvamai/speech_to_text_job/raw_client.py,sha256=6MB82mSqAOi92mE8vUeNSTB0wuxLZYRwizt15R6r-wo,49394
96
+ sarvamai/speech_to_text_streaming/__init__.py,sha256=-7nN6AJFryjSvGHVbajYEt-vni6kNDfJUiZJFNl_ao4,535
97
+ sarvamai/speech_to_text_streaming/client.py,sha256=GU3bin0Ea6rOoX-hc9iUStPU0rh6ThezKW-r8d3NgTg,11732
98
+ sarvamai/speech_to_text_streaming/raw_client.py,sha256=lUxjNbdxfirnAcPd4mMhd0EK7c0Z1i9Fa-hHscviFmg,10913
99
99
  sarvamai/speech_to_text_streaming/socket_client.py,sha256=P6qXRN0s3UFAp6CP5lkqrW2KPK9me70ZVfWquxLB4wI,7538
100
- sarvamai/speech_to_text_streaming/types/__init__.py,sha256=cHFJXPoXFFPxkwfFIcXbESlHG9NUFWDUmLQ8bPmCgcQ,926
100
+ sarvamai/speech_to_text_streaming/types/__init__.py,sha256=_G5TSTthsnjGmwdV4fpsybjEWMMTNkh-kWXZjgK5X48,755
101
101
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_flush_signal.py,sha256=dDJOBlzAjhuiSVqW2RHHY1f6xy0DU_Yoo9UV8-7MjnA,173
102
102
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensitivity.py,sha256=OwPwffa8TkLPGMnOTn5S7d-HmV8QmN3B7fHz8I1-VT8,180
103
103
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_input_audio_codec.py,sha256=yfINVbnoiAvhDskwVkpnFv10Q9SzU104oRJR-n__ugc,584
104
104
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py,sha256=LxgEifmgWTCFZn9U-f-TWKxRPng3a2J26Zt526QrA0Y,267
105
- sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_stream_ongoing_speech_results.py,sha256=_hc9wuLDsRB6aahsmHqYK0tLoLb6MFZjNo3N7enj8mI,188
106
105
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py,sha256=8wiFOB7WDMbYCcMTYgNFJaIjEytYeXpJLwr_O_mH0TI,172
107
106
  sarvamai/speech_to_text_translate_job/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
108
107
  sarvamai/speech_to_text_translate_job/client.py,sha256=xu8kYtCESDB7LzL8YKBUq5qhTPMIl3_H3XD2L_7y4UU,18969
109
- sarvamai/speech_to_text_translate_job/job.py,sha256=DU4k3eB28V8N16M_QEchakVng4IOul6_Qrdn3FumgHA,15208
110
- sarvamai/speech_to_text_translate_job/raw_client.py,sha256=dAitbu2B9afPK6iT9zNjUJnE5BIr5-lrAlwrfwFxdkU,49507
111
- sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=KmUM04hMxqYEG6I7rDFmlCBIYgHDIG8-A9d8bKLImg4,653
112
- sarvamai/speech_to_text_translate_streaming/client.py,sha256=8Gi22KsMlhn4QKyNeH59vGGF3QpN5RwMoT2G7BjzP2E,13617
113
- sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=jrWDHke_HApWIxBeGoyFLwEwYnYtx-Mj2M5UuGp-ivk,12726
108
+ sarvamai/speech_to_text_translate_job/job.py,sha256=tL1Zemsogb_AK9wqZwN4ooPaN176sFKduTH9g87y-WU,18938
109
+ sarvamai/speech_to_text_translate_job/raw_client.py,sha256=Emx14cRiAZXg1PqZkoJbDOKwyDmOgwxWlqPkAPZ9GPU,50797
110
+ sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=s6HPwrkABpkhDSsd_t6pVRiWfY4MfVE0lVj9b4V_fx4,527
111
+ sarvamai/speech_to_text_translate_streaming/client.py,sha256=jXfucb5A5uyKgbAB_tJDYCn86Myv8k8Cs-BIBqaZGHo,11659
112
+ sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=1TKO5mrqI234wGWPlcyKS-9ObLuY6XRcgYExxv1CJhY,10768
114
113
  sarvamai/speech_to_text_translate_streaming/socket_client.py,sha256=ipEPSj5eHAyDpuEXfaP7JJL1rXJXGEo-IB888ReAFKs,8901
115
- sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=L5lAQwKmB57xee93CkgZx3DWWUNcTV-gVNCsEmmbfcs,948
114
+ sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=nsKmvwkhcPekF9kcStDhTDilALFf2jT-wfCn25KVe7U,740
116
115
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_flush_signal.py,sha256=jkjvCGJ1pFKi3AOTkwMW-lo18WGgrgAhMpoe5P0AMzA,182
117
116
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_high_vad_sensitivity.py,sha256=r6MvTlkM0VEpb4dpnMHtINOZ-gYc22o0Fx_Xce2rjvo,189
118
117
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_input_audio_codec.py,sha256=IwO6IF9BQU5MpGWs7m-w0dV8yp__7s4y3L4hHDErBWM,593
119
- sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_stream_ongoing_speech_results.py,sha256=OECioY4SsX5UKsxbE_HIIMZYQEzRg49Z0RhPSRdTfCc,197
120
118
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_vad_signals.py,sha256=EV3xd9qyKMnMvA9rO-qFDDIac4b84roBu7n-maaPxG8,181
121
119
  sarvamai/text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
122
120
  sarvamai/text/client.py,sha256=2kA0Gxfi-r52zMQdqRRD811014alzlHB_FANkp3Kn_c,30595
@@ -130,7 +128,7 @@ sarvamai/text_to_speech_streaming/raw_client.py,sha256=E-4cVcgYA1MQRq2YkLoGt1OU3
130
128
  sarvamai/text_to_speech_streaming/socket_client.py,sha256=aun1nyc5ZYziXP0FmyvkCB0WKa_VdjA0XHYI2-Z_gCo,13973
131
129
  sarvamai/text_to_speech_streaming/types/__init__.py,sha256=fx2VJtnbh4RFUETiV2aPGslEQ0lq_us4UjDbQr_2J6I,242
132
130
  sarvamai/text_to_speech_streaming/types/text_to_speech_streaming_send_completion_event.py,sha256=cZWm6cAwNwPGF8ZADtnRev_AsgM_xj5ypg2oHHQfgZI,181
133
- sarvamai/types/__init__.py,sha256=DvAsZu5BYk6rSQSZ02cl1Prc0tGgo0p_9L9uJjzpwWI,8416
131
+ sarvamai/types/__init__.py,sha256=hBtXC7VLxZdrBEPs7-Ym6c3fYlJJS1VxHbfb4J8CcEM,8334
134
132
  sarvamai/types/audio_data.py,sha256=g-sQcFXiV0uFAsUJ_TjM3OSnPoCUmNkMORRJHOl152k,1545
135
133
  sarvamai/types/audio_data_input_audio_codec.py,sha256=Xb3UEr2wTniW0z8ND4iV051YHhgFazIFOd7q7UGH3nY,572
136
134
  sarvamai/types/audio_message.py,sha256=sB4EgkWkWJzipYXobkmM9AYZTTZtCpg_ySKssUeznUE,560
@@ -145,7 +143,7 @@ sarvamai/types/chat_completion_request_system_message.py,sha256=E7YhTk1zr4u7dj_y
145
143
  sarvamai/types/chat_completion_request_user_message.py,sha256=J3WhlrfOfCCe7ugmJIfP_L9st3OFtXkIjZTSuR8O9nQ,615
146
144
  sarvamai/types/chat_completion_response_message.py,sha256=wz935eBnCkSIl0I0qMxBuH4vAUCso1aHDGReMW1VHGE,744
147
145
  sarvamai/types/choice.py,sha256=uXBCsjWP9VK3XWQWZUeI4EnU10w0G9nAfKn2tJZvxko,1244
148
- sarvamai/types/completion_event_flag.py,sha256=TzDOwE5GrC2uiVpnc9vV8y7n4uT_cr_G5FR011h_zw4,92
146
+ sarvamai/types/completion_event_flag.py,sha256=HdvjxXha9Ux5KS_Lfp7Q5eaX62eIk4bXcuJgfhGFXf4,160
149
147
  sarvamai/types/completion_usage.py,sha256=xYQGlQUbKqsksuV73H-1ajjfT5M7w47eLfdWXSlrI5M,843
150
148
  sarvamai/types/config_message.py,sha256=xLD2wZcXejYrmREMd-cn38da4hKfsNPKRtyAGCW0Zcg,779
151
149
  sarvamai/types/configure_connection.py,sha256=SnSNk02gQqP8e4VB4y88jjeFQ4ClpImjGLn2ANI8cZ4,1058
@@ -181,7 +179,6 @@ sarvamai/types/language_identification_response.py,sha256=jG4ZQ6KQHCiEDqC51OniOw
181
179
  sarvamai/types/numerals_format.py,sha256=xg3lYiHcnzyFwuwRcaIteJLH_Pz6pJ9n9kTlYPEnCBU,165
182
180
  sarvamai/types/ping_signal.py,sha256=cE53FRIXlc8bSo18z6jlAnOh6DhZEMX36huWEX6X3-A,695
183
181
  sarvamai/types/reasoning_effort.py,sha256=_TBLn3rQgzJAdnKqV2g0PETbrSBZl0fPLfQ5ZE9H4Pc,164
184
- sarvamai/types/response_speech_state.py,sha256=SYwVP-TD2nhZ2kAXw0NzuVtkRgMjbLsuXRfKHPGxhK0,231
185
182
  sarvamai/types/response_type.py,sha256=yyk0QTIQlNa9W0Uoj_5_ey_Q3Bu8Jij5GkgR0Rt_WnU,163
186
183
  sarvamai/types/role.py,sha256=3eY01zZQKB8BSD4cFDeVjz-o2qnHJKz1vnToLqbExxs,115
187
184
  sarvamai/types/sarvam_model_ids.py,sha256=iYBMglf31KQ1iUZeAMQ-2PP9NDcyHRG7goz7O9VG8qg,124
@@ -194,14 +191,14 @@ sarvamai/types/speech_to_text_model.py,sha256=DSnGHPpIT-OyRv1bpy1xqEoLw90MTKyDgc
194
191
  sarvamai/types/speech_to_text_response.py,sha256=iWRGEJeHUFIOxeEhoCQu68njeA6lcqXbT2czV-O8Wx0,1438
195
192
  sarvamai/types/speech_to_text_response_data.py,sha256=gbxZTBSjbN3ZIa10K6tWPYtymcpnQTFIaUnXkOmsmD4,322
196
193
  sarvamai/types/speech_to_text_streaming_response.py,sha256=z6tVAHbVK9lC3w3lac__LEUfO8AAzEilkeGlaLskTtc,687
197
- sarvamai/types/speech_to_text_transcription_data.py,sha256=9xJljx_Uhhr0_DQZADPHbvEYmVCNwo9XHy7_QFPVqgY,1517
194
+ sarvamai/types/speech_to_text_transcription_data.py,sha256=EqwPAPSi98PwARaTj-ufzFUSHyN-NPoPla5vi_KERrU,1297
198
195
  sarvamai/types/speech_to_text_translate_job_parameters.py,sha256=fvfcyzIyT0DtcRYePDvglHH-wAhGbsi3H5G4i5nuWT8,1409
199
196
  sarvamai/types/speech_to_text_translate_language.py,sha256=yikNM-roIumVG-eqBWss93wLGudZdLPwd0i3VcXH5zo,263
200
197
  sarvamai/types/speech_to_text_translate_model.py,sha256=CVSz6gJBY82GhhEuWSdzRLJW9XTsAgweRnKd1tN6mXo,139
201
198
  sarvamai/types/speech_to_text_translate_response.py,sha256=Z5Na7IQW2ok3TP21xd-jKkwioplEKfonNIMhoJQKkVw,1278
202
199
  sarvamai/types/speech_to_text_translate_response_data.py,sha256=_NlLVp7oQU3em_4E47QVbIP9nromPE07Z9HtMpY1lrU,359
203
200
  sarvamai/types/speech_to_text_translate_streaming_response.py,sha256=J6h3AGdAJxpODFs30bR-e6OaWKa__oVhwv_TrbPSO98,724
204
- sarvamai/types/speech_to_text_translate_transcription_data.py,sha256=SPdYTeD1H4WQiExfs-jETyTwXblrqlsthTjUHqZEWxw,1216
201
+ sarvamai/types/speech_to_text_translate_transcription_data.py,sha256=-cZZm21um6erOzx18nAgBYKA3Qc3YzNADUCnfx9mD-k,996
205
202
  sarvamai/types/spoken_form_numerals_format.py,sha256=soBly93wMkazIcp2GDM0Mf1MjY140Pe24hBlwNoWge0,169
206
203
  sarvamai/types/stop_configuration.py,sha256=yA_q4s4BIrbl3FotZpg4ZcyL10C7gVI0s2dqvH32BNw,136
207
204
  sarvamai/types/storage_container_type.py,sha256=DZXDiDj74lMmUq6jaZfIMW1zMXgoVdY6rs_FcyB9OGk,184
@@ -227,6 +224,6 @@ sarvamai/types/transliterate_mode.py,sha256=1jSEMlGcoLkWuk12TgoOpSgwifa4rThGKZ1h
227
224
  sarvamai/types/transliterate_source_language.py,sha256=bSY9wJszF0sg-Cgg6F-YcWC8ly1mIlj9rqa15-jBtx8,283
228
225
  sarvamai/types/transliteration_response.py,sha256=yt-lzTbDeJ_ZL4I8kQa6oESxA9ebeJJY7LfFHpdEsmM,815
229
226
  sarvamai/version.py,sha256=Qkp3Ee9YH-O9RTix90e0i7iNrFAGN-QDt2AFwGA4n8k,75
230
- sarvamai-0.1.22a3.dist-info/METADATA,sha256=Y_DbEm9yqps7R3sCQuZrxb6E73ww6uGni-biEMYFurQ,26753
231
- sarvamai-0.1.22a3.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
232
- sarvamai-0.1.22a3.dist-info/RECORD,,
227
+ sarvamai-0.1.22a4.dist-info/METADATA,sha256=UrGlg-Q0Vo4UkotwZRRtKtvLu8vwJNYxGAbkxBS87_I,26753
228
+ sarvamai-0.1.22a4.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
229
+ sarvamai-0.1.22a4.dist-info/RECORD,,
sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_stream_ongoing_speech_results.py DELETED
@@ -1,5 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import typing
4
-
5
- SpeechToTextStreamingStreamOngoingSpeechResults = typing.Union[typing.Literal["true", "false"], typing.Any]
sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_stream_ongoing_speech_results.py DELETED
@@ -1,5 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import typing
4
-
5
- SpeechToTextTranslateStreamingStreamOngoingSpeechResults = typing.Union[typing.Literal["true", "false"], typing.Any]
sarvamai/types/response_speech_state.py DELETED
@@ -1,7 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import typing
4
-
5
- ResponseSpeechState = typing.Union[
6
- typing.Literal["START_SPEECH_TENTATIVE", "VAD_MISFIRE", "START_SPEECH", "END_SPEECH", "ONGOING"], typing.Any
7
- ]
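ResponseSpeechState and the optional response_speech_state field are gone from both transcription data models, and "from sarvamai import ResponseSpeechState" now raises ImportError. A defensive access pattern for code that must run against both 0.1.22a3 and 0.1.22a4; the handle_speech_state callback is hypothetical.

# Read the field only if the installed SDK version still provides it.
speech_state = getattr(transcription_data, "response_speech_state", None)
if speech_state is not None:
    # Legacy values included "START_SPEECH_TENTATIVE", "VAD_MISFIRE", "START_SPEECH",
    # "END_SPEECH" and "ONGOING" (see the deleted alias above).
    handle_speech_state(speech_state)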