sarvamai 0.1.23a4__py3-none-any.whl → 0.1.23a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. sarvamai/__init__.py +4 -0
  2. sarvamai/core/client_wrapper.py +2 -2
  3. sarvamai/requests/speech_to_text_job_parameters.py +37 -5
  4. sarvamai/requests/speech_to_text_response.py +6 -14
  5. sarvamai/requests/speech_to_text_transcription_data.py +14 -0
  6. sarvamai/requests/speech_to_text_translate_job_parameters.py +4 -1
  7. sarvamai/requests/speech_to_text_translate_response.py +6 -9
  8. sarvamai/requests/speech_to_text_translate_transcription_data.py +13 -0
  9. sarvamai/speech_to_text/client.py +84 -26
  10. sarvamai/speech_to_text/raw_client.py +84 -26
  11. sarvamai/speech_to_text_streaming/__init__.py +2 -0
  12. sarvamai/speech_to_text_streaming/client.py +117 -18
  13. sarvamai/speech_to_text_streaming/raw_client.py +117 -18
  14. sarvamai/speech_to_text_streaming/types/__init__.py +2 -0
  15. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_input_audio_codec.py +7 -0
  16. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py +25 -1
  17. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_mode.py +1 -1
  18. sarvamai/speech_to_text_translate_streaming/__init__.py +2 -0
  19. sarvamai/speech_to_text_translate_streaming/client.py +23 -2
  20. sarvamai/speech_to_text_translate_streaming/raw_client.py +23 -2
  21. sarvamai/speech_to_text_translate_streaming/types/__init__.py +2 -0
  22. sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_input_audio_codec.py +7 -0
  23. sarvamai/types/mode.py +1 -3
  24. sarvamai/types/speech_to_text_job_parameters.py +37 -5
  25. sarvamai/types/speech_to_text_language.py +24 -1
  26. sarvamai/types/speech_to_text_model.py +1 -3
  27. sarvamai/types/speech_to_text_response.py +6 -14
  28. sarvamai/types/speech_to_text_transcription_data.py +14 -0
  29. sarvamai/types/speech_to_text_translate_job_parameters.py +4 -1
  30. sarvamai/types/speech_to_text_translate_language.py +25 -1
  31. sarvamai/types/speech_to_text_translate_model.py +1 -1
  32. sarvamai/types/speech_to_text_translate_response.py +6 -9
  33. sarvamai/types/speech_to_text_translate_transcription_data.py +13 -0
  34. {sarvamai-0.1.23a4.dist-info → sarvamai-0.1.23a6.dist-info}/METADATA +1 -1
  35. {sarvamai-0.1.23a4.dist-info → sarvamai-0.1.23a6.dist-info}/RECORD +36 -34
  36. {sarvamai-0.1.23a4.dist-info → sarvamai-0.1.23a6.dist-info}/WHEEL +0 -0
sarvamai/speech_to_text_translate_streaming/raw_client.py CHANGED
@@ -14,6 +14,7 @@ from .types.speech_to_text_translate_streaming_flush_signal import SpeechToTextT
  from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
  SpeechToTextTranslateStreamingHighVadSensitivity,
  )
+ from .types.speech_to_text_translate_streaming_input_audio_codec import SpeechToTextTranslateStreamingInputAudioCodec
  from .types.speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
 
  try:
@@ -35,6 +36,7 @@ class RawSpeechToTextTranslateStreamingClient:
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
+ input_audio_codec: typing.Optional[SpeechToTextTranslateStreamingInputAudioCodec] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -48,7 +50,10 @@ class RawSpeechToTextTranslateStreamingClient:
  Parameters
  ----------
  model : typing.Optional[typing.Literal["saaras:v2.5"]]
- Speech to text model to use (defaults to "saaras:v2.5" if not specified)
+ Model to be used for speech to text translation.
+
+ - **saaras:v2.5** (default): Translation model that translates audio from any spoken Indic language to English.
+ - Example: Hindi audio → English text output
 
  sample_rate : typing.Optional[str]
  Audio sample rate for the WebSocket connection. When specified as a connection parameter, only 16kHz and 8kHz are supported. 8kHz is only available via this connection parameter. If not specified, defaults to 16kHz.
@@ -62,6 +67,10 @@ class RawSpeechToTextTranslateStreamingClient:
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
  Signal to flush the audio buffer and finalize transcription and translation
 
+ input_audio_codec : typing.Optional[SpeechToTextTranslateStreamingInputAudioCodec]
+ Audio codec/format of the input stream. Use this when sending raw PCM audio.
+ Supported values: wav, pcm_s16le, pcm_l16, pcm_raw.
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication
 
@@ -84,6 +93,8 @@ class RawSpeechToTextTranslateStreamingClient:
  query_params = query_params.add("vad_signals", vad_signals)
  if flush_signal is not None:
  query_params = query_params.add("flush_signal", flush_signal)
+ if input_audio_codec is not None:
+ query_params = query_params.add("input_audio_codec", input_audio_codec)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
@@ -121,6 +132,7 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
  high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
  vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
  flush_signal: typing.Optional[SpeechToTextTranslateStreamingFlushSignal] = None,
+ input_audio_codec: typing.Optional[SpeechToTextTranslateStreamingInputAudioCodec] = None,
  api_subscription_key: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -134,7 +146,10 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
  Parameters
  ----------
  model : typing.Optional[typing.Literal["saaras:v2.5"]]
- Speech to text model to use (defaults to "saaras:v2.5" if not specified)
+ Model to be used for speech to text translation.
+
+ - **saaras:v2.5** (default): Translation model that translates audio from any spoken Indic language to English.
+ - Example: Hindi audio → English text output
 
  sample_rate : typing.Optional[str]
  Audio sample rate for the WebSocket connection. When specified as a connection parameter, only 16kHz and 8kHz are supported. 8kHz is only available via this connection parameter. If not specified, defaults to 16kHz.
@@ -148,6 +163,10 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
  flush_signal : typing.Optional[SpeechToTextTranslateStreamingFlushSignal]
  Signal to flush the audio buffer and finalize transcription and translation
 
+ input_audio_codec : typing.Optional[SpeechToTextTranslateStreamingInputAudioCodec]
+ Audio codec/format of the input stream. Use this when sending raw PCM audio.
+ Supported values: wav, pcm_s16le, pcm_l16, pcm_raw.
+
  api_subscription_key : typing.Optional[str]
  API subscription key for authentication
 
@@ -170,6 +189,8 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
  query_params = query_params.add("vad_signals", vad_signals)
  if flush_signal is not None:
  query_params = query_params.add("flush_signal", flush_signal)
+ if input_audio_codec is not None:
+ query_params = query_params.add("input_audio_codec", input_audio_codec)
  ws_url = ws_url + f"?{query_params}"
  headers = self._client_wrapper.get_headers()
  if api_subscription_key is not None:
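For reference, a hypothetical connection sketch using the new input_audio_codec parameter. The high-level entry point (SarvamAI(...).speech_to_text_translate_streaming.connect(...)) and its exact signature are assumptions based on the generated SDK layout; only the parameter names documented above are confirmed by this diff.

```python
# Hypothetical usage sketch (not taken from this diff): declare that the frames
# sent over the WebSocket are raw 16-bit little-endian PCM instead of WAV.
from sarvamai import SarvamAI  # assumed public entry point of the generated SDK

client = SarvamAI(api_subscription_key="YOUR_KEY")

# connect(...) is assumed to mirror the parameters documented above.
with client.speech_to_text_translate_streaming.connect(
    model="saaras:v2.5",
    sample_rate="16000",
    input_audio_codec="pcm_s16le",  # new in 0.1.23a6: wav, pcm_s16le, pcm_l16, pcm_raw
) as socket:
    ...  # stream audio chunks and read translated transcripts from the socket
```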
sarvamai/speech_to_text_translate_streaming/types/__init__.py CHANGED
@@ -4,10 +4,12 @@
 
  from .speech_to_text_translate_streaming_flush_signal import SpeechToTextTranslateStreamingFlushSignal
  from .speech_to_text_translate_streaming_high_vad_sensitivity import SpeechToTextTranslateStreamingHighVadSensitivity
+ from .speech_to_text_translate_streaming_input_audio_codec import SpeechToTextTranslateStreamingInputAudioCodec
  from .speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
 
  __all__ = [
  "SpeechToTextTranslateStreamingFlushSignal",
  "SpeechToTextTranslateStreamingHighVadSensitivity",
+ "SpeechToTextTranslateStreamingInputAudioCodec",
  "SpeechToTextTranslateStreamingVadSignals",
  ]
sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_input_audio_codec.py ADDED
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ SpeechToTextTranslateStreamingInputAudioCodec = typing.Union[
+ typing.Literal["wav", "pcm_s16le", "pcm_l16", "pcm_raw"], typing.Any
+ ]
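The new alias follows the SDK's usual open-enum pattern: the Literal lists the codecs documented today, while the typing.Any arm keeps values added later on the server from becoming type errors in older SDK versions. A standalone sketch of that behaviour:

```python
# Standalone illustration of the open-enum pattern used by the generated type.
import typing

SpeechToTextTranslateStreamingInputAudioCodec = typing.Union[
    typing.Literal["wav", "pcm_s16le", "pcm_l16", "pcm_raw"], typing.Any
]

known: SpeechToTextTranslateStreamingInputAudioCodec = "pcm_s16le"  # documented codec
future: SpeechToTextTranslateStreamingInputAudioCodec = "opus"      # hypothetical value, still accepted via typing.Any
```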
sarvamai/types/mode.py CHANGED
@@ -2,6 +2,4 @@
 
  import typing
 
- Mode = typing.Union[
- typing.Literal["transcribe", "translate", "indic-en", "verbatim", "translit", "codemix"], typing.Any
- ]
+ Mode = typing.Union[typing.Literal["transcribe", "translate", "verbatim", "translit", "codemix"], typing.Any]
sarvamai/types/speech_to_text_job_parameters.py CHANGED
@@ -12,20 +12,52 @@ from .speech_to_text_translate_language import SpeechToTextTranslateLanguage
  class SpeechToTextJobParameters(UniversalBaseModel):
  language_code: typing.Optional[SpeechToTextTranslateLanguage] = pydantic.Field(default=None)
  """
- Language code
+ Specifies the language of the input audio in BCP-47 format.
+
+ **Available Options:**
+ - `unknown` (default): Use when the language is not known; the API will auto-detect.
+ - `hi-IN`: Hindi
+ - `bn-IN`: Bengali
+ - `kn-IN`: Kannada
+ - `ml-IN`: Malayalam
+ - `mr-IN`: Marathi
+ - `od-IN`: Odia
+ - `pa-IN`: Punjabi
+ - `ta-IN`: Tamil
+ - `te-IN`: Telugu
+ - `en-IN`: English
+ - `gu-IN`: Gujarati
  """
 
  model: typing.Optional[SpeechToTextModel] = pydantic.Field(default=None)
  """
  Model to be used for speech to text.
- - **saarika:v2.5** (default)
- - **saarika:v3**: Advanced transcription model
- - **saaras:v3**: Advanced model with multiple modes
+
+ - **saarika:v2.5** (default): Transcribes audio in the spoken language.
+
+ - **saaras:v3**: State-of-the-art model with flexible output formats. Supports multiple modes via the `mode` parameter: transcribe, translate, verbatim, translit, codemix.
  """
 
  mode: typing.Optional[Mode] = pydantic.Field(default=None)
  """
- Mode of operation. Only applicable for saaras:v3 model.
+ Mode of operation. **Only applicable when using saaras:v3 model.**
+
+ Example audio: 'मेरा फोन नंबर है 9840950950'
+
+ - **transcribe** (default): Standard transcription in the original language with proper formatting and number normalization.
+ - Output: `मेरा फोन नंबर है 9840950950`
+
+ - **translate**: Translates speech from any supported Indic language to English.
+ - Output: `My phone number is 9840950950`
+
+ - **verbatim**: Exact word-for-word transcription without normalization, preserving filler words and spoken numbers as-is.
+ - Output: `मेरा फोन नंबर है नौ आठ चार zero नौ पांच zero नौ पांच zero`
+
+ - **translit**: Romanization - Transliterates speech to Latin/Roman script only.
+ - Output: `mera phone number hai 9840950950`
+
+ - **codemix**: Code-mixed text with English words in English and Indic words in native script.
+ - Output: `मेरा phone number है 9840950950`
  """
 
  with_timestamps: typing.Optional[bool] = pydantic.Field(default=None)
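A hedged construction example for the expanded job parameters; the import path and the assumption that no other fields are required are inferred from the fields visible in this hunk, not from the full module.

```python
# Illustrative only: batch-job parameters using the saaras:v3 mode switch.
from sarvamai.types import SpeechToTextJobParameters  # import path assumed

params = SpeechToTextJobParameters(
    language_code="hi-IN",   # or "unknown" to let the API auto-detect
    model="saaras:v3",       # mode is only honoured by saaras:v3
    mode="codemix",          # transcribe | translate | verbatim | translit | codemix
    with_timestamps=True,
)
print(params.model, params.mode)
```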
sarvamai/types/speech_to_text_language.py CHANGED
@@ -4,7 +4,30 @@ import typing
 
  SpeechToTextLanguage = typing.Union[
  typing.Literal[
- "unknown", "hi-IN", "bn-IN", "kn-IN", "ml-IN", "mr-IN", "od-IN", "pa-IN", "ta-IN", "te-IN", "en-IN", "gu-IN"
+ "unknown",
+ "hi-IN",
+ "bn-IN",
+ "kn-IN",
+ "ml-IN",
+ "mr-IN",
+ "od-IN",
+ "pa-IN",
+ "ta-IN",
+ "te-IN",
+ "en-IN",
+ "gu-IN",
+ "as-IN",
+ "ur-IN",
+ "ne-IN",
+ "kok-IN",
+ "ks-IN",
+ "sd-IN",
+ "sa-IN",
+ "sat-IN",
+ "mni-IN",
+ "brx-IN",
+ "mai-IN",
+ "doi-IN",
  ],
  typing.Any,
  ]
sarvamai/types/speech_to_text_model.py CHANGED
@@ -2,6 +2,4 @@
 
  import typing
 
- SpeechToTextModel = typing.Union[
- typing.Literal["saarika:v2.5", "saarika:v1", "saarika:v2", "saarika:flash", "saaras:v3"], typing.Any
- ]
+ SpeechToTextModel = typing.Union[typing.Literal["saarika:v2.5", "saaras:v3"], typing.Any]
sarvamai/types/speech_to_text_response.py CHANGED
@@ -4,30 +4,22 @@ import typing
 
  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .diarized_transcript import DiarizedTranscript
- from .timestamps_model import TimestampsModel
 
 
  class SpeechToTextResponse(UniversalBaseModel):
- request_id: typing.Optional[str] = None
- transcript: str = pydantic.Field()
+ request_id: str = pydantic.Field()
  """
- The transcribed text from the provided audio file.
+ Unique identifier for the request
  """
 
- timestamps: typing.Optional[TimestampsModel] = pydantic.Field(default=None)
- """
- Contains timestamps for the transcribed text. This field is included only if with_timestamps is set to true
- """
-
- diarized_transcript: typing.Optional[DiarizedTranscript] = pydantic.Field(default=None)
+ transcript: str = pydantic.Field()
  """
- Diarized transcript of the provided speech
+ The transcribed text from the provided audio file.
  """
 
- language_code: typing.Optional[str] = pydantic.Field(default=None)
+ language_code: str = pydantic.Field()
  """
- This will return the BCP-47 code of language spoken in the input. If multiple languages are detected, this will return language code of most predominant spoken language. If no language is detected, this will be null
+ The BCP-47 code of language spoken in the input (e.g., hi-IN, en-IN). If multiple languages are detected, returns the most predominant spoken language.
  """
 
  if IS_PYDANTIC_V2:
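A minimal sketch of the tightened response model, assuming the type is re-exported from sarvamai.types: request_id, transcript and language_code are now plain required fields, and timestamps/diarized transcripts no longer live on this model.

```python
# Minimal sketch: all three fields are now required at validation time.
from sarvamai.types import SpeechToTextResponse  # import path assumed

resp = SpeechToTextResponse(
    request_id="req_123",
    transcript="मेरा फोन नंबर है 9840950950",
    language_code="hi-IN",
)
assert resp.language_code == "hi-IN"
```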
sarvamai/types/speech_to_text_transcription_data.py CHANGED
@@ -33,6 +33,20 @@ class SpeechToTextTranscriptionData(UniversalBaseModel):
  BCP-47 code of detected language
  """
 
+ language_probability: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Float value (0.0 to 1.0) indicating the probability of the detected language being correct. Higher values indicate higher confidence.
+
+ **When it returns a value:**
+ - When `language_code` is not provided in the request
+ - When `language_code` is set to `unknown`
+
+ **When it returns null:**
+ - When a specific `language_code` is provided (language detection is skipped)
+
+ The parameter is always present in the response.
+ """
+
  metrics: TranscriptionMetrics
 
  if IS_PYDANTIC_V2:
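A short consumption sketch for the new field; the handler name and how the data object reaches it are assumptions, and only language_code and language_probability come from this hunk.

```python
# Sketch: branch on language_probability to tell auto-detected languages apart
# from ones that were pinned by the request.
from sarvamai.types import SpeechToTextTranscriptionData  # import path assumed

def on_transcription(data: SpeechToTextTranscriptionData) -> None:
    if data.language_probability is not None:
        # language_code was omitted or set to "unknown", so the API detected it
        print(f"detected {data.language_code} (confidence {data.language_probability:.2f})")
    else:
        # a specific language_code was supplied; detection was skipped
        print(f"using requested language {data.language_code}")
```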
sarvamai/types/speech_to_text_translate_job_parameters.py CHANGED
@@ -15,7 +15,10 @@ class SpeechToTextTranslateJobParameters(UniversalBaseModel):
 
  model: typing.Optional[SpeechToTextTranslateModel] = pydantic.Field(default=None)
  """
- Model to be used for converting speech to text in target language
+ Model to be used for speech to text translation.
+
+ - **saaras:v2.5** (default): Translation model that translates audio from any spoken Indic language to English.
+ - Example: Hindi audio → English text output
  """
 
  with_diarization: typing.Optional[bool] = pydantic.Field(default=None)
sarvamai/types/speech_to_text_translate_language.py CHANGED
@@ -3,6 +3,30 @@
  import typing
 
  SpeechToTextTranslateLanguage = typing.Union[
- typing.Literal["hi-IN", "bn-IN", "kn-IN", "ml-IN", "mr-IN", "od-IN", "pa-IN", "ta-IN", "te-IN", "gu-IN", "en-IN"],
+ typing.Literal[
+ "hi-IN",
+ "bn-IN",
+ "kn-IN",
+ "ml-IN",
+ "mr-IN",
+ "od-IN",
+ "pa-IN",
+ "ta-IN",
+ "te-IN",
+ "gu-IN",
+ "en-IN",
+ "as-IN",
+ "ur-IN",
+ "ne-IN",
+ "kok-IN",
+ "ks-IN",
+ "sd-IN",
+ "sa-IN",
+ "sat-IN",
+ "mni-IN",
+ "brx-IN",
+ "mai-IN",
+ "doi-IN",
+ ],
  typing.Any,
  ]
sarvamai/types/speech_to_text_translate_model.py CHANGED
@@ -2,4 +2,4 @@
 
  import typing
 
- SpeechToTextTranslateModel = typing.Union[typing.Literal["saaras:v2.5", "saaras:v3"], typing.Any]
+ SpeechToTextTranslateModel = typing.Literal["saaras:v2.5"]
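Since the alias is now a closed Literal rather than an open Union, static checkers will flag any other value; a tiny sketch of the consequence:

```python
# Sketch: the narrowed alias only admits "saaras:v2.5" for type checkers.
import typing

SpeechToTextTranslateModel = typing.Literal["saaras:v2.5"]

model: SpeechToTextTranslateModel = "saaras:v2.5"  # OK
# model = "saaras:v3"  # now rejected by mypy/pyright, matching the removed option
```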
sarvamai/types/speech_to_text_translate_response.py CHANGED
@@ -4,25 +4,22 @@ import typing
 
  import pydantic
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
- from .diarized_transcript import DiarizedTranscript
- from .speech_to_text_translate_language import SpeechToTextTranslateLanguage
 
 
  class SpeechToTextTranslateResponse(UniversalBaseModel):
- request_id: typing.Optional[str] = None
- transcript: str = pydantic.Field()
+ request_id: str = pydantic.Field()
  """
- Transcript of the provided speech
+ Unique identifier for the request
  """
 
- language_code: typing.Optional[SpeechToTextTranslateLanguage] = pydantic.Field(default=None)
+ transcript: str = pydantic.Field()
  """
- This will return the BCP-47 code of language spoken in the input. If multiple languages are detected, this will return language code of most predominant spoken language. If no language is detected, this will be null
+ English translation of the provided speech
  """
 
- diarized_transcript: typing.Optional[DiarizedTranscript] = pydantic.Field(default=None)
+ language_code: str = pydantic.Field()
  """
- Diarized transcript of the provided speech
+ The BCP-47 code of the detected source language spoken in the input (e.g., hi-IN, kn-IN).
  """
 
  if IS_PYDANTIC_V2:
sarvamai/types/speech_to_text_translate_transcription_data.py CHANGED
@@ -23,6 +23,19 @@ class SpeechToTextTranslateTranscriptionData(UniversalBaseModel):
  BCP-47 code of detected source language (null when language detection is in progress)
  """
 
+ language_probability: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Float value (0.0 to 1.0) indicating the probability of the detected source language being correct. Higher values indicate higher confidence.
+
+ **When it returns a value:**
+ - Always returns a value as source language is auto-detected for translation
+
+ **When it returns null:**
+ - When language detection confidence is unavailable
+
+ The parameter is always present in the response.
+ """
+
  metrics: TranscriptionMetrics
 
  if IS_PYDANTIC_V2:
{sarvamai-0.1.23a4.dist-info → sarvamai-0.1.23a6.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sarvamai
- Version: 0.1.23a4
+ Version: 0.1.23a6
  Summary:
  Requires-Python: >=3.8,<4.0
  Classifier: Intended Audience :: Developers
{sarvamai-0.1.23a4.dist-info → sarvamai-0.1.23a6.dist-info}/RECORD CHANGED
@@ -1,11 +1,11 @@
- sarvamai/__init__.py,sha256=Lm2OWqz5V4YobtV9iX7XnEF7EsXEKg6scXqzdBECti4,11531
+ sarvamai/__init__.py,sha256=YgHvuv6dxlTz4zYxWfRcgZKp1imFf0gmwvCQRclWOj0,11721
  sarvamai/chat/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
  sarvamai/chat/client.py,sha256=xOSj83Gr6Q7eY2qUeATiuXYQqBqWqSCQlIEopK5fKus,11022
  sarvamai/chat/raw_client.py,sha256=A2kRuZcVWlJhyYCD7YKgqNkZEp3cYa1731KhRkhirU0,17885
  sarvamai/client.py,sha256=J30X_os1lPf8Wml0KDFEf6p8VGHhgF_lf3nw1T2D3qo,8207
  sarvamai/core/__init__.py,sha256=YE2CtXeASe1RAbaI39twKWYKCuT4tW5is9HWHhJjR_g,1653
  sarvamai/core/api_error.py,sha256=44vPoTyWN59gonCIZMdzw7M1uspygiLnr3GNFOoVL2Q,614
- sarvamai/core/client_wrapper.py,sha256=bEnbr4OmkyHdmJbqD-Hw3UlyjXlCyoV_Uc6OzqGk6u4,2570
+ sarvamai/core/client_wrapper.py,sha256=Ijmv6KUlK_OtCy7p9OYCUSgVRv62-xOAyyAvR5uw3AY,2570
  sarvamai/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
  sarvamai/core/events.py,sha256=HvKBdSoYcFetk7cgNXb7FxuY-FtY8NtUhZIN7mGVx8U,1159
  sarvamai/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
@@ -67,16 +67,16 @@ sarvamai/requests/language_identification_response.py,sha256=BdS5U9Gic-71vb--ph6
  sarvamai/requests/ping_signal.py,sha256=TSgmfz2k4X1L6TzvX8u2SKZ6XQY3bSf7nPZf8mUViaM,343
  sarvamai/requests/send_text.py,sha256=DWzbNgeNN2xSIYgk2zEisgLqjwq5oleqJVHrtOnIqbE,267
  sarvamai/requests/send_text_data.py,sha256=2jds-xd77u-YTgIWQsTUBKE-_7tsrFshXXuC_Ld4ULo,161
- sarvamai/requests/speech_to_text_job_parameters.py,sha256=QxiiqhjL58mw8nw6SjxuM-221OrlIPF8wWhom5rS_T8,1303
- sarvamai/requests/speech_to_text_response.py,sha256=GS3jNmHDOxqNZ7cvftD62khUMSBIQUu6zEPdCqk8zJk,1041
+ sarvamai/requests/speech_to_text_job_parameters.py,sha256=C1dpp5IkylFXdSNfKDhSo8jbFUquFh3SURFVME9XXq4,2854
+ sarvamai/requests/speech_to_text_response.py,sha256=5Le4BCjYQcf87seNWdrFhHi5YHRJ-ljMLv3nTwsNnWc,517
  sarvamai/requests/speech_to_text_response_data.py,sha256=69fYRdL0tCKpgKQqwzcM4T4Nf_lRxJFh-VCFe_tN964,364
  sarvamai/requests/speech_to_text_streaming_response.py,sha256=cN5tKE9wOWuyBna4wmrf-0LfkOULMpRaJ7qjLuu76V0,348
- sarvamai/requests/speech_to_text_transcription_data.py,sha256=Vc65hXDq65d14cP-fDJm151bi7XEKgPItNGt1UL6cOY,877
- sarvamai/requests/speech_to_text_translate_job_parameters.py,sha256=Cco38i8IhX00S2eW3MhLekqUFMS7hZW2AwbpWyCAgpU,990
- sarvamai/requests/speech_to_text_translate_response.py,sha256=xLV2F37PkGR0erRDfTBEPWvywR8eVSL9JbH5a0C9wkY,893
+ sarvamai/requests/speech_to_text_transcription_data.py,sha256=6YjW2ySX-yIql9MGM02wMue4lNOX1rwif8eSg7jQWo0,1413
+ sarvamai/requests/speech_to_text_translate_job_parameters.py,sha256=Q1mL3ul3WYArryp-HP_wHc8WCTLq6ZFhCUFeH3NM80E,1147
+ sarvamai/requests/speech_to_text_translate_response.py,sha256=pVxKjb9DAtlXM1jXA94TmCAzOrmzIuMjcM-mUwTnDRA,456
  sarvamai/requests/speech_to_text_translate_response_data.py,sha256=OmjunP9R2xertJKn4fmpyzjDdj1_B_Yh6ZjH1eOlR-Q,407
  sarvamai/requests/speech_to_text_translate_streaming_response.py,sha256=KTjYZ0_oLapuM5Iiq7UwejMsrL1TGgFAW4k5l17TkZs,385
- sarvamai/requests/speech_to_text_translate_transcription_data.py,sha256=oAmW5ihTd301IJYN2u2KrZxB0j3EMacFBfvIhtOSjFI,595
+ sarvamai/requests/speech_to_text_translate_transcription_data.py,sha256=AEECwgmzR_uInedR38jiksZN8wx_l3sQKQA2dLCi13c,1088
  sarvamai/requests/stop_configuration.py,sha256=Xmp8zyUpnN65pH5A7NqefckB8wk53_BBzOUrgRm2gXs,146
  sarvamai/requests/stt_flush_signal.py,sha256=Gb-SoPPAyVKFVPZKxebLgV4bAv21NjVgvfCl5cqcxrY,360
  sarvamai/requests/task_detail_v_1.py,sha256=2rehl7dSDSgzaw13b9bODamhiN2uB-IK4cOksq8Vmqc,582
@@ -87,34 +87,36 @@ sarvamai/requests/transcription_metrics.py,sha256=FDclX2Z9Z3azrDXxtZW8xbkxxWMZQX
  sarvamai/requests/translation_response.py,sha256=8iwQeZB1purHY757bIQI-n9QeVRBItaAVcBJ_la-k1Y,414
  sarvamai/requests/transliteration_response.py,sha256=KqRkqnegLmt7LjdVxjRePX6RoqaLm64KFGZ6q7mXyfw,426
  sarvamai/speech_to_text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
- sarvamai/speech_to_text/client.py,sha256=3RxDjOGAWeb_sfSg6hbeQw5qIU98zDsZs1GHrqpT8jA,13741
- sarvamai/speech_to_text/raw_client.py,sha256=rhs_5Td7aKLapSTut4DFL6fhtAsYcOalrJt7KGNnioQ,28039
+ sarvamai/speech_to_text/client.py,sha256=K9lb57rQRfYwwAonj7BSP9aKI7io_fXhjn79M-Brwag,16483
+ sarvamai/speech_to_text/raw_client.py,sha256=Rw0x9ipXFWs6xF4aly4DgIN9tqdPZyLz5Powg2ZnQkA,30781
  sarvamai/speech_to_text_job/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
  sarvamai/speech_to_text_job/client.py,sha256=nMhBJa1rf1qQdQhlB1WUtyzOGhsWxECCPAJwBvWnq8M,18930
  sarvamai/speech_to_text_job/job.py,sha256=9AfVSp5nzrl-Cx_1n2AJZqTMzp6Dkz2cvmbdq78fCgM,18751
  sarvamai/speech_to_text_job/raw_client.py,sha256=6MB82mSqAOi92mE8vUeNSTB0wuxLZYRwizt15R6r-wo,49394
- sarvamai/speech_to_text_streaming/__init__.py,sha256=5l81Q5goyVA8oC5cKaS9-Hv4_PR2nYC318VcmaUTpg8,579
- sarvamai/speech_to_text_streaming/client.py,sha256=F8j1DALzxClbE1FRPqF5JQwRn6StOuHCuymGItXI1JI,12063
- sarvamai/speech_to_text_streaming/raw_client.py,sha256=5sWz5b0JpnvsDaszdg-foGOvKa_x_OqKE7O3yo1ESfI,11244
+ sarvamai/speech_to_text_streaming/__init__.py,sha256=D_WTGMhL_12vOb4IazZpC3o91HKFgPRNjs2r0EEyPBk,665
+ sarvamai/speech_to_text_streaming/client.py,sha256=-6fmOpPZXu6iRHSrWCU6mrWUhE24_AIzyBj9yljpxfY,16686
+ sarvamai/speech_to_text_streaming/raw_client.py,sha256=jJIiKxj4QmU3HAu-ZVk7vZnPWO3kOXXUXVWT37o6hIg,15867
  sarvamai/speech_to_text_streaming/socket_client.py,sha256=P6qXRN0s3UFAp6CP5lkqrW2KPK9me70ZVfWquxLB4wI,7538
- sarvamai/speech_to_text_streaming/types/__init__.py,sha256=M6FNnzx7E98J7AzP2oU_94qVsWoxaYZG9_Syc5p5qQg,825
+ sarvamai/speech_to_text_streaming/types/__init__.py,sha256=u6sdJI-GFD-CAKX7xFOeS64qwbDW5xuICovpYwBzwLY,962
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_flush_signal.py,sha256=dDJOBlzAjhuiSVqW2RHHY1f6xy0DU_Yoo9UV8-7MjnA,173
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensitivity.py,sha256=OwPwffa8TkLPGMnOTn5S7d-HmV8QmN3B7fHz8I1-VT8,180
- sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py,sha256=LxgEifmgWTCFZn9U-f-TWKxRPng3a2J26Zt526QrA0Y,267
- sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_mode.py,sha256=ughojb-xTiqiV-PnzAWyl6t9zSfIjFTdPa_b6XwLsHQ,229
+ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_input_audio_codec.py,sha256=dxnhjo9zo9WFk8CVUklubPhMaTxlaRvgFtuMmeQcAqo,208
+ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py,sha256=7JdW_xi7IljKpdpOfwgNqQnfpng8VwASVPJ4QuOMh24,572
+ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_mode.py,sha256=jce75h2M9dEMD_eC29zCLQZFbLfCy8sdxEIISxtfubQ,217
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_model.py,sha256=CpWC1HmcPJKWCSHhTYSaC8_pMsfNTBA-EHq-sfCjS-A,179
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py,sha256=8wiFOB7WDMbYCcMTYgNFJaIjEytYeXpJLwr_O_mH0TI,172
  sarvamai/speech_to_text_translate_job/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
  sarvamai/speech_to_text_translate_job/client.py,sha256=xu8kYtCESDB7LzL8YKBUq5qhTPMIl3_H3XD2L_7y4UU,18969
  sarvamai/speech_to_text_translate_job/job.py,sha256=tL1Zemsogb_AK9wqZwN4ooPaN176sFKduTH9g87y-WU,18938
  sarvamai/speech_to_text_translate_job/raw_client.py,sha256=Emx14cRiAZXg1PqZkoJbDOKwyDmOgwxWlqPkAPZ9GPU,50797
- sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=HYq3MzUyWa4Kt1ou6vgAkbMCHoIcXzoqn6V1kya5c6g,423
- sarvamai/speech_to_text_translate_streaming/client.py,sha256=bQ2T3frXMQe8-L42zEQf-zPcHpnL7mVWPyMxcMp59cw,10433
- sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=j-SxOkqchXIbOIBKvaEPAzOCYw4aXrLcS073OMhNS0c,9542
+ sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=s6HPwrkABpkhDSsd_t6pVRiWfY4MfVE0lVj9b4V_fx4,527
+ sarvamai/speech_to_text_translate_streaming/client.py,sha256=QiKQWWjX4iVTLF6YwmeJKcqzU9TNFzENCJeI4xk2ndQ,11813
+ sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=lUiORV21loTC3Fq5m8D_XJhxGdFFOfnmDNem3v2B64Y,10922
  sarvamai/speech_to_text_translate_streaming/socket_client.py,sha256=ipEPSj5eHAyDpuEXfaP7JJL1rXJXGEo-IB888ReAFKs,8901
- sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=NFfpVz1gILXj96rWMFw4MgSpni0Yvs8CHgwJ9Xry7OU,575
+ sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=nsKmvwkhcPekF9kcStDhTDilALFf2jT-wfCn25KVe7U,740
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_flush_signal.py,sha256=jkjvCGJ1pFKi3AOTkwMW-lo18WGgrgAhMpoe5P0AMzA,182
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_high_vad_sensitivity.py,sha256=r6MvTlkM0VEpb4dpnMHtINOZ-gYc22o0Fx_Xce2rjvo,189
+ sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_input_audio_codec.py,sha256=x8K3YSZ_mVkfiyhmRk8RvEInw3KTOwCapj5q2tOPggo,217
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_vad_signals.py,sha256=EV3xd9qyKMnMvA9rO-qFDDIac4b84roBu7n-maaPxG8,181
  sarvamai/text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
  sarvamai/text/client.py,sha256=jNOHjc3Pu7xsnuDMuf7xX3WHAIO7B-AgMgKj3pL3eZA,29657
@@ -177,7 +179,7 @@ sarvamai/types/input_audio_codec.py,sha256=P3rz6lg-T34E7psQ1DX4e2yvPxVJDDsMVn4TS
  sarvamai/types/job_state.py,sha256=H6Zph2mIcjsd3upEDt1VzIEORkEpnIDs0kH8BvIyrow,189
  sarvamai/types/job_status_v_1_response.py,sha256=jusn-3XLCk8vCnu3q3GGSzz_yYgKB5PY_01Q32-1aJk,1605
  sarvamai/types/language_identification_response.py,sha256=jG4ZQ6KQHCiEDqC51OniOwiRdW14Fbz22bbTsUDp_kc,1483
- sarvamai/types/mode.py,sha256=Va8CxtXqSt-aVtKOlIyR_o7IuHYnhGPQy9-MwlgcOLg,208
+ sarvamai/types/mode.py,sha256=ENoXfbrT2rt9ClT6QHC3xQHiSJwM0Q0dZK5iEKJp41k,190
  sarvamai/types/numerals_format.py,sha256=xg3lYiHcnzyFwuwRcaIteJLH_Pz6pJ9n9kTlYPEnCBU,165
  sarvamai/types/ping_signal.py,sha256=cE53FRIXlc8bSo18z6jlAnOh6DhZEMX36huWEX6X3-A,695
  sarvamai/types/reasoning_effort.py,sha256=_TBLn3rQgzJAdnKqV2g0PETbrSBZl0fPLfQ5ZE9H4Pc,164
@@ -187,20 +189,20 @@ sarvamai/types/sarvam_model_ids.py,sha256=iYBMglf31KQ1iUZeAMQ-2PP9NDcyHRG7goz7O9
  sarvamai/types/send_text.py,sha256=kgST6V5NuURzgBpuiDi8fVwDg768ttDoeY9k1dSSb1Y,607
  sarvamai/types/send_text_data.py,sha256=H8yfcvd4gvyN34RrZ9i4qQvieednNBhL7i7isX4asuY,519
  sarvamai/types/speech_sample_rate.py,sha256=Hfi79KL2Y1W7OIvvrfWnt7EUvmU5i7bxYvXivrY_aUA,88
- sarvamai/types/speech_to_text_job_parameters.py,sha256=k96J4a9hzkTs50qz30GKy4nTN7uswQomCll1AThhUAw,1742
- sarvamai/types/speech_to_text_language.py,sha256=cq8FBOX0DfYB3v8jgNteQtHeJcqWqzKWJVyYGwwo_w0,279
- sarvamai/types/speech_to_text_model.py,sha256=PQCoWUNBAhFslY69Zh2-6HFbBA6m683cu0JT8A_LYo0,221
- sarvamai/types/speech_to_text_response.py,sha256=iWRGEJeHUFIOxeEhoCQu68njeA6lcqXbT2czV-O8Wx0,1438
+ sarvamai/types/speech_to_text_job_parameters.py,sha256=JFKO2rIyBJE_PK4mrMMwY4e562l_6HbZtP2kd4CbKa8,3293
+ sarvamai/types/speech_to_text_language.py,sha256=dYmAo9lg7iC0w_GVjmnYmALTQgJjma2wFDXQY3_Q4pc,578
+ sarvamai/types/speech_to_text_model.py,sha256=hHC3aOXzdPt8i32qJw4ZLz5bdREWVQl4P7Y_lOURJu4,170
+ sarvamai/types/speech_to_text_response.py,sha256=0tXgHr-1zN54hA_6nUkMV5Jkt7pEmjl7zJQ8gOt5Nq8,932
  sarvamai/types/speech_to_text_response_data.py,sha256=gbxZTBSjbN3ZIa10K6tWPYtymcpnQTFIaUnXkOmsmD4,322
  sarvamai/types/speech_to_text_streaming_response.py,sha256=z6tVAHbVK9lC3w3lac__LEUfO8AAzEilkeGlaLskTtc,687
- sarvamai/types/speech_to_text_transcription_data.py,sha256=EqwPAPSi98PwARaTj-ufzFUSHyN-NPoPla5vi_KERrU,1297
- sarvamai/types/speech_to_text_translate_job_parameters.py,sha256=fvfcyzIyT0DtcRYePDvglHH-wAhGbsi3H5G4i5nuWT8,1409
- sarvamai/types/speech_to_text_translate_language.py,sha256=yikNM-roIumVG-eqBWss93wLGudZdLPwd0i3VcXH5zo,263
- sarvamai/types/speech_to_text_translate_model.py,sha256=8ZdAFjquozAkAxfyHQUyS0Hu2IEpGG2P5tFG-COYuxU,178
- sarvamai/types/speech_to_text_translate_response.py,sha256=Z5Na7IQW2ok3TP21xd-jKkwioplEKfonNIMhoJQKkVw,1278
+ sarvamai/types/speech_to_text_transcription_data.py,sha256=nrGmNnA9Au5CpBTIyG9wto2PSl2I7T8Kll4KWOkCrFg,1850
+ sarvamai/types/speech_to_text_translate_job_parameters.py,sha256=-E85BoIBxW5Ck638aRFE0fC_f43RCoIkboAFu2QlBBs,1566
+ sarvamai/types/speech_to_text_translate_language.py,sha256=lmWyAWMwSSDNPU5HrJtmhNYLRPhWo41ShMUCrXyEPoc,568
+ sarvamai/types/speech_to_text_translate_model.py,sha256=CVSz6gJBY82GhhEuWSdzRLJW9XTsAgweRnKd1tN6mXo,139
+ sarvamai/types/speech_to_text_translate_response.py,sha256=v1xTwIva81c74hWy9-ipbJFQWvXlAvw_b7o9xvo6mgc,871
  sarvamai/types/speech_to_text_translate_response_data.py,sha256=_NlLVp7oQU3em_4E47QVbIP9nromPE07Z9HtMpY1lrU,359
  sarvamai/types/speech_to_text_translate_streaming_response.py,sha256=J6h3AGdAJxpODFs30bR-e6OaWKa__oVhwv_TrbPSO98,724
- sarvamai/types/speech_to_text_translate_transcription_data.py,sha256=-cZZm21um6erOzx18nAgBYKA3Qc3YzNADUCnfx9mD-k,996
+ sarvamai/types/speech_to_text_translate_transcription_data.py,sha256=HR-7y3LKVt4e3FY028aEXQmORtjdpjBY29AWTIRsRVA,1506
  sarvamai/types/spoken_form_numerals_format.py,sha256=soBly93wMkazIcp2GDM0Mf1MjY140Pe24hBlwNoWge0,169
  sarvamai/types/stop_configuration.py,sha256=yA_q4s4BIrbl3FotZpg4ZcyL10C7gVI0s2dqvH32BNw,136
  sarvamai/types/storage_container_type.py,sha256=DZXDiDj74lMmUq6jaZfIMW1zMXgoVdY6rs_FcyB9OGk,184
@@ -226,6 +228,6 @@ sarvamai/types/transliterate_mode.py,sha256=1jSEMlGcoLkWuk12TgoOpSgwifa4rThGKZ1h
  sarvamai/types/transliterate_source_language.py,sha256=bSY9wJszF0sg-Cgg6F-YcWC8ly1mIlj9rqa15-jBtx8,283
  sarvamai/types/transliteration_response.py,sha256=yt-lzTbDeJ_ZL4I8kQa6oESxA9ebeJJY7LfFHpdEsmM,815
  sarvamai/version.py,sha256=Qkp3Ee9YH-O9RTix90e0i7iNrFAGN-QDt2AFwGA4n8k,75
- sarvamai-0.1.23a4.dist-info/METADATA,sha256=mbfbKrGLegxZadUzXb9zLW9Xll7DYTEfefhtHanA-1I,26753
- sarvamai-0.1.23a4.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
- sarvamai-0.1.23a4.dist-info/RECORD,,
+ sarvamai-0.1.23a6.dist-info/METADATA,sha256=5pdMsPKXwU2rQqBN-U7GmnWbAKVgSLmtfd7E0G8e6KU,26753
+ sarvamai-0.1.23a6.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+ sarvamai-0.1.23a6.dist-info/RECORD,,