sarvamai 0.1.5a13__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. sarvamai/__init__.py +15 -4
  2. sarvamai/core/client_wrapper.py +2 -2
  3. sarvamai/requests/audio_data.py +3 -2
  4. sarvamai/speech_to_text_streaming/__init__.py +12 -2
  5. sarvamai/speech_to_text_streaming/client.py +10 -8
  6. sarvamai/speech_to_text_streaming/raw_client.py +10 -8
  7. sarvamai/speech_to_text_streaming/types/__init__.py +8 -1
  8. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensitivity.py +5 -0
  9. sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py +5 -0
  10. sarvamai/speech_to_text_translate_streaming/__init__.py +10 -2
  11. sarvamai/speech_to_text_translate_streaming/client.py +12 -8
  12. sarvamai/speech_to_text_translate_streaming/raw_client.py +12 -8
  13. sarvamai/speech_to_text_translate_streaming/types/__init__.py +7 -1
  14. sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_high_vad_sensitivity.py +5 -0
  15. sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_vad_signals.py +5 -0
  16. sarvamai/text_to_speech/client.py +6 -14
  17. sarvamai/text_to_speech/raw_client.py +6 -14
  18. sarvamai/types/__init__.py +0 -2
  19. sarvamai/types/audio_data.py +1 -2
  20. sarvamai/types/text_to_speech_model.py +1 -1
  21. sarvamai/types/text_to_speech_speaker.py +1 -22
  22. {sarvamai-0.1.5a13.dist-info → sarvamai-0.1.6.dist-info}/METADATA +1 -1
  23. {sarvamai-0.1.5a13.dist-info → sarvamai-0.1.6.dist-info}/RECORD +24 -21
  24. sarvamai/types/audio_data_encoding.py +0 -5
  25. {sarvamai-0.1.5a13.dist-info → sarvamai-0.1.6.dist-info}/WHEEL +0 -0
sarvamai/__init__.py CHANGED
@@ -4,7 +4,6 @@
4
4
 
5
5
  from .types import (
6
6
  AudioData,
7
- AudioDataEncoding,
8
7
  AudioMessage,
9
8
  ChatCompletionRequestAssistantMessage,
10
9
  ChatCompletionRequestMessage,
@@ -113,14 +112,22 @@ from .requests import (
113
112
  TranslationResponseParams,
114
113
  TransliterationResponseParams,
115
114
  )
116
- from .speech_to_text_streaming import SpeechToTextStreamingLanguageCode, SpeechToTextStreamingModel
117
- from .speech_to_text_translate_streaming import SpeechToTextTranslateStreamingModel
115
+ from .speech_to_text_streaming import (
116
+ SpeechToTextStreamingHighVadSensitivity,
117
+ SpeechToTextStreamingLanguageCode,
118
+ SpeechToTextStreamingModel,
119
+ SpeechToTextStreamingVadSignals,
120
+ )
121
+ from .speech_to_text_translate_streaming import (
122
+ SpeechToTextTranslateStreamingHighVadSensitivity,
123
+ SpeechToTextTranslateStreamingModel,
124
+ SpeechToTextTranslateStreamingVadSignals,
125
+ )
118
126
  from .version import __version__
119
127
 
120
128
  __all__ = [
121
129
  "AsyncSarvamAI",
122
130
  "AudioData",
123
- "AudioDataEncoding",
124
131
  "AudioDataParams",
125
132
  "AudioMessage",
126
133
  "AudioMessageParams",
@@ -183,10 +190,12 @@ __all__ = [
183
190
  "SpeechToTextResponseData",
184
191
  "SpeechToTextResponseDataParams",
185
192
  "SpeechToTextResponseParams",
193
+ "SpeechToTextStreamingHighVadSensitivity",
186
194
  "SpeechToTextStreamingLanguageCode",
187
195
  "SpeechToTextStreamingModel",
188
196
  "SpeechToTextStreamingResponse",
189
197
  "SpeechToTextStreamingResponseParams",
198
+ "SpeechToTextStreamingVadSignals",
190
199
  "SpeechToTextTranscriptionData",
191
200
  "SpeechToTextTranscriptionDataParams",
192
201
  "SpeechToTextTranslateLanguage",
@@ -195,9 +204,11 @@ __all__ = [
195
204
  "SpeechToTextTranslateResponseData",
196
205
  "SpeechToTextTranslateResponseDataParams",
197
206
  "SpeechToTextTranslateResponseParams",
207
+ "SpeechToTextTranslateStreamingHighVadSensitivity",
198
208
  "SpeechToTextTranslateStreamingModel",
199
209
  "SpeechToTextTranslateStreamingResponse",
200
210
  "SpeechToTextTranslateStreamingResponseParams",
211
+ "SpeechToTextTranslateStreamingVadSignals",
201
212
  "SpeechToTextTranslateTranscriptionData",
202
213
  "SpeechToTextTranslateTranscriptionDataParams",
203
214
  "SpokenFormNumeralsFormat",
@@ -17,10 +17,10 @@ class BaseClientWrapper:
17
17
 
18
18
  def get_headers(self) -> typing.Dict[str, str]:
19
19
  headers: typing.Dict[str, str] = {
20
- "User-Agent": "sarvamai/0.1.5a13",
20
+ "User-Agent": "sarvamai/0.1.6",
21
21
  "X-Fern-Language": "Python",
22
22
  "X-Fern-SDK-Name": "sarvamai",
23
- "X-Fern-SDK-Version": "0.1.5a13",
23
+ "X-Fern-SDK-Version": "0.1.6",
24
24
  }
25
25
  headers["api-subscription-key"] = self.api_subscription_key
26
26
  return headers
@@ -1,7 +1,8 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
+ import typing
4
+
3
5
  import typing_extensions
4
- from ..types.audio_data_encoding import AudioDataEncoding
5
6
 
6
7
 
7
8
  class AudioDataParams(typing_extensions.TypedDict):
@@ -15,7 +16,7 @@ class AudioDataParams(typing_extensions.TypedDict):
15
16
  Audio sample rate in Hz (16kHz preferred, 8kHz least preferred)
16
17
  """
17
18
 
18
- encoding: AudioDataEncoding
19
+ encoding: typing.Literal["audio/wav"]
19
20
  """
20
21
  Audio encoding format
21
22
  """
@@ -2,6 +2,16 @@
2
2
 
3
3
  # isort: skip_file
4
4
 
5
- from .types import SpeechToTextStreamingLanguageCode, SpeechToTextStreamingModel
5
+ from .types import (
6
+ SpeechToTextStreamingHighVadSensitivity,
7
+ SpeechToTextStreamingLanguageCode,
8
+ SpeechToTextStreamingModel,
9
+ SpeechToTextStreamingVadSignals,
10
+ )
6
11
 
7
- __all__ = ["SpeechToTextStreamingLanguageCode", "SpeechToTextStreamingModel"]
12
+ __all__ = [
13
+ "SpeechToTextStreamingHighVadSensitivity",
14
+ "SpeechToTextStreamingLanguageCode",
15
+ "SpeechToTextStreamingModel",
16
+ "SpeechToTextStreamingVadSignals",
17
+ ]
@@ -11,8 +11,10 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
11
11
  from ..core.request_options import RequestOptions
12
12
  from .raw_client import AsyncRawSpeechToTextStreamingClient, RawSpeechToTextStreamingClient
13
13
  from .socket_client import AsyncSpeechToTextStreamingSocketClient, SpeechToTextStreamingSocketClient
14
+ from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
14
15
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
15
16
  from .types.speech_to_text_streaming_model import SpeechToTextStreamingModel
17
+ from .types.speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
16
18
 
17
19
 
18
20
  class SpeechToTextStreamingClient:
@@ -36,8 +38,8 @@ class SpeechToTextStreamingClient:
36
38
  *,
37
39
  language_code: SpeechToTextStreamingLanguageCode,
38
40
  model: typing.Optional[SpeechToTextStreamingModel] = None,
39
- high_vad_sensitivity: typing.Optional[str] = None,
40
- vad_signals: typing.Optional[str] = None,
41
+ high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
42
+ vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
41
43
  api_subscription_key: typing.Optional[str] = None,
42
44
  request_options: typing.Optional[RequestOptions] = None,
43
45
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -52,10 +54,10 @@ class SpeechToTextStreamingClient:
52
54
  model : typing.Optional[SpeechToTextStreamingModel]
53
55
  Speech to text model to use
54
56
 
55
- high_vad_sensitivity : typing.Optional[str]
57
+ high_vad_sensitivity : typing.Optional[SpeechToTextStreamingHighVadSensitivity]
56
58
  Enable high VAD (Voice Activity Detection) sensitivity
57
59
 
58
- vad_signals : typing.Optional[str]
60
+ vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
59
61
  Enable VAD signals in response
60
62
 
61
63
  api_subscription_key : typing.Optional[str]
@@ -123,8 +125,8 @@ class AsyncSpeechToTextStreamingClient:
123
125
  *,
124
126
  language_code: SpeechToTextStreamingLanguageCode,
125
127
  model: typing.Optional[SpeechToTextStreamingModel] = None,
126
- high_vad_sensitivity: typing.Optional[str] = None,
127
- vad_signals: typing.Optional[str] = None,
128
+ high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
129
+ vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
128
130
  api_subscription_key: typing.Optional[str] = None,
129
131
  request_options: typing.Optional[RequestOptions] = None,
130
132
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -139,10 +141,10 @@ class AsyncSpeechToTextStreamingClient:
139
141
  model : typing.Optional[SpeechToTextStreamingModel]
140
142
  Speech to text model to use
141
143
 
142
- high_vad_sensitivity : typing.Optional[str]
144
+ high_vad_sensitivity : typing.Optional[SpeechToTextStreamingHighVadSensitivity]
143
145
  Enable high VAD (Voice Activity Detection) sensitivity
144
146
 
145
- vad_signals : typing.Optional[str]
147
+ vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
146
148
  Enable VAD signals in response
147
149
 
148
150
  api_subscription_key : typing.Optional[str]
@@ -10,8 +10,10 @@ from ..core.api_error import ApiError
10
10
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
11
11
  from ..core.request_options import RequestOptions
12
12
  from .socket_client import AsyncSpeechToTextStreamingSocketClient, SpeechToTextStreamingSocketClient
13
+ from .types.speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
13
14
  from .types.speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
14
15
  from .types.speech_to_text_streaming_model import SpeechToTextStreamingModel
16
+ from .types.speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
15
17
 
16
18
 
17
19
  class RawSpeechToTextStreamingClient:
@@ -24,8 +26,8 @@ class RawSpeechToTextStreamingClient:
24
26
  *,
25
27
  language_code: SpeechToTextStreamingLanguageCode,
26
28
  model: typing.Optional[SpeechToTextStreamingModel] = None,
27
- high_vad_sensitivity: typing.Optional[str] = None,
28
- vad_signals: typing.Optional[str] = None,
29
+ high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
30
+ vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
29
31
  api_subscription_key: typing.Optional[str] = None,
30
32
  request_options: typing.Optional[RequestOptions] = None,
31
33
  ) -> typing.Iterator[SpeechToTextStreamingSocketClient]:
@@ -40,10 +42,10 @@ class RawSpeechToTextStreamingClient:
40
42
  model : typing.Optional[SpeechToTextStreamingModel]
41
43
  Speech to text model to use
42
44
 
43
- high_vad_sensitivity : typing.Optional[str]
45
+ high_vad_sensitivity : typing.Optional[SpeechToTextStreamingHighVadSensitivity]
44
46
  Enable high VAD (Voice Activity Detection) sensitivity
45
47
 
46
- vad_signals : typing.Optional[str]
48
+ vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
47
49
  Enable VAD signals in response
48
50
 
49
51
  api_subscription_key : typing.Optional[str]
@@ -100,8 +102,8 @@ class AsyncRawSpeechToTextStreamingClient:
100
102
  *,
101
103
  language_code: SpeechToTextStreamingLanguageCode,
102
104
  model: typing.Optional[SpeechToTextStreamingModel] = None,
103
- high_vad_sensitivity: typing.Optional[str] = None,
104
- vad_signals: typing.Optional[str] = None,
105
+ high_vad_sensitivity: typing.Optional[SpeechToTextStreamingHighVadSensitivity] = None,
106
+ vad_signals: typing.Optional[SpeechToTextStreamingVadSignals] = None,
105
107
  api_subscription_key: typing.Optional[str] = None,
106
108
  request_options: typing.Optional[RequestOptions] = None,
107
109
  ) -> typing.AsyncIterator[AsyncSpeechToTextStreamingSocketClient]:
@@ -116,10 +118,10 @@ class AsyncRawSpeechToTextStreamingClient:
116
118
  model : typing.Optional[SpeechToTextStreamingModel]
117
119
  Speech to text model to use
118
120
 
119
- high_vad_sensitivity : typing.Optional[str]
121
+ high_vad_sensitivity : typing.Optional[SpeechToTextStreamingHighVadSensitivity]
120
122
  Enable high VAD (Voice Activity Detection) sensitivity
121
123
 
122
- vad_signals : typing.Optional[str]
124
+ vad_signals : typing.Optional[SpeechToTextStreamingVadSignals]
123
125
  Enable VAD signals in response
124
126
 
125
127
  api_subscription_key : typing.Optional[str]
@@ -2,7 +2,14 @@
2
2
 
3
3
  # isort: skip_file
4
4
 
5
+ from .speech_to_text_streaming_high_vad_sensitivity import SpeechToTextStreamingHighVadSensitivity
5
6
  from .speech_to_text_streaming_language_code import SpeechToTextStreamingLanguageCode
6
7
  from .speech_to_text_streaming_model import SpeechToTextStreamingModel
8
+ from .speech_to_text_streaming_vad_signals import SpeechToTextStreamingVadSignals
7
9
 
8
- __all__ = ["SpeechToTextStreamingLanguageCode", "SpeechToTextStreamingModel"]
10
+ __all__ = [
11
+ "SpeechToTextStreamingHighVadSensitivity",
12
+ "SpeechToTextStreamingLanguageCode",
13
+ "SpeechToTextStreamingModel",
14
+ "SpeechToTextStreamingVadSignals",
15
+ ]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ SpeechToTextStreamingHighVadSensitivity = typing.Union[typing.Literal["true", "false"], typing.Any]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ SpeechToTextStreamingVadSignals = typing.Union[typing.Literal["true", "false"], typing.Any]
@@ -2,6 +2,14 @@
2
2
 
3
3
  # isort: skip_file
4
4
 
5
- from .types import SpeechToTextTranslateStreamingModel
5
+ from .types import (
6
+ SpeechToTextTranslateStreamingHighVadSensitivity,
7
+ SpeechToTextTranslateStreamingModel,
8
+ SpeechToTextTranslateStreamingVadSignals,
9
+ )
6
10
 
7
- __all__ = ["SpeechToTextTranslateStreamingModel"]
11
+ __all__ = [
12
+ "SpeechToTextTranslateStreamingHighVadSensitivity",
13
+ "SpeechToTextTranslateStreamingModel",
14
+ "SpeechToTextTranslateStreamingVadSignals",
15
+ ]
@@ -11,7 +11,11 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
11
11
  from ..core.request_options import RequestOptions
12
12
  from .raw_client import AsyncRawSpeechToTextTranslateStreamingClient, RawSpeechToTextTranslateStreamingClient
13
13
  from .socket_client import AsyncSpeechToTextTranslateStreamingSocketClient, SpeechToTextTranslateStreamingSocketClient
14
+ from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
15
+ SpeechToTextTranslateStreamingHighVadSensitivity,
16
+ )
14
17
  from .types.speech_to_text_translate_streaming_model import SpeechToTextTranslateStreamingModel
18
+ from .types.speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
15
19
 
16
20
 
17
21
  class SpeechToTextTranslateStreamingClient:
@@ -34,8 +38,8 @@ class SpeechToTextTranslateStreamingClient:
34
38
  self,
35
39
  *,
36
40
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
37
- high_vad_sensitivity: typing.Optional[str] = None,
38
- vad_signals: typing.Optional[str] = None,
41
+ high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
42
+ vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
39
43
  api_subscription_key: typing.Optional[str] = None,
40
44
  request_options: typing.Optional[RequestOptions] = None,
41
45
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -47,10 +51,10 @@ class SpeechToTextTranslateStreamingClient:
47
51
  model : typing.Optional[SpeechToTextTranslateStreamingModel]
48
52
  Speech to text model to use (defaults to "saaras:v2.5" if not specified)
49
53
 
50
- high_vad_sensitivity : typing.Optional[str]
54
+ high_vad_sensitivity : typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity]
51
55
  Enable high VAD (Voice Activity Detection) sensitivity
52
56
 
53
- vad_signals : typing.Optional[str]
57
+ vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
54
58
  Enable VAD signals in response
55
59
 
56
60
  api_subscription_key : typing.Optional[str]
@@ -115,8 +119,8 @@ class AsyncSpeechToTextTranslateStreamingClient:
115
119
  self,
116
120
  *,
117
121
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
118
- high_vad_sensitivity: typing.Optional[str] = None,
119
- vad_signals: typing.Optional[str] = None,
122
+ high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
123
+ vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
120
124
  api_subscription_key: typing.Optional[str] = None,
121
125
  request_options: typing.Optional[RequestOptions] = None,
122
126
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -128,10 +132,10 @@ class AsyncSpeechToTextTranslateStreamingClient:
128
132
  model : typing.Optional[SpeechToTextTranslateStreamingModel]
129
133
  Speech to text model to use (defaults to "saaras:v2.5" if not specified)
130
134
 
131
- high_vad_sensitivity : typing.Optional[str]
135
+ high_vad_sensitivity : typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity]
132
136
  Enable high VAD (Voice Activity Detection) sensitivity
133
137
 
134
- vad_signals : typing.Optional[str]
138
+ vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
135
139
  Enable VAD signals in response
136
140
 
137
141
  api_subscription_key : typing.Optional[str]
@@ -10,7 +10,11 @@ from ..core.api_error import ApiError
10
10
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
11
11
  from ..core.request_options import RequestOptions
12
12
  from .socket_client import AsyncSpeechToTextTranslateStreamingSocketClient, SpeechToTextTranslateStreamingSocketClient
13
+ from .types.speech_to_text_translate_streaming_high_vad_sensitivity import (
14
+ SpeechToTextTranslateStreamingHighVadSensitivity,
15
+ )
13
16
  from .types.speech_to_text_translate_streaming_model import SpeechToTextTranslateStreamingModel
17
+ from .types.speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
14
18
 
15
19
 
16
20
  class RawSpeechToTextTranslateStreamingClient:
@@ -22,8 +26,8 @@ class RawSpeechToTextTranslateStreamingClient:
22
26
  self,
23
27
  *,
24
28
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
25
- high_vad_sensitivity: typing.Optional[str] = None,
26
- vad_signals: typing.Optional[str] = None,
29
+ high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
30
+ vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
27
31
  api_subscription_key: typing.Optional[str] = None,
28
32
  request_options: typing.Optional[RequestOptions] = None,
29
33
  ) -> typing.Iterator[SpeechToTextTranslateStreamingSocketClient]:
@@ -35,10 +39,10 @@ class RawSpeechToTextTranslateStreamingClient:
35
39
  model : typing.Optional[SpeechToTextTranslateStreamingModel]
36
40
  Speech to text model to use (defaults to "saaras:v2.5" if not specified)
37
41
 
38
- high_vad_sensitivity : typing.Optional[str]
42
+ high_vad_sensitivity : typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity]
39
43
  Enable high VAD (Voice Activity Detection) sensitivity
40
44
 
41
- vad_signals : typing.Optional[str]
45
+ vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
42
46
  Enable VAD signals in response
43
47
 
44
48
  api_subscription_key : typing.Optional[str]
@@ -92,8 +96,8 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
92
96
  self,
93
97
  *,
94
98
  model: typing.Optional[SpeechToTextTranslateStreamingModel] = None,
95
- high_vad_sensitivity: typing.Optional[str] = None,
96
- vad_signals: typing.Optional[str] = None,
99
+ high_vad_sensitivity: typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity] = None,
100
+ vad_signals: typing.Optional[SpeechToTextTranslateStreamingVadSignals] = None,
97
101
  api_subscription_key: typing.Optional[str] = None,
98
102
  request_options: typing.Optional[RequestOptions] = None,
99
103
  ) -> typing.AsyncIterator[AsyncSpeechToTextTranslateStreamingSocketClient]:
@@ -105,10 +109,10 @@ class AsyncRawSpeechToTextTranslateStreamingClient:
105
109
  model : typing.Optional[SpeechToTextTranslateStreamingModel]
106
110
  Speech to text model to use (defaults to "saaras:v2.5" if not specified)
107
111
 
108
- high_vad_sensitivity : typing.Optional[str]
112
+ high_vad_sensitivity : typing.Optional[SpeechToTextTranslateStreamingHighVadSensitivity]
109
113
  Enable high VAD (Voice Activity Detection) sensitivity
110
114
 
111
- vad_signals : typing.Optional[str]
115
+ vad_signals : typing.Optional[SpeechToTextTranslateStreamingVadSignals]
112
116
  Enable VAD signals in response
113
117
 
114
118
  api_subscription_key : typing.Optional[str]
@@ -2,6 +2,12 @@
2
2
 
3
3
  # isort: skip_file
4
4
 
5
+ from .speech_to_text_translate_streaming_high_vad_sensitivity import SpeechToTextTranslateStreamingHighVadSensitivity
5
6
  from .speech_to_text_translate_streaming_model import SpeechToTextTranslateStreamingModel
7
+ from .speech_to_text_translate_streaming_vad_signals import SpeechToTextTranslateStreamingVadSignals
6
8
 
7
- __all__ = ["SpeechToTextTranslateStreamingModel"]
9
+ __all__ = [
10
+ "SpeechToTextTranslateStreamingHighVadSensitivity",
11
+ "SpeechToTextTranslateStreamingModel",
12
+ "SpeechToTextTranslateStreamingVadSignals",
13
+ ]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ SpeechToTextTranslateStreamingHighVadSensitivity = typing.Union[typing.Literal["true", "false"], typing.Any]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ SpeechToTextTranslateStreamingVadSignals = typing.Union[typing.Literal["true", "false"], typing.Any]
@@ -58,13 +58,9 @@ class TextToSpeechClient:
58
58
  speaker : typing.Optional[TextToSpeechSpeaker]
59
59
  The speaker voice to be used for the output audio.
60
60
 
61
- **Default:** Meera
62
-
63
- **Model Compatibility (Speakers compatible with respective models):**
64
- - **bulbul:v1:**
65
- - Female: Diya, Maya, Meera, Pavithra, Maitreyi, Misha
66
- - Male: Amol, Arjun, Amartya, Arvind, Neel, Vian
61
+ **Default:** Anushka
67
62
 
63
+ **Model Compatibility (Speakers compatible with respective model):**
68
64
  - **bulbul:v2:**
69
65
  - Female: Anushka, Manisha, Vidya, Arya
70
66
  - Male: Abhilash, Karun, Hitesh
@@ -87,7 +83,7 @@ class TextToSpeechClient:
87
83
  Controls whether normalization of English words and numeric entities (e.g., numbers, dates) is performed. Set to true for better handling of mixed-language text. Default is false.
88
84
 
89
85
  model : typing.Optional[TextToSpeechModel]
90
- Specifies the model to use for text-to-speech conversion. Default is bulbul:v1.
86
+ Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
91
87
 
92
88
  request_options : typing.Optional[RequestOptions]
93
89
  Request-specific configuration.
@@ -167,13 +163,9 @@ class AsyncTextToSpeechClient:
167
163
  speaker : typing.Optional[TextToSpeechSpeaker]
168
164
  The speaker voice to be used for the output audio.
169
165
 
170
- **Default:** Meera
171
-
172
- **Model Compatibility (Speakers compatible with respective models):**
173
- - **bulbul:v1:**
174
- - Female: Diya, Maya, Meera, Pavithra, Maitreyi, Misha
175
- - Male: Amol, Arjun, Amartya, Arvind, Neel, Vian
166
+ **Default:** Anushka
176
167
 
168
+ **Model Compatibility (Speakers compatible with respective model):**
177
169
  - **bulbul:v2:**
178
170
  - Female: Anushka, Manisha, Vidya, Arya
179
171
  - Male: Abhilash, Karun, Hitesh
@@ -196,7 +188,7 @@ class AsyncTextToSpeechClient:
196
188
  Controls whether normalization of English words and numeric entities (e.g., numbers, dates) is performed. Set to true for better handling of mixed-language text. Default is false.
197
189
 
198
190
  model : typing.Optional[TextToSpeechModel]
199
- Specifies the model to use for text-to-speech conversion. Default is bulbul:v1.
191
+ Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
200
192
 
201
193
  request_options : typing.Optional[RequestOptions]
202
194
  Request-specific configuration.
@@ -55,13 +55,9 @@ class RawTextToSpeechClient:
55
55
  speaker : typing.Optional[TextToSpeechSpeaker]
56
56
  The speaker voice to be used for the output audio.
57
57
 
58
- **Default:** Meera
59
-
60
- **Model Compatibility (Speakers compatible with respective models):**
61
- - **bulbul:v1:**
62
- - Female: Diya, Maya, Meera, Pavithra, Maitreyi, Misha
63
- - Male: Amol, Arjun, Amartya, Arvind, Neel, Vian
58
+ **Default:** Anushka
64
59
 
60
+ **Model Compatibility (Speakers compatible with respective model):**
65
61
  - **bulbul:v2:**
66
62
  - Female: Anushka, Manisha, Vidya, Arya
67
63
  - Male: Abhilash, Karun, Hitesh
@@ -84,7 +80,7 @@ class RawTextToSpeechClient:
84
80
  Controls whether normalization of English words and numeric entities (e.g., numbers, dates) is performed. Set to true for better handling of mixed-language text. Default is false.
85
81
 
86
82
  model : typing.Optional[TextToSpeechModel]
87
- Specifies the model to use for text-to-speech conversion. Default is bulbul:v1.
83
+ Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
88
84
 
89
85
  request_options : typing.Optional[RequestOptions]
90
86
  Request-specific configuration.
@@ -218,13 +214,9 @@ class AsyncRawTextToSpeechClient:
218
214
  speaker : typing.Optional[TextToSpeechSpeaker]
219
215
  The speaker voice to be used for the output audio.
220
216
 
221
- **Default:** Meera
222
-
223
- **Model Compatibility (Speakers compatible with respective models):**
224
- - **bulbul:v1:**
225
- - Female: Diya, Maya, Meera, Pavithra, Maitreyi, Misha
226
- - Male: Amol, Arjun, Amartya, Arvind, Neel, Vian
217
+ **Default:** Anushka
227
218
 
219
+ **Model Compatibility (Speakers compatible with respective model):**
228
220
  - **bulbul:v2:**
229
221
  - Female: Anushka, Manisha, Vidya, Arya
230
222
  - Male: Abhilash, Karun, Hitesh
@@ -247,7 +239,7 @@ class AsyncRawTextToSpeechClient:
247
239
  Controls whether normalization of English words and numeric entities (e.g., numbers, dates) is performed. Set to true for better handling of mixed-language text. Default is false.
248
240
 
249
241
  model : typing.Optional[TextToSpeechModel]
250
- Specifies the model to use for text-to-speech conversion. Default is bulbul:v1.
242
+ Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
251
243
 
252
244
  request_options : typing.Optional[RequestOptions]
253
245
  Request-specific configuration.
@@ -3,7 +3,6 @@
3
3
  # isort: skip_file
4
4
 
5
5
  from .audio_data import AudioData
6
- from .audio_data_encoding import AudioDataEncoding
7
6
  from .audio_message import AudioMessage
8
7
  from .chat_completion_request_assistant_message import ChatCompletionRequestAssistantMessage
9
8
  from .chat_completion_request_message import (
@@ -68,7 +67,6 @@ from .transliteration_response import TransliterationResponse
68
67
 
69
68
  __all__ = [
70
69
  "AudioData",
71
- "AudioDataEncoding",
72
70
  "AudioMessage",
73
71
  "ChatCompletionRequestAssistantMessage",
74
72
  "ChatCompletionRequestMessage",
@@ -4,7 +4,6 @@ import typing
4
4
 
5
5
  import pydantic
6
6
  from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
7
- from .audio_data_encoding import AudioDataEncoding
8
7
 
9
8
 
10
9
  class AudioData(UniversalBaseModel):
@@ -18,7 +17,7 @@ class AudioData(UniversalBaseModel):
18
17
  Audio sample rate in Hz (16kHz preferred, 8kHz least preferred)
19
18
  """
20
19
 
21
- encoding: AudioDataEncoding = pydantic.Field()
20
+ encoding: typing.Literal["audio/wav"] = pydantic.Field(default="audio/wav")
22
21
  """
23
22
  Audio encoding format
24
23
  """
@@ -2,4 +2,4 @@
2
2
 
3
3
  import typing
4
4
 
5
- TextToSpeechModel = typing.Union[typing.Literal["bulbul:v1", "bulbul:v2"], typing.Any]
5
+ TextToSpeechModel = typing.Literal["bulbul:v2"]
@@ -3,26 +3,5 @@
3
3
  import typing
4
4
 
5
5
  TextToSpeechSpeaker = typing.Union[
6
- typing.Literal[
7
- "meera",
8
- "pavithra",
9
- "maitreyi",
10
- "arvind",
11
- "amol",
12
- "amartya",
13
- "diya",
14
- "neel",
15
- "misha",
16
- "vian",
17
- "arjun",
18
- "maya",
19
- "anushka",
20
- "abhilash",
21
- "manisha",
22
- "vidya",
23
- "arya",
24
- "karun",
25
- "hitesh",
26
- ],
27
- typing.Any,
6
+ typing.Literal["anushka", "abhilash", "manisha", "vidya", "arya", "karun", "hitesh"], typing.Any
28
7
  ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: sarvamai
3
- Version: 0.1.5a13
3
+ Version: 0.1.6
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -1,11 +1,11 @@
1
- sarvamai/__init__.py,sha256=rN99IDvheQJQijubWD47TaGNlx8Q4IEUZ5CHh8QrIuY,7165
1
+ sarvamai/__init__.py,sha256=-XV5i09uHn5nMVvhydUPG84_lryvCgcEmOwzOgCEsMk,7511
2
2
  sarvamai/chat/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
3
3
  sarvamai/chat/client.py,sha256=xOSj83Gr6Q7eY2qUeATiuXYQqBqWqSCQlIEopK5fKus,11022
4
4
  sarvamai/chat/raw_client.py,sha256=A2kRuZcVWlJhyYCD7YKgqNkZEp3cYa1731KhRkhirU0,17885
5
5
  sarvamai/client.py,sha256=5-fW9679vlfqw6hQCtFIG0gnqd6tdz2W8BWx2ypFUHE,6855
6
6
  sarvamai/core/__init__.py,sha256=YE2CtXeASe1RAbaI39twKWYKCuT4tW5is9HWHhJjR_g,1653
7
7
  sarvamai/core/api_error.py,sha256=44vPoTyWN59gonCIZMdzw7M1uspygiLnr3GNFOoVL2Q,614
8
- sarvamai/core/client_wrapper.py,sha256=2xThcCgUTsV6Zwxi6Dh1SGGbyA38v_QdjH7x1jqfcFI,2080
8
+ sarvamai/core/client_wrapper.py,sha256=bhcuvbPbeVNO-t82IlCbIbLn9dgs5UVDbsDdHs8DLYQ,2074
9
9
  sarvamai/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
10
10
  sarvamai/core/events.py,sha256=j7VWXgMpOsjCXdzY22wIhI7Q-v5InZ4WchRzA88x_Sk,856
11
11
  sarvamai/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
@@ -29,7 +29,7 @@ sarvamai/errors/unprocessable_entity_error.py,sha256=JqxtzIhvjkpQDqbT9Q-go1n-gyv
29
29
  sarvamai/play.py,sha256=4fh86zy8g8IPU2O8yPBY7QxXQOivv_nWQvPQsOa1arw,2183
30
30
  sarvamai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
31
31
  sarvamai/requests/__init__.py,sha256=orINJGeCXuK0ep5LLDhoiGp0kCYICrfJiLC6S-JmYNw,3697
32
- sarvamai/requests/audio_data.py,sha256=zbGm4jK04s1l9d9gCM-5F9c7H1qzhxbi_0YjoOnKFg8,443
32
+ sarvamai/requests/audio_data.py,sha256=QI3SK5aiAg2yJ-m3l9CxOkONnH3CCKMFCl9kAdMs19o,410
33
33
  sarvamai/requests/audio_message.py,sha256=ZBeogjGE6YFXXM-0g8zq9SoizDk21reR0YXSB-0fMjg,214
34
34
  sarvamai/requests/chat_completion_request_assistant_message.py,sha256=xI6nqqY2t4j56DGEAt2aasDnI7no_mxxCBk_ChxNQjg,247
35
35
  sarvamai/requests/chat_completion_request_message.py,sha256=B5tOPGNdSaMOJRl0k26uuXaqvpTrftiu-99CDDBTnSI,736
@@ -64,28 +64,31 @@ sarvamai/requests/transliteration_response.py,sha256=KqRkqnegLmt7LjdVxjRePX6Roqa
64
64
  sarvamai/speech_to_text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
65
65
  sarvamai/speech_to_text/client.py,sha256=E76V1BZ236AN37cvm7c-NTRTnDleiYQ4_hKHE3wfevY,11435
66
66
  sarvamai/speech_to_text/raw_client.py,sha256=rencVGZ5cVX1eHhYoVMhcrlkqVmpI4aAGTrSC7kcYUI,25690
67
- sarvamai/speech_to_text_streaming/__init__.py,sha256=S4UmJOq-iqUhm1LoHBry0tAc-ojsyXhYyiPs0BxlYm8,245
68
- sarvamai/speech_to_text_streaming/client.py,sha256=OXBYbfRY7DwF70xxprp1xDyNrviMhdoLJePsjm-BbGM,7775
69
- sarvamai/speech_to_text_streaming/raw_client.py,sha256=oyCysLboisbmwKd8p_IIspg3GbxvGC5C3iXVSqgKYvg,6956
67
+ sarvamai/speech_to_text_streaming/__init__.py,sha256=q7QygMmZCHJ-4FMhhL_6XNV_dsqlIFRCO1iSxoyxaaY,437
68
+ sarvamai/speech_to_text_streaming/client.py,sha256=WdkzZxKMdnQ2hHv9hzJlfSNggRJLKFljRiC7695Jcog,8224
69
+ sarvamai/speech_to_text_streaming/raw_client.py,sha256=7zcgVw7CXA2TySkJKQkS-NdnJOMRudmV_m4NaigICN0,7405
70
70
  sarvamai/speech_to_text_streaming/socket_client.py,sha256=QI0vEjDvNTG9-EH99NolIuARYwD-P8Fms8rqKOD9HJQ,6419
71
- sarvamai/speech_to_text_streaming/types/__init__.py,sha256=EAmovJvXu5XZTWa079d3MoZMTO4au1SxjdjxrJfGJPM,321
71
+ sarvamai/speech_to_text_streaming/types/__init__.py,sha256=hzEDbcyy6K0Q9-Zx5k5lxAHxIgeHnAoPpvTLrL13YT0,599
72
+ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensitivity.py,sha256=OwPwffa8TkLPGMnOTn5S7d-HmV8QmN3B7fHz8I1-VT8,180
72
73
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py,sha256=LxgEifmgWTCFZn9U-f-TWKxRPng3a2J26Zt526QrA0Y,267
73
74
  sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_model.py,sha256=b6F4ymgz4got6KVDqrweYvkET8itze63wUwWyjqDlO4,180
74
- sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=TQyn8tpGwhx5iAVwgJzLP4Hxp0M2VXej2JCIESMiJxQ,191
75
- sarvamai/speech_to_text_translate_streaming/client.py,sha256=nd0_fBWydKbptgfITSQhIdq47Fm-aGyLRUL1qcIeb8o,7500
76
- sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=ShPaomtppKUhfgvSm4sYLXJu1h3Mg-o1ZMyFcx7KJGQ,6609
75
+ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py,sha256=8wiFOB7WDMbYCcMTYgNFJaIjEytYeXpJLwr_O_mH0TI,172
76
+ sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=_hmlce1Zs1grylysZhBUdtKfkaUROwVydtwz6l-1qqg,411
77
+ sarvamai/speech_to_text_translate_streaming/client.py,sha256=TnHCcspbbYFaimcEk8km3QNrNkm8JlX7e2ydpeHL9EE,8068
78
+ sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=_TlUro1NwWc1dAvC0SHcOoTnsnAqRRXxzxKHxw8BGTs,7177
77
79
  sarvamai/speech_to_text_translate_streaming/socket_client.py,sha256=Xy86G9Mv1kGLf8xwFo3OBf98vqGQ50iigUAoOaApLdI,7842
78
- sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=gYC5MLZAotSa5ChIJnyFV5dNP18wC8vcHVeRm9DAjvQ,226
80
+ sarvamai/speech_to_text_translate_streaming/types/__init__.py,sha256=zyKoGAbKW4d0-Zi56F0RfPUqtk_xUjWjF_RjTxbXXW4,556
81
+ sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_high_vad_sensitivity.py,sha256=r6MvTlkM0VEpb4dpnMHtINOZ-gYc22o0Fx_Xce2rjvo,189
79
82
  sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_model.py,sha256=6B8VxkpJG_pNprCSctseDtJb_ULVdKrPaeENkQ6Jvjg,187
83
+ sarvamai/speech_to_text_translate_streaming/types/speech_to_text_translate_streaming_vad_signals.py,sha256=EV3xd9qyKMnMvA9rO-qFDDIac4b84roBu7n-maaPxG8,181
80
84
  sarvamai/text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
81
85
  sarvamai/text/client.py,sha256=CMwDVtMsPor08e8F9a7yhHtgrdLzd1__samwm4S9CGM,30525
82
86
  sarvamai/text/raw_client.py,sha256=lQ7bV9aVqxjwEUHMPEZ4x0_Xs036_yFArMK9rnYT4ZI,48625
83
87
  sarvamai/text_to_speech/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
84
- sarvamai/text_to_speech/client.py,sha256=dyJlmHhVHUiXsi_5uh8Oged-ynN_NcDzD7JC_MByzV4,9164
85
- sarvamai/text_to_speech/raw_client.py,sha256=dwVPzx1kjXjuYGuNUjB1ibxHrYHXQJOJ6TnMGpDZOdo,15102
86
- sarvamai/types/__init__.py,sha256=BQZ6YuNB-iuXpHZZWoHzTKCTMA1cyK8PyUYEUkiVuU8,5302
87
- sarvamai/types/audio_data.py,sha256=yKkMcUbg9QGWhpfJSpWXEmwgun0K4NV7OZQiN_YkoVw,851
88
- sarvamai/types/audio_data_encoding.py,sha256=bfXb83yGdbLChm-XoN73PW-ak-iFPR24lVti07Ej00A,194
88
+ sarvamai/text_to_speech/client.py,sha256=aVvwdGTfgVUekALLpdyxKNsGUZQ2Ee2OZBLx6WP6g_E,8842
89
+ sarvamai/text_to_speech/raw_client.py,sha256=3Zu6HN_FOY683Vm-EN-OL7YAbLsftjJlFm5OyRGNtYc,14780
90
+ sarvamai/types/__init__.py,sha256=OWVmsa_5dWjmCtLRsCpSlXrlTPd1ZLNAxqfAYvxAF80,5226
91
+ sarvamai/types/audio_data.py,sha256=rgOukLkLNJ_HBBVE2g5dfEL2CWjRoGiMvCtpq0qTB1Y,829
89
92
  sarvamai/types/audio_message.py,sha256=sB4EgkWkWJzipYXobkmM9AYZTTZtCpg_ySKssUeznUE,560
90
93
  sarvamai/types/chat_completion_request_assistant_message.py,sha256=pFSONJ6CBsv3frcteid66SOKMkFwQ1UJs_e0XwwbKis,624
91
94
  sarvamai/types/chat_completion_request_message.py,sha256=4ZfaNvaphnPdJqYSeSXMZwBhkrAqBz3aob6j-4Cklho,1638
@@ -127,9 +130,9 @@ sarvamai/types/speech_to_text_translate_transcription_data.py,sha256=-cZZm21um6e
127
130
  sarvamai/types/spoken_form_numerals_format.py,sha256=soBly93wMkazIcp2GDM0Mf1MjY140Pe24hBlwNoWge0,169
128
131
  sarvamai/types/stop_configuration.py,sha256=yA_q4s4BIrbl3FotZpg4ZcyL10C7gVI0s2dqvH32BNw,136
129
132
  sarvamai/types/text_to_speech_language.py,sha256=T5-rP93WyJwkdJTmNZuvNkkoVtgU0G25a8R2F3OwRZ4,254
130
- sarvamai/types/text_to_speech_model.py,sha256=pQT3AA7CgxbwFAHLAsMM0ZIXmtLbellJF-jtP5U7bLc,167
133
+ sarvamai/types/text_to_speech_model.py,sha256=qRkpGCcfrLD45l499cBUcBgZDo_qKPZtFxA7wPbp1NQ,128
131
134
  sarvamai/types/text_to_speech_response.py,sha256=Yzvwvwm65IR2vUzxZws9OLBW0GgB6bbmvjClqSuZzdg,742
132
- sarvamai/types/text_to_speech_speaker.py,sha256=r-khCl0iD-oZu5Mf9FMoO3FzFKKLTzaUmfi0CySrFok,495
135
+ sarvamai/types/text_to_speech_speaker.py,sha256=300mXxDKnWV9O7ccuO8EkLooAu2-y2ZdqDynrckaHew,219
133
136
  sarvamai/types/timestamps_model.py,sha256=ZlqcxYNtAcm2c61NIwTcS2nGYMeM-T7hfhI0BMnnhI0,852
134
137
  sarvamai/types/transcription_metrics.py,sha256=jSQAjBY-gpGq_USnIpHNwTWUQKDvaYWrkX2FlFu58oo,717
135
138
  sarvamai/types/translate_mode.py,sha256=lLBJyAaawLMH1eCGWTZKuzAA0iCEc2TL_7cpRZzmIac,210
@@ -143,6 +146,6 @@ sarvamai/types/transliterate_mode.py,sha256=1jSEMlGcoLkWuk12TgoOpSgwifa4rThGKZ1h
143
146
  sarvamai/types/transliterate_source_language.py,sha256=bSY9wJszF0sg-Cgg6F-YcWC8ly1mIlj9rqa15-jBtx8,283
144
147
  sarvamai/types/transliteration_response.py,sha256=yt-lzTbDeJ_ZL4I8kQa6oESxA9ebeJJY7LfFHpdEsmM,815
145
148
  sarvamai/version.py,sha256=Qkp3Ee9YH-O9RTix90e0i7iNrFAGN-QDt2AFwGA4n8k,75
146
- sarvamai-0.1.5a13.dist-info/METADATA,sha256=vqTJfugkpQ2pubvW2fJl-LpDIBNezWwbvhIXh2-QvvU,1038
147
- sarvamai-0.1.5a13.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
148
- sarvamai-0.1.5a13.dist-info/RECORD,,
149
+ sarvamai-0.1.6.dist-info/METADATA,sha256=DW3eeStVixYGiHsf1xXcjYfVSl9tvC8JRbxl0VBqooQ,1035
150
+ sarvamai-0.1.6.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
151
+ sarvamai-0.1.6.dist-info/RECORD,,
@@ -1,5 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import typing
4
-
5
- AudioDataEncoding = typing.Union[typing.Literal["audio/wav", "audio/pcm", "audio/flac", "audio/mp3"], typing.Any]