sarvamai 0.1.11a1__py3-none-any.whl → 0.1.11a3__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their respective public registries; it is provided for informational purposes only.
sarvamai/text_to_speech/client.py
@@ -4,10 +4,10 @@ import typing
 
 from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ..core.request_options import RequestOptions
- from ..types.audio_codec import AudioCodec
 from ..types.speech_sample_rate import SpeechSampleRate
 from ..types.text_to_speech_language import TextToSpeechLanguage
 from ..types.text_to_speech_model import TextToSpeechModel
+ from ..types.text_to_speech_output_audio_codec import TextToSpeechOutputAudioCodec
 from ..types.text_to_speech_response import TextToSpeechResponse
 from ..types.text_to_speech_speaker import TextToSpeechSpeaker
 from .raw_client import AsyncRawTextToSpeechClient, RawTextToSpeechClient
@@ -43,7 +43,7 @@ class TextToSpeechClient:
 speech_sample_rate: typing.Optional[SpeechSampleRate] = OMIT,
 enable_preprocessing: typing.Optional[bool] = OMIT,
 model: typing.Optional[TextToSpeechModel] = OMIT,
- audio_codec: typing.Optional[AudioCodec] = OMIT,
+ output_audio_codec: typing.Optional[TextToSpeechOutputAudioCodec] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
 ) -> TextToSpeechResponse:
 """
@@ -87,7 +87,7 @@ class TextToSpeechClient:
 model : typing.Optional[TextToSpeechModel]
 Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
 
- audio_codec : typing.Optional[AudioCodec]
+ output_audio_codec : typing.Optional[TextToSpeechOutputAudioCodec]
 Specifies the audio codec for the output audio file. Different codecs offer various compression and quality characteristics.
 
 request_options : typing.Optional[RequestOptions]
@@ -120,7 +120,7 @@ class TextToSpeechClient:
 speech_sample_rate=speech_sample_rate,
 enable_preprocessing=enable_preprocessing,
 model=model,
- audio_codec=audio_codec,
+ output_audio_codec=output_audio_codec,
 request_options=request_options,
 )
 return _response.data
@@ -153,7 +153,7 @@ class AsyncTextToSpeechClient:
 speech_sample_rate: typing.Optional[SpeechSampleRate] = OMIT,
 enable_preprocessing: typing.Optional[bool] = OMIT,
 model: typing.Optional[TextToSpeechModel] = OMIT,
- audio_codec: typing.Optional[AudioCodec] = OMIT,
+ output_audio_codec: typing.Optional[TextToSpeechOutputAudioCodec] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
 ) -> TextToSpeechResponse:
 """
@@ -197,7 +197,7 @@ class AsyncTextToSpeechClient:
 model : typing.Optional[TextToSpeechModel]
 Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
 
- audio_codec : typing.Optional[AudioCodec]
+ output_audio_codec : typing.Optional[TextToSpeechOutputAudioCodec]
 Specifies the audio codec for the output audio file. Different codecs offer various compression and quality characteristics.
 
 request_options : typing.Optional[RequestOptions]
@@ -238,7 +238,7 @@ class AsyncTextToSpeechClient:
 speech_sample_rate=speech_sample_rate,
 enable_preprocessing=enable_preprocessing,
 model=model,
- audio_codec=audio_codec,
+ output_audio_codec=output_audio_codec,
 request_options=request_options,
 )
 return _response.data
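
For callers, the only visible change in text_to_speech/client.py is that the keyword argument audio_codec is renamed to output_audio_codec and is now typed with the new TextToSpeechOutputAudioCodec alias (defined later in this diff). A minimal migration sketch follows; the root client name SarvamAI, its api_subscription_key argument, the text_to_speech.convert(...) method name, and the text parameter are assumptions not shown in this diff, so adapt them to the actual SDK surface.

from sarvamai import SarvamAI  # root client name assumed, not shown in this diff

client = SarvamAI(api_subscription_key="YOUR_KEY")  # constructor argument assumed

# 0.1.11a1 accepted:
#   client.text_to_speech.convert(text="Namaste", audio_codec="mp3")
# 0.1.11a3 renames the keyword (other required parameters omitted for brevity):
response = client.text_to_speech.convert(  # method name assumed
    text="Namaste",                        # parameter name assumed
    output_audio_codec="mp3",              # renamed parameter from this hunk
)
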
sarvamai/text_to_speech/raw_client.py
@@ -13,10 +13,10 @@ from ..errors.forbidden_error import ForbiddenError
 from ..errors.internal_server_error import InternalServerError
 from ..errors.too_many_requests_error import TooManyRequestsError
 from ..errors.unprocessable_entity_error import UnprocessableEntityError
- from ..types.audio_codec import AudioCodec
 from ..types.speech_sample_rate import SpeechSampleRate
 from ..types.text_to_speech_language import TextToSpeechLanguage
 from ..types.text_to_speech_model import TextToSpeechModel
+ from ..types.text_to_speech_output_audio_codec import TextToSpeechOutputAudioCodec
 from ..types.text_to_speech_response import TextToSpeechResponse
 from ..types.text_to_speech_speaker import TextToSpeechSpeaker
 
@@ -40,7 +40,7 @@ class RawTextToSpeechClient:
 speech_sample_rate: typing.Optional[SpeechSampleRate] = OMIT,
 enable_preprocessing: typing.Optional[bool] = OMIT,
 model: typing.Optional[TextToSpeechModel] = OMIT,
- audio_codec: typing.Optional[AudioCodec] = OMIT,
+ output_audio_codec: typing.Optional[TextToSpeechOutputAudioCodec] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
 ) -> HttpResponse[TextToSpeechResponse]:
 """
@@ -84,7 +84,7 @@ class RawTextToSpeechClient:
 model : typing.Optional[TextToSpeechModel]
 Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
 
- audio_codec : typing.Optional[AudioCodec]
+ output_audio_codec : typing.Optional[TextToSpeechOutputAudioCodec]
 Specifies the audio codec for the output audio file. Different codecs offer various compression and quality characteristics.
 
 request_options : typing.Optional[RequestOptions]
@@ -109,7 +109,7 @@ class RawTextToSpeechClient:
 "speech_sample_rate": speech_sample_rate,
 "enable_preprocessing": enable_preprocessing,
 "model": model,
- "audio_codec": audio_codec,
+ "output_audio_codec": output_audio_codec,
 },
 headers={
 "content-type": "application/json",
@@ -204,7 +204,7 @@ class AsyncRawTextToSpeechClient:
 speech_sample_rate: typing.Optional[SpeechSampleRate] = OMIT,
 enable_preprocessing: typing.Optional[bool] = OMIT,
 model: typing.Optional[TextToSpeechModel] = OMIT,
- audio_codec: typing.Optional[AudioCodec] = OMIT,
+ output_audio_codec: typing.Optional[TextToSpeechOutputAudioCodec] = OMIT,
 request_options: typing.Optional[RequestOptions] = None,
 ) -> AsyncHttpResponse[TextToSpeechResponse]:
 """
@@ -248,7 +248,7 @@ class AsyncRawTextToSpeechClient:
 model : typing.Optional[TextToSpeechModel]
 Specifies the model to use for text-to-speech conversion. Default is bulbul:v2.
 
- audio_codec : typing.Optional[AudioCodec]
+ output_audio_codec : typing.Optional[TextToSpeechOutputAudioCodec]
 Specifies the audio codec for the output audio file. Different codecs offer various compression and quality characteristics.
 
 request_options : typing.Optional[RequestOptions]
@@ -273,7 +273,7 @@ class AsyncRawTextToSpeechClient:
 "speech_sample_rate": speech_sample_rate,
 "enable_preprocessing": enable_preprocessing,
 "model": model,
- "audio_codec": audio_codec,
+ "output_audio_codec": output_audio_codec,
 },
 headers={
 "content-type": "application/json",
sarvamai/types/__init__.py
@@ -2,7 +2,6 @@
 
 # isort: skip_file
 
- from .audio_codec import AudioCodec
 from .audio_data import AudioData
 from .audio_message import AudioMessage
 from .audio_output import AudioOutput
@@ -26,6 +25,7 @@ from .config_message import ConfigMessage
 from .configure_connection import ConfigureConnection
 from .configure_connection_data import ConfigureConnectionData
 from .configure_connection_data_output_audio_bitrate import ConfigureConnectionDataOutputAudioBitrate
+ from .configure_connection_data_output_audio_codec import ConfigureConnectionDataOutputAudioCodec
 from .configure_connection_data_speaker import ConfigureConnectionDataSpeaker
 from .configure_connection_data_target_language_code import ConfigureConnectionDataTargetLanguageCode
 from .create_chat_completion_response import CreateChatCompletionResponse
@@ -44,7 +44,6 @@ from .files_request import FilesRequest
 from .files_upload_response import FilesUploadResponse
 from .finish_reason import FinishReason
 from .flush_signal import FlushSignal
- from .format import Format
 from .job_state import JobState
 from .job_status_v_1 import JobStatusV1
 from .language_identification_response import LanguageIdentificationResponse
@@ -64,6 +63,7 @@ from .speech_to_text_response import SpeechToTextResponse
 from .speech_to_text_response_data import SpeechToTextResponseData
 from .speech_to_text_streaming_response import SpeechToTextStreamingResponse
 from .speech_to_text_transcription_data import SpeechToTextTranscriptionData
+ from .speech_to_text_translate_job_parameters import SpeechToTextTranslateJobParameters
 from .speech_to_text_translate_language import SpeechToTextTranslateLanguage
 from .speech_to_text_translate_model import SpeechToTextTranslateModel
 from .speech_to_text_translate_response import SpeechToTextTranslateResponse
@@ -78,6 +78,7 @@ from .task_file_details import TaskFileDetails
 from .task_state import TaskState
 from .text_to_speech_language import TextToSpeechLanguage
 from .text_to_speech_model import TextToSpeechModel
+ from .text_to_speech_output_audio_codec import TextToSpeechOutputAudioCodec
 from .text_to_speech_response import TextToSpeechResponse
 from .text_to_speech_speaker import TextToSpeechSpeaker
 from .timestamps_model import TimestampsModel
@@ -94,7 +95,6 @@ from .transliterate_source_language import TransliterateSourceLanguage
 from .transliteration_response import TransliterationResponse
 
 __all__ = [
- "AudioCodec",
 "AudioData",
 "AudioMessage",
 "AudioOutput",
@@ -116,6 +116,7 @@ __all__ = [
 "ConfigureConnection",
 "ConfigureConnectionData",
 "ConfigureConnectionDataOutputAudioBitrate",
+ "ConfigureConnectionDataOutputAudioCodec",
 "ConfigureConnectionDataSpeaker",
 "ConfigureConnectionDataTargetLanguageCode",
 "CreateChatCompletionResponse",
@@ -134,7 +135,6 @@ __all__ = [
 "FilesUploadResponse",
 "FinishReason",
 "FlushSignal",
- "Format",
 "JobState",
 "JobStatusV1",
 "LanguageIdentificationResponse",
@@ -154,6 +154,7 @@ __all__ = [
 "SpeechToTextResponseData",
 "SpeechToTextStreamingResponse",
 "SpeechToTextTranscriptionData",
+ "SpeechToTextTranslateJobParameters",
 "SpeechToTextTranslateLanguage",
 "SpeechToTextTranslateModel",
 "SpeechToTextTranslateResponse",
@@ -168,6 +169,7 @@ __all__ = [
 "TaskState",
 "TextToSpeechLanguage",
 "TextToSpeechModel",
+ "TextToSpeechOutputAudioCodec",
 "TextToSpeechResponse",
 "TextToSpeechSpeaker",
 "TimestampsModel",
sarvamai/types/configure_connection_data.py
@@ -5,6 +5,7 @@ import typing
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
 from .configure_connection_data_output_audio_bitrate import ConfigureConnectionDataOutputAudioBitrate
+ from .configure_connection_data_output_audio_codec import ConfigureConnectionDataOutputAudioCodec
 from .configure_connection_data_speaker import ConfigureConnectionDataSpeaker
 from .configure_connection_data_target_language_code import ConfigureConnectionDataTargetLanguageCode
 
@@ -63,7 +64,7 @@ class ConfigureConnectionData(UniversalBaseModel):
 of mixed-language text. Default is false.
 """
 
- output_audio_codec: typing.Optional[typing.Literal["mp3"]] = pydantic.Field(default=None)
+ output_audio_codec: typing.Optional[ConfigureConnectionDataOutputAudioCodec] = pydantic.Field(default=None)
 """
 Audio codec (currently supports MP3 only, optimized for real-time playback)
 """
sarvamai/types/configure_connection_data_output_audio_codec.py
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+ 
+ import typing
+ 
+ ConfigureConnectionDataOutputAudioCodec = typing.Union[
+     typing.Literal["linear16", "mulaw", "alaw", "opus", "flac", "aac", "wav"], typing.Any
+ ]
sarvamai/types/speech_to_text_translate_job_parameters.py
@@ -0,0 +1,40 @@
+ # This file was auto-generated by Fern from our API Definition.
+ 
+ import typing
+ 
+ import pydantic
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2, UniversalBaseModel
+ from .speech_to_text_translate_model import SpeechToTextTranslateModel
+ 
+ 
+ class SpeechToTextTranslateJobParameters(UniversalBaseModel):
+     prompt: typing.Optional[str] = pydantic.Field(default=None)
+     """
+     Prompt to assist the transcription
+     """
+ 
+     model: typing.Optional[SpeechToTextTranslateModel] = pydantic.Field(default=None)
+     """
+     Model to be used for converting speech to text in target language
+     """
+ 
+     with_diarization: typing.Optional[bool] = pydantic.Field(default=None)
+     """
+     Enables speaker diarization, which identifies and separates different speakers in the audio.
+     When set to true, the API will provide speaker-specific segments in the response.
+     Note: This parameter is currently in Beta mode.
+     """
+ 
+     num_speakers: typing.Optional[int] = pydantic.Field(default=None)
+     """
+     Number of speakers to be detected in the audio. This is used when with_diarization is set to true.
+     """
+ 
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
+ 
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
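
The new job-parameters model is re-exported from sarvamai.types (see the __init__.py hunk above). Every field is optional with a None default and the config allows extra keys, so a partial instance like the sketch below is valid; the values are purely illustrative, and the model field is omitted because its permitted values are not shown in this diff.

from sarvamai.types import SpeechToTextTranslateJobParameters

# All fields default to None, so any subset can be supplied.
params = SpeechToTextTranslateJobParameters(
    prompt="Customer support call about a billing issue",
    with_diarization=True,  # Beta: returns speaker-separated segments
    num_speakers=2,         # only consulted when with_diarization is True
)
print(params)
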
sarvamai/types/text_to_speech_output_audio_codec.py
@@ -0,0 +1,7 @@
+ # This file was auto-generated by Fern from our API Definition.
+ 
+ import typing
+ 
+ TextToSpeechOutputAudioCodec = typing.Union[
+     typing.Literal["mp3", "linear16", "mulaw", "alaw", "opus", "flac", "aac", "wav"], typing.Any
+ ]
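
Both new aliases follow the same open-union pattern as the removed AudioCodec: a typing.Literal listing the documented codec names combined with typing.Any, so type checkers suggest the known values while strings added to the API later still pass. ConfigureConnectionData.output_audio_codec, previously typing.Literal["mp3"], is now typed with the first alias. A short illustration using only names introduced in this diff (both are re-exported from sarvamai.types, as the __init__.py hunk above shows):

from sarvamai.types import (
    ConfigureConnectionDataOutputAudioCodec,
    TextToSpeechOutputAudioCodec,
)

tts_codec: TextToSpeechOutputAudioCodec = "mp3"                      # documented literal
stream_codec: ConfigureConnectionDataOutputAudioCodec = "linear16"   # documented literal

# Because each union also includes typing.Any, an undocumented string still
# type-checks locally; whether the API accepts it is decided server-side.
future_codec: TextToSpeechOutputAudioCodec = "hypothetical-codec"
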
sarvamai-0.1.11a1.dist-info/METADATA → sarvamai-0.1.11a3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sarvamai
- Version: 0.1.11a1
+ Version: 0.1.11a3
 Summary:
 Requires-Python: >=3.8,<4.0
 Classifier: Intended Audience :: Developers
sarvamai-0.1.11a1.dist-info/RECORD → sarvamai-0.1.11a3.dist-info/RECORD
@@ -1,11 +1,11 @@
- sarvamai/__init__.py,sha256=3GBj0sRY6_5r171hnXCnlBqmNWAwjvsuq9JTnNnmIRY,10216
+ sarvamai/__init__.py,sha256=wq1a7uiqmB-87ii_Ye7Jif3sV1nvrdA0JF7M5e_FvVI,10564
 sarvamai/chat/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 sarvamai/chat/client.py,sha256=xOSj83Gr6Q7eY2qUeATiuXYQqBqWqSCQlIEopK5fKus,11022
 sarvamai/chat/raw_client.py,sha256=A2kRuZcVWlJhyYCD7YKgqNkZEp3cYa1731KhRkhirU0,17885
- sarvamai/client.py,sha256=5YC2fxVENOxQXoY-t3n8qZ0aQ9UasDjFRzBZw8ce9OQ,7861
+ sarvamai/client.py,sha256=J30X_os1lPf8Wml0KDFEf6p8VGHhgF_lf3nw1T2D3qo,8207
 sarvamai/core/__init__.py,sha256=YE2CtXeASe1RAbaI39twKWYKCuT4tW5is9HWHhJjR_g,1653
 sarvamai/core/api_error.py,sha256=44vPoTyWN59gonCIZMdzw7M1uspygiLnr3GNFOoVL2Q,614
- sarvamai/core/client_wrapper.py,sha256=jXSnpwjhgnSazz2XKOdWnRWGQVqhKTopyF7ZXCoL2jo,2570
+ sarvamai/core/client_wrapper.py,sha256=eyHeN1B7fvk8dfMZfv6DdCgRHHv13fEABklg1DOLkhA,2570
 sarvamai/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 sarvamai/core/events.py,sha256=j7VWXgMpOsjCXdzY22wIhI7Q-v5InZ4WchRzA88x_Sk,856
 sarvamai/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
@@ -28,7 +28,7 @@ sarvamai/errors/too_many_requests_error.py,sha256=Dl-_pfpboXJh-OtSbRaPQOB-UXvpVO
 sarvamai/errors/unprocessable_entity_error.py,sha256=JqxtzIhvjkpQDqbT9Q-go1n-gyv9PsYqq0ng_ZYyBMo,347
 sarvamai/play.py,sha256=4fh86zy8g8IPU2O8yPBY7QxXQOivv_nWQvPQsOa1arw,2183
 sarvamai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sarvamai/requests/__init__.py,sha256=MzOu8I9kvxHbuwd0KZIzlKNv7nRjw6D5XScovaf9ALo,5456
+ sarvamai/requests/__init__.py,sha256=NUwWK2r35t3wgDeukSf0pOtbrUzPBh6bMyK1oxxwQzw,5598
 sarvamai/requests/audio_data.py,sha256=QI3SK5aiAg2yJ-m3l9CxOkONnH3CCKMFCl9kAdMs19o,410
 sarvamai/requests/audio_message.py,sha256=ZBeogjGE6YFXXM-0g8zq9SoizDk21reR0YXSB-0fMjg,214
 sarvamai/requests/audio_output.py,sha256=BnoX345rwoWgaMaj24u_19-SjmPV0xt7vlFEEDKRw20,280
@@ -45,7 +45,7 @@ sarvamai/requests/choice.py,sha256=uulX4MZUoThEMcD3a80o_3V5YpnpqN8DfPaNZWVz-1o,8
 sarvamai/requests/completion_usage.py,sha256=LbZV-RxcxKdCAYqhCiaRtSFF3VwMJq71A989Z1rm-I8,428
 sarvamai/requests/config_message.py,sha256=EpYioGvDhCXDMvGH7Q1F7448zJzoHmlkQ1owoNGbWAw,383
 sarvamai/requests/configure_connection.py,sha256=a-foQtLxArL4CulvKEdeebbRqmS1GRmko3MZdnHVPEk,716
- sarvamai/requests/configure_connection_data.py,sha256=Niil2OrVBzQEtmWFn1JC-StLVp6WzzRIsu2i_M_8_44,2908
+ sarvamai/requests/configure_connection_data.py,sha256=lRk_4rYPQLLlwS2HXjQ9Abxdf98_DuOOja-VkrIR44Q,3016
 sarvamai/requests/create_chat_completion_response.py,sha256=TqS9u5_WVWMok_NreT4TeOsLJQeybPkbJm45Q0Zxw30,857
 sarvamai/requests/diarized_entry.py,sha256=gbXB4D_r5_Q8gs1arRKjxPeFcYg16dVDLcg2VhxmKQA,462
 sarvamai/requests/diarized_transcript.py,sha256=X-znuJ45oqwXzVyJumBHSqVGLz6JnoYFZmluQlEpEAw,323
@@ -70,6 +70,7 @@ sarvamai/requests/speech_to_text_response.py,sha256=GS3jNmHDOxqNZ7cvftD62khUMSBI
 sarvamai/requests/speech_to_text_response_data.py,sha256=69fYRdL0tCKpgKQqwzcM4T4Nf_lRxJFh-VCFe_tN964,364
 sarvamai/requests/speech_to_text_streaming_response.py,sha256=cN5tKE9wOWuyBna4wmrf-0LfkOULMpRaJ7qjLuu76V0,348
 sarvamai/requests/speech_to_text_transcription_data.py,sha256=Vc65hXDq65d14cP-fDJm151bi7XEKgPItNGt1UL6cOY,877
+ sarvamai/requests/speech_to_text_translate_job_parameters.py,sha256=Cco38i8IhX00S2eW3MhLekqUFMS7hZW2AwbpWyCAgpU,990
 sarvamai/requests/speech_to_text_translate_response.py,sha256=xLV2F37PkGR0erRDfTBEPWvywR8eVSL9JbH5a0C9wkY,893
 sarvamai/requests/speech_to_text_translate_response_data.py,sha256=OmjunP9R2xertJKn4fmpyzjDdj1_B_Yh6ZjH1eOlR-Q,407
 sarvamai/requests/speech_to_text_translate_streaming_response.py,sha256=KTjYZ0_oLapuM5Iiq7UwejMsrL1TGgFAW4k5l17TkZs,385
@@ -98,6 +99,9 @@ sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_high_vad_sensit
 sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_language_code.py,sha256=LxgEifmgWTCFZn9U-f-TWKxRPng3a2J26Zt526QrA0Y,267
 sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_model.py,sha256=b6F4ymgz4got6KVDqrweYvkET8itze63wUwWyjqDlO4,180
 sarvamai/speech_to_text_streaming/types/speech_to_text_streaming_vad_signals.py,sha256=8wiFOB7WDMbYCcMTYgNFJaIjEytYeXpJLwr_O_mH0TI,172
+ sarvamai/speech_to_text_translate_job/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
+ sarvamai/speech_to_text_translate_job/client.py,sha256=Qo0Uc-O6Y7JzUlrOH4RpFk46gdIdDWb6rHajbJzpxYc,13565
+ sarvamai/speech_to_text_translate_job/raw_client.py,sha256=g-xk7H8ZwjmPSuJSgblVSH7kqGh_5wAkYUy5PdwTm-U,49362
 sarvamai/speech_to_text_translate_streaming/__init__.py,sha256=_hmlce1Zs1grylysZhBUdtKfkaUROwVydtwz6l-1qqg,411
 sarvamai/speech_to_text_translate_streaming/client.py,sha256=TnHCcspbbYFaimcEk8km3QNrNkm8JlX7e2ydpeHL9EE,8068
 sarvamai/speech_to_text_translate_streaming/raw_client.py,sha256=_TlUro1NwWc1dAvC0SHcOoTnsnAqRRXxzxKHxw8BGTs,7177
@@ -110,14 +114,13 @@ sarvamai/text/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 sarvamai/text/client.py,sha256=2kA0Gxfi-r52zMQdqRRD811014alzlHB_FANkp3Kn_c,30595
 sarvamai/text/raw_client.py,sha256=7xYmJA50kTKy_gj8tkAPckKp2djHB37zOdm0_icbMb8,48695
 sarvamai/text_to_speech/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
- sarvamai/text_to_speech/client.py,sha256=ERj6e9ZBkdv16D344Z3C2EeqBbGezZx6GuzANoXHKkY,9449
- sarvamai/text_to_speech/raw_client.py,sha256=WCbuL3i7FRL-vwCRER9Y47wPGRMzSzoeQHHeV2E7l78,15401
+ sarvamai/text_to_speech/client.py,sha256=iwrQNfoMgCSOgvztTIXtLHQmSmn0RInwt5RSo9TwdtA,9617
+ sarvamai/text_to_speech/raw_client.py,sha256=si_aSjMR7SocIpKZFoVYqBmaIDuRm_6vxTM0dJ73PEo,15569
 sarvamai/text_to_speech_streaming/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
 sarvamai/text_to_speech_streaming/client.py,sha256=geTF5xy-batzO12XVt0sPw_XJCi7-m2sDFK_B7SL7qc,6088
 sarvamai/text_to_speech_streaming/raw_client.py,sha256=asOcNw1WAViOiXDVWH4sxWSXGVoLwAOh9vUtq_xralA,5269
 sarvamai/text_to_speech_streaming/socket_client.py,sha256=NEcijnvjuNcWfzqpBi-xWsXVkL0NPq6EGAkEjnaq9hw,13909
- sarvamai/types/__init__.py,sha256=Vdknm-xJ1MfSywqApXjWkjATf_ZPfWFm4mI2-2RSRBs,7389
- sarvamai/types/audio_codec.py,sha256=9qNJc1SdIP4IKHYwclqqtU9810qTaJtsSGx22AqKhKY,200
+ sarvamai/types/__init__.py,sha256=_tcONO157t6xm41llByU9FzSYWCQO2gExsqKZOz0HpQ,7681
 sarvamai/types/audio_data.py,sha256=rgOukLkLNJ_HBBVE2g5dfEL2CWjRoGiMvCtpq0qTB1Y,829
 sarvamai/types/audio_message.py,sha256=sB4EgkWkWJzipYXobkmM9AYZTTZtCpg_ySKssUeznUE,560
 sarvamai/types/audio_output.py,sha256=Eq-YUZa1mSDwt7bax2c4Vv2gBlyM_JBJWzHhTAhFSko,621
@@ -134,8 +137,9 @@ sarvamai/types/choice.py,sha256=uXBCsjWP9VK3XWQWZUeI4EnU10w0G9nAfKn2tJZvxko,1244
 sarvamai/types/completion_usage.py,sha256=xYQGlQUbKqsksuV73H-1ajjfT5M7w47eLfdWXSlrI5M,843
 sarvamai/types/config_message.py,sha256=sGrT-qYTRqLVfIo5nRUuRlqPtPVmiAkUAnaMtlmQYCU,778
 sarvamai/types/configure_connection.py,sha256=SnSNk02gQqP8e4VB4y88jjeFQ4ClpImjGLn2ANI8cZ4,1058
- sarvamai/types/configure_connection_data.py,sha256=brMO-Z1TDq3oTJ22m1icBkkmnd9k67p_DzecnMcqNko,3421
+ sarvamai/types/configure_connection_data.py,sha256=uXC7fhNJWCpaKc2Vrz2DNpUxx1gN3PwAoDL-H8L401A,3537
 sarvamai/types/configure_connection_data_output_audio_bitrate.py,sha256=h00YvKLxsZC8L3__rH4XH53nN_GY40UElW1EjysCwUs,208
+ sarvamai/types/configure_connection_data_output_audio_codec.py,sha256=ddd-MjgmKE0e5-TPgPKclBu4h9WLC5g3kL8Ap_91i50,228
 sarvamai/types/configure_connection_data_speaker.py,sha256=SzyAiK5LynXwb9KniaO2qoOLY-II3-PMZbRuIsQ9shw,230
 sarvamai/types/configure_connection_data_target_language_code.py,sha256=jrU1EblAtDYbybUO1KUkHhevmlSBj2AQxX13ii3QhAQ,275
 sarvamai/types/create_chat_completion_response.py,sha256=4nEzeWzHGW1_BmRAtOuGsbRZ0ojNgnzJSMUFyYuYviw,1285
@@ -154,7 +158,6 @@ sarvamai/types/files_request.py,sha256=Jh8xPjoOTjY7DOE2EieoRqtkWkYxz9j-BP8TvWxuR
 sarvamai/types/files_upload_response.py,sha256=wRntZyh1-LGpo4-x_986Nv2A9rv9asDx93pqoQxNpTY,804
 sarvamai/types/finish_reason.py,sha256=PBWtBNkX4FMaODmlUehpF6qLB5uH_zR-Mw3M4uhIB6U,209
 sarvamai/types/flush_signal.py,sha256=N7MJWb658KoxRpFN9cIbyQGY45zZcg8YCou3E1v--9o,759
- sarvamai/types/format.py,sha256=57LicD0XLqW4D1QEnZWsWGifzRy1GV9P5utKPXLoxtg,144
 sarvamai/types/job_state.py,sha256=H6Zph2mIcjsd3upEDt1VzIEORkEpnIDs0kH8BvIyrow,189
 sarvamai/types/job_status_v_1.py,sha256=i1xopAptPVbGGIUcjKWgjAzSKwLVy6y4oGVEYcOA1P0,1798
 sarvamai/types/language_identification_response.py,sha256=jG4ZQ6KQHCiEDqC51OniOwiRdW14Fbz22bbTsUDp_kc,1483
@@ -174,6 +177,7 @@ sarvamai/types/speech_to_text_response.py,sha256=iWRGEJeHUFIOxeEhoCQu68njeA6lcqX
 sarvamai/types/speech_to_text_response_data.py,sha256=gbxZTBSjbN3ZIa10K6tWPYtymcpnQTFIaUnXkOmsmD4,322
 sarvamai/types/speech_to_text_streaming_response.py,sha256=z6tVAHbVK9lC3w3lac__LEUfO8AAzEilkeGlaLskTtc,687
 sarvamai/types/speech_to_text_transcription_data.py,sha256=EqwPAPSi98PwARaTj-ufzFUSHyN-NPoPla5vi_KERrU,1297
+ sarvamai/types/speech_to_text_translate_job_parameters.py,sha256=fvfcyzIyT0DtcRYePDvglHH-wAhGbsi3H5G4i5nuWT8,1409
 sarvamai/types/speech_to_text_translate_language.py,sha256=yikNM-roIumVG-eqBWss93wLGudZdLPwd0i3VcXH5zo,263
 sarvamai/types/speech_to_text_translate_model.py,sha256=hxKnhnelhH5Ah8QO5SymTJMnDw_WWAhGDJnutoNk3qw,213
 sarvamai/types/speech_to_text_translate_response.py,sha256=Z5Na7IQW2ok3TP21xd-jKkwioplEKfonNIMhoJQKkVw,1278
@@ -188,6 +192,7 @@ sarvamai/types/task_file_details.py,sha256=oJV7zaUVrbqqw-If-2_V1aLk28qW0ZbeIDtIp
 sarvamai/types/task_state.py,sha256=fSrmD00Goi0J6s9hzqcFqz3Fkh37diBYpxnz4FkwHdU,182
 sarvamai/types/text_to_speech_language.py,sha256=T5-rP93WyJwkdJTmNZuvNkkoVtgU0G25a8R2F3OwRZ4,254
 sarvamai/types/text_to_speech_model.py,sha256=qRkpGCcfrLD45l499cBUcBgZDo_qKPZtFxA7wPbp1NQ,128
+ sarvamai/types/text_to_speech_output_audio_codec.py,sha256=lW-w0RIQiLZGdVmUgg20yYEblvaZ9AvvqzE6o7xRhWQ,224
 sarvamai/types/text_to_speech_response.py,sha256=Yzvwvwm65IR2vUzxZws9OLBW0GgB6bbmvjClqSuZzdg,742
 sarvamai/types/text_to_speech_speaker.py,sha256=300mXxDKnWV9O7ccuO8EkLooAu2-y2ZdqDynrckaHew,219
 sarvamai/types/timestamps_model.py,sha256=ZlqcxYNtAcm2c61NIwTcS2nGYMeM-T7hfhI0BMnnhI0,852
@@ -203,6 +208,6 @@ sarvamai/types/transliterate_mode.py,sha256=1jSEMlGcoLkWuk12TgoOpSgwifa4rThGKZ1h
 sarvamai/types/transliterate_source_language.py,sha256=bSY9wJszF0sg-Cgg6F-YcWC8ly1mIlj9rqa15-jBtx8,283
 sarvamai/types/transliteration_response.py,sha256=yt-lzTbDeJ_ZL4I8kQa6oESxA9ebeJJY7LfFHpdEsmM,815
 sarvamai/version.py,sha256=Qkp3Ee9YH-O9RTix90e0i7iNrFAGN-QDt2AFwGA4n8k,75
- sarvamai-0.1.11a1.dist-info/METADATA,sha256=dfnuJp6su4GLfw335hVroaiLC44B37vrSjV6-0aVnfY,26753
- sarvamai-0.1.11a1.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
- sarvamai-0.1.11a1.dist-info/RECORD,,
+ sarvamai-0.1.11a3.dist-info/METADATA,sha256=c6e6mRlKnUEgebTQ0LhrFajaqvE_z3xT-i0sOLBTB-o,26753
+ sarvamai-0.1.11a3.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+ sarvamai-0.1.11a3.dist-info/RECORD,,
sarvamai/types/audio_codec.py DELETED
@@ -1,5 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
- 
- import typing
- 
- AudioCodec = typing.Union[typing.Literal["mp3", "linear16", "mulaw", "alaw", "opus", "flac", "aac", "wav"], typing.Any]

sarvamai/types/format.py DELETED
@@ -1,5 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
- 
- import typing
- 
- Format = typing.Union[typing.Literal["wav", "mp3"], typing.Any]