cartesia 1.4.0__py3-none-any.whl → 2.0.0a0__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- cartesia/__init__.py +288 -3
- cartesia/api_status/__init__.py +6 -0
- cartesia/api_status/client.py +104 -0
- cartesia/api_status/requests/__init__.py +5 -0
- cartesia/api_status/requests/api_info.py +8 -0
- cartesia/api_status/types/__init__.py +5 -0
- cartesia/api_status/types/api_info.py +20 -0
- cartesia/base_client.py +160 -0
- cartesia/client.py +163 -40
- cartesia/core/__init__.py +47 -0
- cartesia/core/api_error.py +15 -0
- cartesia/core/client_wrapper.py +55 -0
- cartesia/core/datetime_utils.py +28 -0
- cartesia/core/file.py +67 -0
- cartesia/core/http_client.py +499 -0
- cartesia/core/jsonable_encoder.py +101 -0
- cartesia/core/pydantic_utilities.py +296 -0
- cartesia/core/query_encoder.py +58 -0
- cartesia/core/remove_none_from_dict.py +11 -0
- cartesia/core/request_options.py +35 -0
- cartesia/core/serialization.py +272 -0
- cartesia/datasets/__init__.py +24 -0
- cartesia/datasets/client.py +422 -0
- cartesia/datasets/requests/__init__.py +15 -0
- cartesia/datasets/requests/create_dataset_request.py +7 -0
- cartesia/datasets/requests/dataset.py +9 -0
- cartesia/datasets/requests/dataset_file.py +9 -0
- cartesia/datasets/requests/paginated_dataset_files.py +10 -0
- cartesia/datasets/requests/paginated_datasets.py +10 -0
- cartesia/datasets/types/__init__.py +17 -0
- cartesia/datasets/types/create_dataset_request.py +19 -0
- cartesia/datasets/types/dataset.py +21 -0
- cartesia/datasets/types/dataset_file.py +21 -0
- cartesia/datasets/types/file_purpose.py +5 -0
- cartesia/datasets/types/paginated_dataset_files.py +21 -0
- cartesia/datasets/types/paginated_datasets.py +21 -0
- cartesia/embedding/__init__.py +5 -0
- cartesia/embedding/types/__init__.py +5 -0
- cartesia/embedding/types/embedding.py +201 -0
- cartesia/environment.py +7 -0
- cartesia/infill/__init__.py +2 -0
- cartesia/infill/client.py +294 -0
- cartesia/tts/__init__.py +167 -0
- cartesia/{_async_websocket.py → tts/_async_websocket.py} +159 -84
- cartesia/tts/_websocket.py +430 -0
- cartesia/tts/client.py +407 -0
- cartesia/tts/requests/__init__.py +76 -0
- cartesia/tts/requests/cancel_context_request.py +17 -0
- cartesia/tts/requests/controls.py +11 -0
- cartesia/tts/requests/generation_request.py +53 -0
- cartesia/tts/requests/mp_3_output_format.py +11 -0
- cartesia/tts/requests/output_format.py +30 -0
- cartesia/tts/requests/phoneme_timestamps.py +10 -0
- cartesia/tts/requests/raw_output_format.py +11 -0
- cartesia/tts/requests/speed.py +7 -0
- cartesia/tts/requests/tts_request.py +24 -0
- cartesia/tts/requests/tts_request_embedding_specifier.py +16 -0
- cartesia/tts/requests/tts_request_id_specifier.py +16 -0
- cartesia/tts/requests/tts_request_voice_specifier.py +7 -0
- cartesia/tts/requests/wav_output_format.py +7 -0
- cartesia/tts/requests/web_socket_base_response.py +11 -0
- cartesia/tts/requests/web_socket_chunk_response.py +8 -0
- cartesia/tts/requests/web_socket_done_response.py +7 -0
- cartesia/tts/requests/web_socket_error_response.py +7 -0
- cartesia/tts/requests/web_socket_flush_done_response.py +9 -0
- cartesia/tts/requests/web_socket_phoneme_timestamps_response.py +9 -0
- cartesia/tts/requests/web_socket_raw_output_format.py +11 -0
- cartesia/tts/requests/web_socket_request.py +7 -0
- cartesia/tts/requests/web_socket_response.py +69 -0
- cartesia/tts/requests/web_socket_stream_options.py +8 -0
- cartesia/tts/requests/web_socket_timestamps_response.py +9 -0
- cartesia/tts/requests/web_socket_tts_output.py +18 -0
- cartesia/tts/requests/web_socket_tts_request.py +24 -0
- cartesia/tts/requests/word_timestamps.py +10 -0
- cartesia/tts/socket_client.py +302 -0
- cartesia/tts/types/__init__.py +90 -0
- cartesia/tts/types/cancel_context_request.py +28 -0
- cartesia/tts/types/context_id.py +3 -0
- cartesia/tts/types/controls.py +22 -0
- cartesia/tts/types/emotion.py +29 -0
- cartesia/tts/types/flush_id.py +3 -0
- cartesia/tts/types/generation_request.py +66 -0
- cartesia/tts/types/mp_3_output_format.py +23 -0
- cartesia/tts/types/natural_specifier.py +5 -0
- cartesia/tts/types/numerical_specifier.py +3 -0
- cartesia/tts/types/output_format.py +58 -0
- cartesia/tts/types/phoneme_timestamps.py +21 -0
- cartesia/tts/types/raw_encoding.py +5 -0
- cartesia/tts/types/raw_output_format.py +22 -0
- cartesia/tts/types/speed.py +7 -0
- cartesia/tts/types/supported_language.py +7 -0
- cartesia/tts/types/tts_request.py +35 -0
- cartesia/tts/types/tts_request_embedding_specifier.py +27 -0
- cartesia/tts/types/tts_request_id_specifier.py +27 -0
- cartesia/tts/types/tts_request_voice_specifier.py +7 -0
- cartesia/tts/types/wav_output_format.py +17 -0
- cartesia/tts/types/web_socket_base_response.py +22 -0
- cartesia/tts/types/web_socket_chunk_response.py +20 -0
- cartesia/tts/types/web_socket_done_response.py +17 -0
- cartesia/tts/types/web_socket_error_response.py +19 -0
- cartesia/tts/types/web_socket_flush_done_response.py +21 -0
- cartesia/tts/types/web_socket_phoneme_timestamps_response.py +20 -0
- cartesia/tts/types/web_socket_raw_output_format.py +22 -0
- cartesia/tts/types/web_socket_request.py +7 -0
- cartesia/tts/types/web_socket_response.py +124 -0
- cartesia/tts/types/web_socket_stream_options.py +19 -0
- cartesia/tts/types/web_socket_timestamps_response.py +20 -0
- cartesia/tts/types/web_socket_tts_output.py +27 -0
- cartesia/tts/types/web_socket_tts_request.py +36 -0
- cartesia/tts/types/word_timestamps.py +21 -0
- cartesia/tts/utils/tts.py +64 -0
- cartesia/tts/utils/types.py +70 -0
- cartesia/version.py +3 -1
- cartesia/voice_changer/__init__.py +27 -0
- cartesia/voice_changer/client.py +395 -0
- cartesia/voice_changer/requests/__init__.py +15 -0
- cartesia/voice_changer/requests/streaming_response.py +36 -0
- cartesia/voice_changer/types/__init__.py +17 -0
- cartesia/voice_changer/types/output_format_container.py +5 -0
- cartesia/voice_changer/types/streaming_response.py +62 -0
- cartesia/voices/__init__.py +67 -0
- cartesia/voices/client.py +1812 -0
- cartesia/voices/requests/__init__.py +27 -0
- cartesia/voices/requests/create_voice_request.py +21 -0
- cartesia/voices/requests/embedding_response.py +8 -0
- cartesia/voices/requests/embedding_specifier.py +10 -0
- cartesia/voices/requests/id_specifier.py +10 -0
- cartesia/voices/requests/localize_dialect.py +6 -0
- cartesia/voices/requests/localize_voice_request.py +15 -0
- cartesia/voices/requests/mix_voice_specifier.py +7 -0
- cartesia/voices/requests/mix_voices_request.py +9 -0
- cartesia/voices/requests/update_voice_request.py +15 -0
- cartesia/voices/requests/voice.py +39 -0
- cartesia/voices/requests/voice_metadata.py +36 -0
- cartesia/voices/types/__init__.py +41 -0
- cartesia/voices/types/base_voice_id.py +5 -0
- cartesia/voices/types/clone_mode.py +5 -0
- cartesia/voices/types/create_voice_request.py +32 -0
- cartesia/voices/types/embedding_response.py +20 -0
- cartesia/voices/types/embedding_specifier.py +22 -0
- cartesia/voices/types/gender.py +5 -0
- cartesia/voices/types/id_specifier.py +22 -0
- cartesia/voices/types/localize_dialect.py +6 -0
- cartesia/voices/types/localize_english_dialect.py +5 -0
- cartesia/voices/types/localize_target_language.py +7 -0
- cartesia/voices/types/localize_voice_request.py +26 -0
- cartesia/voices/types/mix_voice_specifier.py +7 -0
- cartesia/voices/types/mix_voices_request.py +20 -0
- cartesia/voices/types/update_voice_request.py +27 -0
- cartesia/voices/types/voice.py +50 -0
- cartesia/voices/types/voice_id.py +3 -0
- cartesia/voices/types/voice_metadata.py +48 -0
- cartesia/voices/types/weight.py +3 -0
- cartesia-2.0.0a0.dist-info/METADATA +306 -0
- cartesia-2.0.0a0.dist-info/RECORD +158 -0
- {cartesia-1.4.0.dist-info → cartesia-2.0.0a0.dist-info}/WHEEL +1 -1
- cartesia/_async_sse.py +0 -95
- cartesia/_logger.py +0 -3
- cartesia/_sse.py +0 -143
- cartesia/_types.py +0 -70
- cartesia/_websocket.py +0 -358
- cartesia/async_client.py +0 -82
- cartesia/async_tts.py +0 -176
- cartesia/resource.py +0 -44
- cartesia/tts.py +0 -292
- cartesia/utils/deprecated.py +0 -55
- cartesia/utils/retry.py +0 -87
- cartesia/utils/tts.py +0 -78
- cartesia/voices.py +0 -204
- cartesia-1.4.0.dist-info/METADATA +0 -663
- cartesia-1.4.0.dist-info/RECORD +0 -23
- cartesia-1.4.0.dist-info/licenses/LICENSE.md +0 -21
- /cartesia/{utils/__init__.py → py.typed} +0 -0
- /cartesia/{_constants.py → tts/utils/constants.py} +0 -0
cartesia/core/pydantic_utilities.py (new file, 296 added lines)
@@ -0,0 +1,296 @@
# This file was auto-generated by Fern from our API Definition.

# nopycln: file
import datetime as dt
import typing
from collections import defaultdict

import typing_extensions

import pydantic

from .datetime_utils import serialize_datetime
from .serialization import convert_and_respect_annotation_metadata

IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")

if IS_PYDANTIC_V2:
    # isort will try to reformat the comments on these imports, which breaks mypy
    # isort: off
    from pydantic.v1.datetime_parse import (  # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
        parse_date as parse_date,
    )
    from pydantic.v1.datetime_parse import (  # pyright: ignore[reportMissingImports] # Pydantic v2
        parse_datetime as parse_datetime,
    )
    from pydantic.v1.json import (  # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
        ENCODERS_BY_TYPE as encoders_by_type,
    )
    from pydantic.v1.typing import (  # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
        get_args as get_args,
    )
    from pydantic.v1.typing import (  # pyright: ignore[reportMissingImports] # Pydantic v2
        get_origin as get_origin,
    )
    from pydantic.v1.typing import (  # pyright: ignore[reportMissingImports] # Pydantic v2
        is_literal_type as is_literal_type,
    )
    from pydantic.v1.typing import (  # pyright: ignore[reportMissingImports] # Pydantic v2
        is_union as is_union,
    )
    from pydantic.v1.fields import ModelField as ModelField  # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
else:
    from pydantic.datetime_parse import parse_date as parse_date  # type: ignore # Pydantic v1
    from pydantic.datetime_parse import parse_datetime as parse_datetime  # type: ignore # Pydantic v1
    from pydantic.fields import ModelField as ModelField  # type: ignore # Pydantic v1
    from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type  # type: ignore # Pydantic v1
    from pydantic.typing import get_args as get_args  # type: ignore # Pydantic v1
    from pydantic.typing import get_origin as get_origin  # type: ignore # Pydantic v1
    from pydantic.typing import is_literal_type as is_literal_type  # type: ignore # Pydantic v1
    from pydantic.typing import is_union as is_union  # type: ignore # Pydantic v1

    # isort: on


T = typing.TypeVar("T")
Model = typing.TypeVar("Model", bound=pydantic.BaseModel)


def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T:
    dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
    if IS_PYDANTIC_V2:
        adapter = pydantic.TypeAdapter(type_)  # type: ignore # Pydantic v2
        return adapter.validate_python(dealiased_object)
    else:
        return pydantic.parse_obj_as(type_, dealiased_object)


def to_jsonable_with_fallback(
    obj: typing.Any, fallback_serializer: typing.Callable[[typing.Any], typing.Any]
) -> typing.Any:
    if IS_PYDANTIC_V2:
        from pydantic_core import to_jsonable_python

        return to_jsonable_python(obj, fallback=fallback_serializer)
    else:
        return fallback_serializer(obj)


class UniversalBaseModel(pydantic.BaseModel):
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
            # Allow fields begining with `model_` to be used in the model
            protected_namespaces=(),
        )  # type: ignore # Pydantic v2

        @pydantic.model_serializer(mode="wrap", when_used="json")  # type: ignore # Pydantic v2
        def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> typing.Any:  # type: ignore # Pydantic v2
            serialized = handler(self)
            data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
            return data

    else:

        class Config:
            smart_union = True
            json_encoders = {dt.datetime: serialize_datetime}

    @classmethod
    def model_construct(
        cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
    ) -> "Model":
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        return cls.construct(_fields_set, **dealiased_object)

    @classmethod
    def construct(
        cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
    ) -> "Model":
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        if IS_PYDANTIC_V2:
            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore # Pydantic v2
        else:
            return super().construct(_fields_set, **dealiased_object)

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {
            "by_alias": True,
            "exclude_unset": True,
            **kwargs,
        }
        if IS_PYDANTIC_V2:
            return super().model_dump_json(**kwargs_with_defaults)  # type: ignore # Pydantic v2
        else:
            return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        """
        Override the default dict method to `exclude_unset` by default. This function patches
        `exclude_unset` to work include fields within non-None default values.
        """
        # Note: the logic here is multi-plexed given the levers exposed in Pydantic V1 vs V2
        # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
        #
        # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
        # that we have less control over, and this is less intrusive than custom serializers for now.
        if IS_PYDANTIC_V2:
            kwargs_with_defaults_exclude_unset: typing.Any = {
                **kwargs,
                "by_alias": True,
                "exclude_unset": True,
                "exclude_none": False,
            }
            kwargs_with_defaults_exclude_none: typing.Any = {
                **kwargs,
                "by_alias": True,
                "exclude_none": True,
                "exclude_unset": False,
            }
            dict_dump = deep_union_pydantic_dicts(
                super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore # Pydantic v2
                super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore # Pydantic v2
            )

        else:
            _fields_set = self.__fields_set__.copy()

            fields = _get_model_fields(self.__class__)
            for name, field in fields.items():
                if name not in _fields_set:
                    default = _get_field_default(field)

                    # If the default values are non-null act like they've been set
                    # This effectively allows exclude_unset to work like exclude_none where
                    # the latter passes through intentionally set none values.
                    if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
                        _fields_set.add(name)

                        if default is not None:
                            self.__fields_set__.add(name)

            kwargs_with_defaults_exclude_unset_include_fields: typing.Any = {
                "by_alias": True,
                "exclude_unset": True,
                "include": _fields_set,
                **kwargs,
            }

            dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)

        return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")


def _union_list_of_pydantic_dicts(
    source: typing.List[typing.Any], destination: typing.List[typing.Any]
) -> typing.List[typing.Any]:
    converted_list: typing.List[typing.Any] = []
    for i, item in enumerate(source):
        destination_value = destination[i]  # type: ignore
        if isinstance(item, dict):
            converted_list.append(deep_union_pydantic_dicts(item, destination_value))
        elif isinstance(item, list):
            converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
        else:
            converted_list.append(item)
    return converted_list


def deep_union_pydantic_dicts(
    source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
    for key, value in source.items():
        node = destination.setdefault(key, {})
        if isinstance(value, dict):
            deep_union_pydantic_dicts(value, node)
        # Note: we do not do this same processing for sets given we do not have sets of models
        # and given the sets are unordered, the processing of the set and matching objects would
        # be non-trivial.
        elif isinstance(value, list):
            destination[key] = _union_list_of_pydantic_dicts(value, node)
        else:
            destination[key] = value

    return destination


if IS_PYDANTIC_V2:

    class V2RootModel(UniversalBaseModel, pydantic.RootModel):  # type: ignore # Pydantic v2
        pass

    UniversalRootModel: typing_extensions.TypeAlias = V2RootModel  # type: ignore
else:
    UniversalRootModel: typing_extensions.TypeAlias = UniversalBaseModel  # type: ignore


def encode_by_type(o: typing.Any) -> typing.Any:
    encoders_by_class_tuples: typing.Dict[typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...]] = (
        defaultdict(tuple)
    )
    for type_, encoder in encoders_by_type.items():
        encoders_by_class_tuples[encoder] += (type_,)

    if type(o) in encoders_by_type:
        return encoders_by_type[type(o)](o)
    for encoder, classes_tuple in encoders_by_class_tuples.items():
        if isinstance(o, classes_tuple):
            return encoder(o)


def update_forward_refs(model: typing.Type["Model"], **localns: typing.Any) -> None:
    if IS_PYDANTIC_V2:
        model.model_rebuild(raise_errors=False)  # type: ignore # Pydantic v2
    else:
        model.update_forward_refs(**localns)


# Mirrors Pydantic's internal typing
AnyCallable = typing.Callable[..., typing.Any]


def universal_root_validator(
    pre: bool = False,
) -> typing.Callable[[AnyCallable], AnyCallable]:
    def decorator(func: AnyCallable) -> AnyCallable:
        if IS_PYDANTIC_V2:
            return pydantic.model_validator(mode="before" if pre else "after")(func)  # type: ignore # Pydantic v2
        else:
            return pydantic.root_validator(pre=pre)(func)  # type: ignore # Pydantic v1

    return decorator


def universal_field_validator(field_name: str, pre: bool = False) -> typing.Callable[[AnyCallable], AnyCallable]:
    def decorator(func: AnyCallable) -> AnyCallable:
        if IS_PYDANTIC_V2:
            return pydantic.field_validator(field_name, mode="before" if pre else "after")(func)  # type: ignore # Pydantic v2
        else:
            return pydantic.validator(field_name, pre=pre)(func)  # type: ignore # Pydantic v1

    return decorator


PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo]


def _get_model_fields(
    model: typing.Type["Model"],
) -> typing.Mapping[str, PydanticField]:
    if IS_PYDANTIC_V2:
        return model.model_fields  # type: ignore # Pydantic v2
    else:
        return model.__fields__  # type: ignore # Pydantic v1


def _get_field_default(field: PydanticField) -> typing.Any:
    try:
        value = field.get_default()  # type: ignore # Pydantic < v1.10.15
    except:
        value = field.default
    if IS_PYDANTIC_V2:
        from pydantic_core import PydanticUndefined

        if value == PydanticUndefined:
            return None
        return value
    return value
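As a quick orientation to the compatibility layer above, here is a minimal, illustrative sketch (not part of the diff) of how a model built on UniversalBaseModel behaves the same under Pydantic v1 and v2. The Job model and its fields are invented for the example, and the import path assumes the installed package layout shown in the file listing.

import datetime as dt
import typing

from cartesia.core.pydantic_utilities import (  # path from the listing above
    IS_PYDANTIC_V2,
    UniversalBaseModel,
    parse_obj_as,
)


class Job(UniversalBaseModel):
    # Hypothetical model used only for illustration.
    name: str
    created_at: typing.Optional[dt.datetime] = None


# parse_obj_as routes through TypeAdapter on Pydantic v2 and pydantic.parse_obj_as on v1.
job = parse_obj_as(Job, {"name": "demo", "created_at": "2025-01-01T00:00:00Z"})
print(IS_PYDANTIC_V2)  # True under Pydantic 2.x, False under 1.x
print(job.dict())      # by_alias / exclude_unset defaults applied by UniversalBaseModel.dict
print(job.json())      # datetimes serialized through serialize_datetime on both majors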
cartesia/core/query_encoder.py (new file, 58 added lines)
@@ -0,0 +1,58 @@
# This file was auto-generated by Fern from our API Definition.

from typing import Any, Dict, List, Optional, Tuple

import pydantic


# Flattens dicts to be of the form {"key[subkey][subkey2]": value} where value is not a dict
def traverse_query_dict(dict_flat: Dict[str, Any], key_prefix: Optional[str] = None) -> List[Tuple[str, Any]]:
    result = []
    for k, v in dict_flat.items():
        key = f"{key_prefix}[{k}]" if key_prefix is not None else k
        if isinstance(v, dict):
            result.extend(traverse_query_dict(v, key))
        elif isinstance(v, list):
            for arr_v in v:
                if isinstance(arr_v, dict):
                    result.extend(traverse_query_dict(arr_v, key))
                else:
                    result.append((key, arr_v))
        else:
            result.append((key, v))
    return result


def single_query_encoder(query_key: str, query_value: Any) -> List[Tuple[str, Any]]:
    if isinstance(query_value, pydantic.BaseModel) or isinstance(query_value, dict):
        if isinstance(query_value, pydantic.BaseModel):
            obj_dict = query_value.dict(by_alias=True)
        else:
            obj_dict = query_value
        return traverse_query_dict(obj_dict, query_key)
    elif isinstance(query_value, list):
        encoded_values: List[Tuple[str, Any]] = []
        for value in query_value:
            if isinstance(value, pydantic.BaseModel) or isinstance(value, dict):
                if isinstance(value, pydantic.BaseModel):
                    obj_dict = value.dict(by_alias=True)
                elif isinstance(value, dict):
                    obj_dict = value

                encoded_values.extend(single_query_encoder(query_key, obj_dict))
            else:
                encoded_values.append((query_key, value))

        return encoded_values

    return [(query_key, query_value)]


def encode_query(query: Optional[Dict[str, Any]]) -> Optional[List[Tuple[str, Any]]]:
    if query is None:
        return None

    encoded_query = []
    for k, v in query.items():
        encoded_query.extend(single_query_encoder(k, v))
    return encoded_query
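For reference, a small illustrative run of the encoder above (not part of the diff; the input dict is invented): nested dicts are flattened into bracketed keys, and lists are repeated under the same key.

from cartesia.core.query_encoder import encode_query  # path from the listing above

params = encode_query(
    {
        "filter": {"status": "ready", "meta": {"lang": "en"}},
        "tags": ["a", "b"],
        "limit": 10,
    }
)
print(params)
# [('filter[status]', 'ready'), ('filter[meta][lang]', 'en'),
#  ('tags', 'a'), ('tags', 'b'), ('limit', 10)]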
cartesia/core/remove_none_from_dict.py (new file, 11 added lines)
@@ -0,0 +1,11 @@
# This file was auto-generated by Fern from our API Definition.

from typing import Any, Dict, Mapping, Optional


def remove_none_from_dict(original: Mapping[str, Optional[Any]]) -> Dict[str, Any]:
    new: Dict[str, Any] = {}
    for key, value in original.items():
        if value is not None:
            new[key] = value
    return new
cartesia/core/request_options.py (new file, 35 added lines)
@@ -0,0 +1,35 @@
# This file was auto-generated by Fern from our API Definition.

import typing

try:
    from typing import NotRequired  # type: ignore
except ImportError:
    from typing_extensions import NotRequired


class RequestOptions(typing.TypedDict, total=False):
    """
    Additional options for request-specific configuration when calling APIs via the SDK.
    This is used primarily as an optional final parameter for service functions.

    Attributes:
        - timeout_in_seconds: int. The number of seconds to await an API call before timing out.

        - max_retries: int. The max number of retries to attempt if the API call fails.

        - additional_headers: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's header dict

        - additional_query_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's query parameters dict

        - additional_body_parameters: typing.Dict[str, typing.Any]. A dictionary containing additional parameters to spread into the request's body parameters dict

        - chunk_size: int. The size, in bytes, to process each chunk of data being streamed back within the response. This equates to leveraging `chunk_size` within `requests` or `httpx`, and is only leveraged for file downloads.
    """

    timeout_in_seconds: NotRequired[int]
    max_retries: NotRequired[int]
    additional_headers: NotRequired[typing.Dict[str, typing.Any]]
    additional_query_parameters: NotRequired[typing.Dict[str, typing.Any]]
    additional_body_parameters: NotRequired[typing.Dict[str, typing.Any]]
    chunk_size: NotRequired[int]
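Since RequestOptions is a TypedDict with total=False, callers can build it as a plain dict with any subset of these keys; a minimal sketch follows (not part of the diff, and the service call in the trailing comment is hypothetical).

from cartesia.core.request_options import RequestOptions  # path from the listing above

options: RequestOptions = {
    "timeout_in_seconds": 30,
    "max_retries": 2,
    "additional_headers": {"X-Debug": "1"},  # illustrative header only
}
# Passed as the optional final argument of generated service methods, e.g.
# client.some_service.some_method(..., request_options=options)  # hypothetical call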
cartesia/core/serialization.py (new file, 272 added lines)
@@ -0,0 +1,272 @@
# This file was auto-generated by Fern from our API Definition.

import collections
import inspect
import typing

import typing_extensions

import pydantic


class FieldMetadata:
    """
    Metadata class used to annotate fields to provide additional information.

    Example:
        class MyDict(TypedDict):
            field: typing.Annotated[str, FieldMetadata(alias="field_name")]

    Will serialize: `{"field": "value"}`
    To: `{"field_name": "value"}`
    """

    alias: str

    def __init__(self, *, alias: str) -> None:
        self.alias = alias


def convert_and_respect_annotation_metadata(
    *,
    object_: typing.Any,
    annotation: typing.Any,
    inner_type: typing.Optional[typing.Any] = None,
    direction: typing.Literal["read", "write"],
) -> typing.Any:
    """
    Respect the metadata annotations on a field, such as aliasing. This function effectively
    manipulates the dict-form of an object to respect the metadata annotations. This is primarily used for
    TypedDicts, which cannot support aliasing out of the box, and can be extended for additional
    utilities, such as defaults.

    Parameters
    ----------
    object_ : typing.Any

    annotation : type
        The type we're looking to apply typing annotations from

    inner_type : typing.Optional[type]

    Returns
    -------
    typing.Any
    """

    if object_ is None:
        return None
    if inner_type is None:
        inner_type = annotation

    clean_type = _remove_annotations(inner_type)
    # Pydantic models
    if (
        inspect.isclass(clean_type)
        and issubclass(clean_type, pydantic.BaseModel)
        and isinstance(object_, typing.Mapping)
    ):
        return _convert_mapping(object_, clean_type, direction)
    # TypedDicts
    if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping):
        return _convert_mapping(object_, clean_type, direction)

    if (
        typing_extensions.get_origin(clean_type) == typing.Dict
        or typing_extensions.get_origin(clean_type) == dict
        or clean_type == typing.Dict
    ) and isinstance(object_, typing.Dict):
        key_type = typing_extensions.get_args(clean_type)[0]
        value_type = typing_extensions.get_args(clean_type)[1]

        return {
            key: convert_and_respect_annotation_metadata(
                object_=value,
                annotation=annotation,
                inner_type=value_type,
                direction=direction,
            )
            for key, value in object_.items()
        }

    # If you're iterating on a string, do not bother to coerce it to a sequence.
    if not isinstance(object_, str):
        if (
            typing_extensions.get_origin(clean_type) == typing.Set
            or typing_extensions.get_origin(clean_type) == set
            or clean_type == typing.Set
        ) and isinstance(object_, typing.Set):
            inner_type = typing_extensions.get_args(clean_type)[0]
            return {
                convert_and_respect_annotation_metadata(
                    object_=item,
                    annotation=annotation,
                    inner_type=inner_type,
                    direction=direction,
                )
                for item in object_
            }
        elif (
            (
                typing_extensions.get_origin(clean_type) == typing.List
                or typing_extensions.get_origin(clean_type) == list
                or clean_type == typing.List
            )
            and isinstance(object_, typing.List)
        ) or (
            (
                typing_extensions.get_origin(clean_type) == typing.Sequence
                or typing_extensions.get_origin(clean_type) == collections.abc.Sequence
                or clean_type == typing.Sequence
            )
            and isinstance(object_, typing.Sequence)
        ):
            inner_type = typing_extensions.get_args(clean_type)[0]
            return [
                convert_and_respect_annotation_metadata(
                    object_=item,
                    annotation=annotation,
                    inner_type=inner_type,
                    direction=direction,
                )
                for item in object_
            ]

    if typing_extensions.get_origin(clean_type) == typing.Union:
        # We should be able to ~relatively~ safely try to convert keys against all
        # member types in the union, the edge case here is if one member aliases a field
        # of the same name to a different name from another member
        # Or if another member aliases a field of the same name that another member does not.
        for member in typing_extensions.get_args(clean_type):
            object_ = convert_and_respect_annotation_metadata(
                object_=object_,
                annotation=annotation,
                inner_type=member,
                direction=direction,
            )
        return object_

    annotated_type = _get_annotation(annotation)
    if annotated_type is None:
        return object_

    # If the object is not a TypedDict, a Union, or other container (list, set, sequence, etc.)
    # Then we can safely call it on the recursive conversion.
    return object_


def _convert_mapping(
    object_: typing.Mapping[str, object],
    expected_type: typing.Any,
    direction: typing.Literal["read", "write"],
) -> typing.Mapping[str, object]:
    converted_object: typing.Dict[str, object] = {}
    annotations = typing_extensions.get_type_hints(expected_type, include_extras=True)
    aliases_to_field_names = _get_alias_to_field_name(annotations)
    for key, value in object_.items():
        if direction == "read" and key in aliases_to_field_names:
            dealiased_key = aliases_to_field_names.get(key)
            if dealiased_key is not None:
                type_ = annotations.get(dealiased_key)
        else:
            type_ = annotations.get(key)
        # Note you can't get the annotation by the field name if you're in read mode, so you must check the aliases map
        #
        # So this is effectively saying if we're in write mode, and we don't have a type, or if we're in read mode and we don't have an alias
        # then we can just pass the value through as is
        if type_ is None:
            converted_object[key] = value
        elif direction == "read" and key not in aliases_to_field_names:
            converted_object[key] = convert_and_respect_annotation_metadata(
                object_=value, annotation=type_, direction=direction
            )
        else:
            converted_object[_alias_key(key, type_, direction, aliases_to_field_names)] = (
                convert_and_respect_annotation_metadata(object_=value, annotation=type_, direction=direction)
            )
    return converted_object


def _get_annotation(type_: typing.Any) -> typing.Optional[typing.Any]:
    maybe_annotated_type = typing_extensions.get_origin(type_)
    if maybe_annotated_type is None:
        return None

    if maybe_annotated_type == typing_extensions.NotRequired:
        type_ = typing_extensions.get_args(type_)[0]
        maybe_annotated_type = typing_extensions.get_origin(type_)

    if maybe_annotated_type == typing_extensions.Annotated:
        return type_

    return None


def _remove_annotations(type_: typing.Any) -> typing.Any:
    maybe_annotated_type = typing_extensions.get_origin(type_)
    if maybe_annotated_type is None:
        return type_

    if maybe_annotated_type == typing_extensions.NotRequired:
        return _remove_annotations(typing_extensions.get_args(type_)[0])

    if maybe_annotated_type == typing_extensions.Annotated:
        return _remove_annotations(typing_extensions.get_args(type_)[0])

    return type_


def get_alias_to_field_mapping(type_: typing.Any) -> typing.Dict[str, str]:
    annotations = typing_extensions.get_type_hints(type_, include_extras=True)
    return _get_alias_to_field_name(annotations)


def get_field_to_alias_mapping(type_: typing.Any) -> typing.Dict[str, str]:
    annotations = typing_extensions.get_type_hints(type_, include_extras=True)
    return _get_field_to_alias_name(annotations)


def _get_alias_to_field_name(
    field_to_hint: typing.Dict[str, typing.Any],
) -> typing.Dict[str, str]:
    aliases = {}
    for field, hint in field_to_hint.items():
        maybe_alias = _get_alias_from_type(hint)
        if maybe_alias is not None:
            aliases[maybe_alias] = field
    return aliases


def _get_field_to_alias_name(
    field_to_hint: typing.Dict[str, typing.Any],
) -> typing.Dict[str, str]:
    aliases = {}
    for field, hint in field_to_hint.items():
        maybe_alias = _get_alias_from_type(hint)
        if maybe_alias is not None:
            aliases[field] = maybe_alias
    return aliases


def _get_alias_from_type(type_: typing.Any) -> typing.Optional[str]:
    maybe_annotated_type = _get_annotation(type_)

    if maybe_annotated_type is not None:
        # The actual annotations are 1 onward, the first is the annotated type
        annotations = typing_extensions.get_args(maybe_annotated_type)[1:]

        for annotation in annotations:
            if isinstance(annotation, FieldMetadata) and annotation.alias is not None:
                return annotation.alias
    return None


def _alias_key(
    key: str,
    type_: typing.Any,
    direction: typing.Literal["read", "write"],
    aliases_to_field_names: typing.Dict[str, str],
) -> str:
    if direction == "read":
        return aliases_to_field_names.get(key, key)
    return _get_alias_from_type(type_=type_) or key
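To make the aliasing behaviour above concrete, here is an illustrative round trip (not part of the diff; VoiceParams and its fields are invented) showing how a FieldMetadata annotation renames a TypedDict key on write and restores it on read.

import typing_extensions

from cartesia.core.serialization import (  # path from the listing above
    FieldMetadata,
    convert_and_respect_annotation_metadata,
)


class VoiceParams(typing_extensions.TypedDict):
    # Hypothetical request dict: "voice" is carried on the wire as "voice_id".
    voice: typing_extensions.Annotated[str, FieldMetadata(alias="voice_id")]
    name: str


wire = convert_and_respect_annotation_metadata(
    object_={"voice": "abc123", "name": "demo"},
    annotation=VoiceParams,
    direction="write",
)
print(wire)   # {'voice_id': 'abc123', 'name': 'demo'}

local = convert_and_respect_annotation_metadata(
    object_={"voice_id": "abc123", "name": "demo"},
    annotation=VoiceParams,
    direction="read",
)
print(local)  # {'voice': 'abc123', 'name': 'demo'}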
cartesia/datasets/__init__.py (new file, 24 added lines)
@@ -0,0 +1,24 @@
# This file was auto-generated by Fern from our API Definition.

from .types import CreateDatasetRequest, Dataset, DatasetFile, FilePurpose, PaginatedDatasetFiles, PaginatedDatasets
from .requests import (
    CreateDatasetRequestParams,
    DatasetFileParams,
    DatasetParams,
    PaginatedDatasetFilesParams,
    PaginatedDatasetsParams,
)

__all__ = [
    "CreateDatasetRequest",
    "CreateDatasetRequestParams",
    "Dataset",
    "DatasetFile",
    "DatasetFileParams",
    "DatasetParams",
    "FilePurpose",
    "PaginatedDatasetFiles",
    "PaginatedDatasetFilesParams",
    "PaginatedDatasets",
    "PaginatedDatasetsParams",
]