camb-sdk 1.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camb/__init__.py +335 -0
- camb/audio_separation/__init__.py +4 -0
- camb/audio_separation/client.py +406 -0
- camb/audio_separation/raw_client.py +534 -0
- camb/client.py +717 -0
- camb/core/__init__.py +105 -0
- camb/core/api_error.py +23 -0
- camb/core/client_wrapper.py +113 -0
- camb/core/datetime_utils.py +28 -0
- camb/core/file.py +67 -0
- camb/core/force_multipart.py +18 -0
- camb/core/http_client.py +663 -0
- camb/core/http_response.py +55 -0
- camb/core/http_sse/__init__.py +42 -0
- camb/core/http_sse/_api.py +112 -0
- camb/core/http_sse/_decoders.py +61 -0
- camb/core/http_sse/_exceptions.py +7 -0
- camb/core/http_sse/_models.py +17 -0
- camb/core/jsonable_encoder.py +100 -0
- camb/core/pydantic_utilities.py +260 -0
- camb/core/query_encoder.py +58 -0
- camb/core/remove_none_from_dict.py +11 -0
- camb/core/request_options.py +35 -0
- camb/core/serialization.py +276 -0
- camb/deprecated_streaming/__init__.py +4 -0
- camb/deprecated_streaming/client.py +532 -0
- camb/deprecated_streaming/raw_client.py +639 -0
- camb/dictionaries/__init__.py +4 -0
- camb/dictionaries/client.py +785 -0
- camb/dictionaries/raw_client.py +1048 -0
- camb/dub/__init__.py +49 -0
- camb/dub/client.py +846 -0
- camb/dub/raw_client.py +1194 -0
- camb/dub/types/__init__.py +53 -0
- camb/dub/types/dubbed_output_in_alt_format_request_payload_output_format.py +8 -0
- camb/dub/types/get_dubbed_output_in_alt_format_dub_alt_format_run_id_language_post_response.py +9 -0
- camb/dub/types/get_dubbed_run_info_dub_result_run_id_get_response.py +7 -0
- camb/dub/types/get_dubbing_runs_results_dubbing_results_post_response_value.py +7 -0
- camb/environment.py +7 -0
- camb/errors/__init__.py +34 -0
- camb/errors/unprocessable_entity_error.py +11 -0
- camb/folders/__init__.py +4 -0
- camb/folders/client.py +213 -0
- camb/folders/raw_client.py +278 -0
- camb/languages/__init__.py +4 -0
- camb/languages/client.py +168 -0
- camb/languages/raw_client.py +223 -0
- camb/project_setup/__init__.py +4 -0
- camb/project_setup/client.py +537 -0
- camb/project_setup/raw_client.py +655 -0
- camb/py.typed +0 -0
- camb/raw_client.py +236 -0
- camb/story/__init__.py +37 -0
- camb/story/client.py +579 -0
- camb/story/raw_client.py +743 -0
- camb/story/types/__init__.py +38 -0
- camb/story/types/create_story_story_post_response.py +8 -0
- camb/story/types/setup_story_story_setup_post_response.py +8 -0
- camb/streaming/__init__.py +4 -0
- camb/streaming/client.py +645 -0
- camb/streaming/raw_client.py +796 -0
- camb/text_to_audio/__init__.py +4 -0
- camb/text_to_audio/client.py +469 -0
- camb/text_to_audio/raw_client.py +610 -0
- camb/text_to_speech/__init__.py +49 -0
- camb/text_to_speech/baseten.py +214 -0
- camb/text_to_speech/client.py +742 -0
- camb/text_to_speech/raw_client.py +995 -0
- camb/text_to_speech/types/__init__.py +47 -0
- camb/text_to_speech/types/create_stream_tts_request_payload_language.py +71 -0
- camb/text_to_speech/types/create_stream_tts_request_payload_speech_model.py +7 -0
- camb/text_to_speech/types/get_tts_results_tts_results_post_response_value.py +7 -0
- camb/text_to_speech/types/get_tts_run_info_tts_result_run_id_get_response.py +7 -0
- camb/text_to_voice/__init__.py +4 -0
- camb/text_to_voice/client.py +329 -0
- camb/text_to_voice/raw_client.py +405 -0
- camb/transcription/__init__.py +4 -0
- camb/transcription/client.py +465 -0
- camb/transcription/raw_client.py +587 -0
- camb/translated_story/__init__.py +4 -0
- camb/translated_story/client.py +309 -0
- camb/translated_story/raw_client.py +381 -0
- camb/translated_tts/__init__.py +4 -0
- camb/translated_tts/client.py +313 -0
- camb/translated_tts/raw_client.py +357 -0
- camb/translation/__init__.py +4 -0
- camb/translation/client.py +631 -0
- camb/translation/raw_client.py +787 -0
- camb/types/__init__.py +236 -0
- camb/types/add_target_language_out.py +20 -0
- camb/types/audio_output_type.py +5 -0
- camb/types/audio_stream.py +31 -0
- camb/types/config_stream.py +22 -0
- camb/types/config_stream_pipeline.py +28 -0
- camb/types/create_custom_voice_out.py +19 -0
- camb/types/create_project_setup_out.py +19 -0
- camb/types/create_stream_out.py +22 -0
- camb/types/create_stream_request_payload.py +70 -0
- camb/types/create_translated_tts_out.py +19 -0
- camb/types/create_tts_out.py +19 -0
- camb/types/data_stream.py +24 -0
- camb/types/demixing_option.py +10 -0
- camb/types/dictionary_term.py +21 -0
- camb/types/dictionary_with_terms.py +28 -0
- camb/types/dubbing_result.py +22 -0
- camb/types/exception_reasons.py +30 -0
- camb/types/folder.py +20 -0
- camb/types/formalities.py +3 -0
- camb/types/gender.py +3 -0
- camb/types/get_audio_separation_result_out.py +20 -0
- camb/types/get_create_project_setup_response.py +21 -0
- camb/types/get_probe_stream_in.py +21 -0
- camb/types/get_probe_stream_out.py +24 -0
- camb/types/get_setup_story_result_response.py +21 -0
- camb/types/get_text_to_voice_result_out.py +19 -0
- camb/types/get_tts_result_out_file_url.py +19 -0
- camb/types/http_validation_error.py +20 -0
- camb/types/language_enums.py +154 -0
- camb/types/language_pydantic_model.py +21 -0
- camb/types/languages.py +3 -0
- camb/types/orchestrator_pipeline_call_result.py +19 -0
- camb/types/orchestrator_pipeline_result.py +25 -0
- camb/types/orchestrator_pipeline_result_exception_reason.py +7 -0
- camb/types/orchestrator_pipeline_result_message.py +5 -0
- camb/types/output_format.py +10 -0
- camb/types/overdub_config.py +37 -0
- camb/types/project_details.py +28 -0
- camb/types/revoicing_option.py +5 -0
- camb/types/run_i_ds_request_payload.py +19 -0
- camb/types/segmenting_option.py +5 -0
- camb/types/source_stream.py +30 -0
- camb/types/story_details.py +27 -0
- camb/types/stream_category.py +3 -0
- camb/types/stream_tts_inference_options.py +38 -0
- camb/types/stream_tts_output_configuration.py +33 -0
- camb/types/stream_tts_voice_settings.py +28 -0
- camb/types/stream_type.py +3 -0
- camb/types/stream_url_for_languages.py +21 -0
- camb/types/target_stream.py +34 -0
- camb/types/task_status.py +5 -0
- camb/types/term_translation_input.py +21 -0
- camb/types/term_translation_output.py +20 -0
- camb/types/text_to_audio_result.py +19 -0
- camb/types/text_to_audio_type.py +5 -0
- camb/types/transcribing_option.py +5 -0
- camb/types/transcript.py +22 -0
- camb/types/transcript_data_type.py +5 -0
- camb/types/transcript_file_format.py +5 -0
- camb/types/transcription_result.py +20 -0
- camb/types/translating_option.py +5 -0
- camb/types/translation_result.py +19 -0
- camb/types/tts_provider.py +3 -0
- camb/types/validation_error.py +22 -0
- camb/types/validation_error_loc_item.py +5 -0
- camb/types/video_output_type_without_avi.py +5 -0
- camb/types/video_stream.py +28 -0
- camb/types/voice.py +28 -0
- camb/voice_cloning/__init__.py +34 -0
- camb/voice_cloning/client.py +265 -0
- camb/voice_cloning/raw_client.py +320 -0
- camb/voice_cloning/types/__init__.py +36 -0
- camb/voice_cloning/types/list_voices_list_voices_get_response_item.py +7 -0
- camb_sdk-1.5.4.dist-info/METADATA +282 -0
- camb_sdk-1.5.4.dist-info/RECORD +167 -0
- camb_sdk-1.5.4.dist-info/WHEEL +5 -0
- camb_sdk-1.5.4.dist-info/licenses/LICENSE +21 -0
- camb_sdk-1.5.4.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
from typing import Dict, Generic, TypeVar
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
# Generic to represent the underlying type of the data wrapped by the HTTP response.
T = TypeVar("T")


class BaseHttpResponse:
    """Thin wrapper around an ``httpx.Response`` that exposes only its headers."""

    _response: httpx.Response

    def __init__(self, response: httpx.Response):
        self._response = response

    @property
    def headers(self) -> Dict[str, str]:
        """The response headers, materialized as a plain ``dict``."""
        raw_headers = self._response.headers
        return dict(raw_headers)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class HttpResponse(Generic[T], BaseHttpResponse):
    """HTTP response wrapper that exposes response headers and data."""

    _data: T

    def __init__(self, response: httpx.Response, data: T):
        super().__init__(response)
        self._data = data

    @property
    def data(self) -> T:
        """The decoded payload carried by this response."""
        return self._data

    def close(self) -> None:
        """Synchronously release the underlying network resources."""
        self._response.close()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class AsyncHttpResponse(Generic[T], BaseHttpResponse):
    """HTTP response wrapper that exposes response headers and data."""

    _data: T

    def __init__(self, response: httpx.Response, data: T):
        super().__init__(response)
        self._data = data

    @property
    def data(self) -> T:
        """The decoded payload carried by this response."""
        return self._data

    async def close(self) -> None:
        """Asynchronously release the underlying network resources."""
        await self._response.aclose()
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
# isort: skip_file
|
|
4
|
+
|
|
5
|
+
import typing
|
|
6
|
+
from importlib import import_module
|
|
7
|
+
|
|
8
|
+
if typing.TYPE_CHECKING:
    # Static-only imports: type checkers see the concrete symbols here, while
    # at runtime the same names are resolved lazily by __getattr__ below.
    from ._api import EventSource, aconnect_sse, connect_sse
    from ._exceptions import SSEError
    from ._models import ServerSentEvent
# Maps each public attribute name to the relative module that defines it.
_dynamic_imports: typing.Dict[str, str] = {
    "EventSource": "._api",
    "SSEError": "._exceptions",
    "ServerSentEvent": "._models",
    "aconnect_sse": "._api",
    "connect_sse": "._api",
}
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def __getattr__(attr_name: str) -> typing.Any:
    """Lazily resolve *attr_name* by importing the module that provides it."""
    target_module = _dynamic_imports.get(attr_name)
    if target_module is None:
        raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}")
    try:
        loaded = import_module(target_module, __package__)
        # A mapping of "name" -> ".name" means the attribute *is* the module itself.
        return loaded if target_module == f".{attr_name}" else getattr(loaded, attr_name)
    except ImportError as e:
        raise ImportError(f"Failed to import {attr_name} from {target_module}: {e}") from e
    except AttributeError as e:
        raise AttributeError(f"Failed to get {attr_name} from {target_module}: {e}") from e
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def __dir__():
    """Advertise the lazily-imported attributes for dir() and autocompletion."""
    return sorted(_dynamic_imports)


__all__ = ["EventSource", "SSEError", "ServerSentEvent", "aconnect_sse", "connect_sse"]
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from contextlib import asynccontextmanager, contextmanager
|
|
5
|
+
from typing import Any, AsyncGenerator, AsyncIterator, Iterator, cast
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
from ._decoders import SSEDecoder
|
|
9
|
+
from ._exceptions import SSEError
|
|
10
|
+
from ._models import ServerSentEvent
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EventSource:
    """Decodes the body of an ``httpx.Response`` as Server-Sent Events.

    Offers both a synchronous (:meth:`iter_sse`) and an asynchronous
    (:meth:`aiter_sse`) iteration API; the response must advertise a
    ``text/event-stream`` Content-Type.
    """

    def __init__(self, response: httpx.Response) -> None:
        self._response = response

    def _check_content_type(self) -> None:
        # Compare only the media type, ignoring parameters such as "; charset=...".
        content_type = self._response.headers.get("content-type", "").partition(";")[0]
        if "text/event-stream" not in content_type:
            raise SSEError(
                f"Expected response header Content-Type to contain 'text/event-stream', got {content_type!r}"
            )

    def _get_charset(self) -> str:
        """Extract charset from Content-Type header, fallback to UTF-8."""
        content_type = self._response.headers.get("content-type", "")

        # Parse charset parameter using regex
        charset_match = re.search(r"charset=([^;\s]+)", content_type, re.IGNORECASE)
        if charset_match:
            charset = charset_match.group(1).strip("\"'")
            # Validate that it's a known encoding
            try:
                # Test if the charset is valid by trying to encode/decode
                "test".encode(charset).decode(charset)
                return charset
            except (LookupError, UnicodeError):
                # If charset is invalid, fall back to UTF-8
                pass

        # Default to UTF-8 if no charset specified or invalid charset
        return "utf-8"

    @property
    def response(self) -> httpx.Response:
        # The raw underlying httpx response object.
        return self._response

    def iter_sse(self) -> Iterator[ServerSentEvent]:
        """Synchronously yield each decoded :class:`ServerSentEvent` from the body."""
        self._check_content_type()
        decoder = SSEDecoder()
        charset = self._get_charset()

        # Manual incremental decode: bytes arrive in arbitrary chunks, so lines
        # are reassembled in `buffer` before being fed to the decoder.
        buffer = ""
        for chunk in self._response.iter_bytes():
            # Decode chunk using detected charset
            text_chunk = chunk.decode(charset, errors="replace")
            buffer += text_chunk

            # Process complete lines
            while "\n" in buffer:
                line, buffer = buffer.split("\n", 1)
                line = line.rstrip("\r")
                sse = decoder.decode(line)
                # when we reach a "\n\n" => line = ''
                # => decoder will attempt to return an SSE Event
                if sse is not None:
                    yield sse

        # Process any remaining data in buffer
        if buffer.strip():
            line = buffer.rstrip("\r")
            sse = decoder.decode(line)
            if sse is not None:
                yield sse

    async def aiter_sse(self) -> AsyncGenerator[ServerSentEvent, None]:
        """Asynchronously yield each decoded :class:`ServerSentEvent` from the body."""
        self._check_content_type()
        decoder = SSEDecoder()
        # Unlike iter_sse, line splitting and charset handling are delegated to
        # httpx's aiter_lines() here.
        lines = cast(AsyncGenerator[str, None], self._response.aiter_lines())
        try:
            async for line in lines:
                line = line.rstrip("\n")
                sse = decoder.decode(line)
                if sse is not None:
                    yield sse
        finally:
            # Close the underlying generator even if the caller abandons iteration.
            await lines.aclose()
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@contextmanager
def connect_sse(client: httpx.Client, method: str, url: str, **kwargs: Any) -> Iterator[EventSource]:
    """Open a streaming SSE request on *client* and yield an :class:`EventSource`."""
    request_headers = kwargs.pop("headers", {})
    # SSE endpoints require an event-stream Accept header; responses must never be cached.
    request_headers["Accept"] = "text/event-stream"
    request_headers["Cache-Control"] = "no-store"

    with client.stream(method, url, headers=request_headers, **kwargs) as response:
        yield EventSource(response)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@asynccontextmanager
async def aconnect_sse(
    client: httpx.AsyncClient,
    method: str,
    url: str,
    **kwargs: Any,
) -> AsyncIterator[EventSource]:
    """Async variant of :func:`connect_sse`: stream an SSE request on *client*."""
    request_headers = kwargs.pop("headers", {})
    # SSE endpoints require an event-stream Accept header; responses must never be cached.
    request_headers["Accept"] = "text/event-stream"
    request_headers["Cache-Control"] = "no-store"

    async with client.stream(method, url, headers=request_headers, **kwargs) as response:
        yield EventSource(response)
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
from typing import List, Optional
|
|
4
|
+
|
|
5
|
+
from ._models import ServerSentEvent
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class SSEDecoder:
    """Incremental, line-oriented decoder for the Server-Sent Events wire format."""

    def __init__(self) -> None:
        self._event = ""
        self._data: List[str] = []
        self._last_event_id = ""
        self._retry: Optional[int] = None

    def _is_empty(self) -> bool:
        # True when nothing has been accumulated since the last dispatched event.
        return not (self._event or self._data or self._last_event_id) and self._retry is None

    def _flush(self) -> ServerSentEvent:
        # Emit the accumulated fields as one event and reset the working state.
        sse = ServerSentEvent(
            event=self._event,
            data="\n".join(self._data),
            id=self._last_event_id,
            retry=self._retry,
        )
        # NOTE: as per the SSE spec, do not reset last_event_id.
        self._event = ""
        self._data = []
        self._retry = None
        return sse

    def decode(self, line: str) -> Optional[ServerSentEvent]:
        """Feed one line; return a completed event on a blank line, else None.

        See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation
        """
        if not line:
            # A blank line terminates an event, if any fields were accumulated.
            return None if self._is_empty() else self._flush()

        if line.startswith(":"):
            return None  # comment line — ignored per spec

        fieldname, _, value = line.partition(":")
        # A single leading space in the value is stripped per spec.
        if value.startswith(" "):
            value = value[1:]

        if fieldname == "event":
            self._event = value
        elif fieldname == "data":
            self._data.append(value)
        elif fieldname == "id" and "\0" not in value:
            # Ids containing NUL are discarded per spec.
            self._last_event_id = value
        elif fieldname == "retry":
            try:
                self._retry = int(value)
            except (TypeError, ValueError):
                pass  # non-numeric retry values are ignored
        # Any other field name is ignored.

        return None
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass(frozen=True)
class ServerSentEvent:
    """A single, immutable Server-Sent Event.

    Defaults follow the SSE specification: an event with no explicit ``event``
    field is a ``"message"`` event, and ``data``/``id`` default to empty strings.
    """

    event: str = "message"
    data: str = ""
    id: str = ""
    retry: Optional[int] = None

    def json(self) -> Any:
        """Parse the data field as JSON."""
        parsed = json.loads(self.data)
        return parsed
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
"""
|
|
4
|
+
jsonable_encoder converts a Python object to a JSON-friendly dict
|
|
5
|
+
(e.g. datetimes to strings, Pydantic models to dicts).
|
|
6
|
+
|
|
7
|
+
Taken from FastAPI, and made a bit simpler
|
|
8
|
+
https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import base64
|
|
12
|
+
import dataclasses
|
|
13
|
+
import datetime as dt
|
|
14
|
+
from enum import Enum
|
|
15
|
+
from pathlib import PurePath
|
|
16
|
+
from types import GeneratorType
|
|
17
|
+
from typing import Any, Callable, Dict, List, Optional, Set, Union
|
|
18
|
+
|
|
19
|
+
import pydantic
|
|
20
|
+
from .datetime_utils import serialize_datetime
|
|
21
|
+
from .pydantic_utilities import (
|
|
22
|
+
IS_PYDANTIC_V2,
|
|
23
|
+
encode_by_type,
|
|
24
|
+
to_jsonable_with_fallback,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]


def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None) -> Any:
    """Recursively convert *obj* to JSON-friendly data (dicts, lists, strs, numbers).

    Pydantic models become dicts (honoring aliases and any configured
    ``json_encoders``), datetimes become ISO strings, bytes become base64 text,
    enums become their values, paths become strings. *custom_encoder* maps
    types to callables that take precedence over the built-in conversions.
    """
    custom_encoder = custom_encoder or {}
    if custom_encoder:
        if type(obj) in custom_encoder:
            return custom_encoder[type(obj)](obj)
        else:
            # Fall back to isinstance matching so subclasses hit their encoder too.
            for encoder_type, encoder_instance in custom_encoder.items():
                if isinstance(obj, encoder_type):
                    return encoder_instance(obj)
    if isinstance(obj, pydantic.BaseModel):
        if IS_PYDANTIC_V2:
            encoder = getattr(obj.model_config, "json_encoders", {})  # type: ignore # Pydantic v2
        else:
            encoder = getattr(obj.__config__, "json_encoders", {})  # type: ignore # Pydantic v1
        if custom_encoder:
            # Merge into a copy: updating the config dict in place would
            # permanently mutate the model class's shared json_encoders.
            encoder = {**encoder, **custom_encoder}
        obj_dict = obj.dict(by_alias=True)
        # Unwrap a custom-root model exactly once ("__root__" in Pydantic v1,
        # "root" in v2). `elif` prevents probing the already-unwrapped value,
        # which may not be a mapping at all (e.g. a root model over an int
        # would otherwise raise TypeError on the second membership test).
        if "__root__" in obj_dict:
            obj_dict = obj_dict["__root__"]
        elif "root" in obj_dict:
            obj_dict = obj_dict["root"]
        return jsonable_encoder(obj_dict, custom_encoder=encoder)
    if dataclasses.is_dataclass(obj):
        obj_dict = dataclasses.asdict(obj)  # type: ignore
        return jsonable_encoder(obj_dict, custom_encoder=custom_encoder)
    if isinstance(obj, bytes):
        return base64.b64encode(obj).decode("utf-8")
    if isinstance(obj, Enum):
        return obj.value
    if isinstance(obj, PurePath):
        return str(obj)
    if isinstance(obj, (str, int, float, type(None))):
        return obj
    if isinstance(obj, dt.datetime):
        return serialize_datetime(obj)
    if isinstance(obj, dt.date):
        return str(obj)
    if isinstance(obj, dict):
        # Encode both keys and values recursively.
        return {
            jsonable_encoder(key, custom_encoder=custom_encoder): jsonable_encoder(
                value, custom_encoder=custom_encoder
            )
            for key, value in obj.items()
        }
    if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
        return [jsonable_encoder(item, custom_encoder=custom_encoder) for item in obj]

    def fallback_serializer(o: Any) -> Any:
        # Last resort: a registered pydantic encoder, then dict()/vars() coercion.
        attempt_encode = encode_by_type(o)
        if attempt_encode is not None:
            return attempt_encode

        try:
            data = dict(o)
        except Exception as e:
            errors: List[Exception] = [e]
            try:
                data = vars(o)
            except Exception as e:
                errors.append(e)
                raise ValueError(errors) from e
        return jsonable_encoder(data, custom_encoder=custom_encoder)

    return to_jsonable_with_fallback(obj, fallback_serializer)
|
|
@@ -0,0 +1,260 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
# nopycln: file
|
|
4
|
+
import datetime as dt
|
|
5
|
+
from collections import defaultdict
|
|
6
|
+
from typing import Any, Callable, ClassVar, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast
|
|
7
|
+
|
|
8
|
+
import pydantic
|
|
9
|
+
|
|
10
|
+
IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
|
|
11
|
+
|
|
12
|
+
if IS_PYDANTIC_V2:
|
|
13
|
+
from pydantic.v1.datetime_parse import parse_date as parse_date
|
|
14
|
+
from pydantic.v1.datetime_parse import parse_datetime as parse_datetime
|
|
15
|
+
from pydantic.v1.fields import ModelField as ModelField
|
|
16
|
+
from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[attr-defined]
|
|
17
|
+
from pydantic.v1.typing import get_args as get_args
|
|
18
|
+
from pydantic.v1.typing import get_origin as get_origin
|
|
19
|
+
from pydantic.v1.typing import is_literal_type as is_literal_type
|
|
20
|
+
from pydantic.v1.typing import is_union as is_union
|
|
21
|
+
else:
|
|
22
|
+
from pydantic.datetime_parse import parse_date as parse_date # type: ignore[no-redef]
|
|
23
|
+
from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore[no-redef]
|
|
24
|
+
from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef]
|
|
25
|
+
from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[no-redef]
|
|
26
|
+
from pydantic.typing import get_args as get_args # type: ignore[no-redef]
|
|
27
|
+
from pydantic.typing import get_origin as get_origin # type: ignore[no-redef]
|
|
28
|
+
from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef]
|
|
29
|
+
from pydantic.typing import is_union as is_union # type: ignore[no-redef]
|
|
30
|
+
|
|
31
|
+
from .datetime_utils import serialize_datetime
|
|
32
|
+
from .serialization import convert_and_respect_annotation_metadata
|
|
33
|
+
from typing_extensions import TypeAlias
|
|
34
|
+
|
|
35
|
+
T = TypeVar("T")
Model = TypeVar("Model", bound=pydantic.BaseModel)


def parse_obj_as(type_: Type[T], object_: Any) -> T:
    """Validate *object_* against *type_* under either Pydantic major version."""
    dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
    if not IS_PYDANTIC_V2:
        return pydantic.parse_obj_as(type_, dealiased_object)
    adapter = pydantic.TypeAdapter(type_)  # type: ignore[attr-defined]
    return adapter.validate_python(dealiased_object)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[Any], Any]) -> Any:
    """Make *obj* JSON-able, deferring to *fallback_serializer* for unknown types."""
    if not IS_PYDANTIC_V2:
        return fallback_serializer(obj)

    # Pydantic v2 ships a native converter that already understands models,
    # datetimes, enums, etc.; the fallback handles everything else.
    from pydantic_core import to_jsonable_python

    return to_jsonable_python(obj, fallback=fallback_serializer)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class UniversalBaseModel(pydantic.BaseModel):
    """Base model for generated types, smoothing over Pydantic v1/v2 API differences."""

    if IS_PYDANTIC_V2:
        model_config: ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(  # type: ignore[typeddict-unknown-key]
            # Allow fields beginning with `model_` to be used in the model
            protected_namespaces=(),
        )

        @pydantic.model_serializer(mode="plain", when_used="json")  # type: ignore[attr-defined]
        def serialize_model(self) -> Any:  # type: ignore[name-defined]
            # Route JSON serialization through .dict() so datetimes are
            # formatted by the shared serialize_datetime helper.
            serialized = self.dict()  # type: ignore[attr-defined]
            data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
            return data

    else:

        class Config:
            # Pydantic v1 equivalents of the v2 configuration above.
            smart_union = True
            json_encoders = {dt.datetime: serialize_datetime}

    @classmethod
    def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
        # v2-style name; de-aliases the incoming values, then defers to construct().
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        return cls.construct(_fields_set, **dealiased_object)

    @classmethod
    def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
        # Build a model instance without validation, honoring field aliases in `values`.
        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
        if IS_PYDANTIC_V2:
            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore[misc]
        return super().construct(_fields_set, **dealiased_object)

    def json(self, **kwargs: Any) -> str:
        """Serialize to JSON, defaulting to by-alias keys and omitting unset fields."""
        kwargs_with_defaults = {
            "by_alias": True,
            "exclude_unset": True,
            **kwargs,
        }
        if IS_PYDANTIC_V2:
            return super().model_dump_json(**kwargs_with_defaults)  # type: ignore[misc]
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: Any) -> Dict[str, Any]:
        """
        Override the default dict method to `exclude_unset` by default. This function patches
        `exclude_unset` to also include fields with non-None default values.
        """
        # Note: the logic here is multiplexed given the levers exposed in Pydantic V1 vs V2
        # Pydantic V1's .dict can be extremely slow, so we do not want to call it twice.
        #
        # We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
        # that we have less control over, and this is less intrusive than custom serializers for now.
        if IS_PYDANTIC_V2:
            kwargs_with_defaults_exclude_unset = {
                **kwargs,
                "by_alias": True,
                "exclude_unset": True,
                "exclude_none": False,
            }
            kwargs_with_defaults_exclude_none = {
                **kwargs,
                "by_alias": True,
                "exclude_none": True,
                "exclude_unset": False,
            }
            # Union the two dumps so unset-but-defaulted fields survive alongside
            # explicitly set fields.
            dict_dump = deep_union_pydantic_dicts(
                super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore[misc]
                super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore[misc]
            )

        else:
            _fields_set = self.__fields_set__.copy()

            fields = _get_model_fields(self.__class__)
            for name, field in fields.items():
                if name not in _fields_set:
                    default = _get_field_default(field)

                    # If the default values are non-null act like they've been set
                    # This effectively allows exclude_unset to work like exclude_none where
                    # the latter passes through intentionally set none values.
                    if default is not None or ("exclude_unset" in kwargs and not kwargs["exclude_unset"]):
                        _fields_set.add(name)

                        if default is not None:
                            self.__fields_set__.add(name)

            kwargs_with_defaults_exclude_unset_include_fields = {
                "by_alias": True,
                "exclude_unset": True,
                "include": _fields_set,
                **kwargs,
            }

            dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)

        return cast(
            Dict[str, Any],
            convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write"),
        )
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
|
|
157
|
+
converted_list: List[Any] = []
|
|
158
|
+
for i, item in enumerate(source):
|
|
159
|
+
destination_value = destination[i]
|
|
160
|
+
if isinstance(item, dict):
|
|
161
|
+
converted_list.append(deep_union_pydantic_dicts(item, destination_value))
|
|
162
|
+
elif isinstance(item, list):
|
|
163
|
+
converted_list.append(_union_list_of_pydantic_dicts(item, destination_value))
|
|
164
|
+
else:
|
|
165
|
+
converted_list.append(item)
|
|
166
|
+
return converted_list
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
    """Recursively merge *source* into *destination* (mutating it) and return it."""
    for key in source:
        incoming = source[key]
        existing = destination.setdefault(key, {})
        if isinstance(incoming, dict):
            deep_union_pydantic_dicts(incoming, existing)
        elif isinstance(incoming, list):
            # Note: we do not do this same processing for sets given we do not have sets of models
            # and given the sets are unordered, the processing of the set and matching objects would
            # be non-trivial.
            destination[key] = _union_list_of_pydantic_dicts(incoming, existing)
        else:
            destination[key] = incoming

    return destination
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
if IS_PYDANTIC_V2:

    class V2RootModel(UniversalBaseModel, pydantic.RootModel):  # type: ignore[misc, name-defined, type-arg]
        # Pydantic v2 requires custom-root models to derive from RootModel.
        pass

    UniversalRootModel: TypeAlias = V2RootModel  # type: ignore[misc]
else:
    # Pydantic v1 expresses custom roots via a `__root__` field on a regular model,
    # so no dedicated subclass is needed.
    UniversalRootModel: TypeAlias = UniversalBaseModel  # type: ignore[misc, no-redef]
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def encode_by_type(o: Any) -> Any:
    """Encode *o* via Pydantic v1's ENCODERS_BY_TYPE table; implicitly None if no match."""
    # Group encoders by callable so isinstance checks can take a tuple of types.
    encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
    for type_, encoder in encoders_by_type.items():
        encoders_by_class_tuples[encoder] += (type_,)

    # Exact type match takes precedence over subclass matches.
    exact_encoder = encoders_by_type.get(type(o))
    if exact_encoder is not None:
        return exact_encoder(o)
    for encoder, classes_tuple in encoders_by_class_tuples.items():
        if isinstance(o, classes_tuple):
            return encoder(o)
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
    """Resolve forward references on *model* under either Pydantic major version."""
    if not IS_PYDANTIC_V2:
        model.update_forward_refs(**localns)
        return
    # v2 rebuild; unresolved references are tolerated rather than raised.
    model.model_rebuild(raise_errors=False)  # type: ignore[attr-defined]
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
# Mirrors Pydantic's internal typing
AnyCallable = Callable[..., Any]


def universal_root_validator(
    pre: bool = False,
) -> Callable[[AnyCallable], AnyCallable]:
    """Version-agnostic replacement for ``pydantic.root_validator``."""

    def decorator(func: AnyCallable) -> AnyCallable:
        if not IS_PYDANTIC_V2:
            return cast(AnyCallable, pydantic.root_validator(pre=pre)(func))  # type: ignore[call-overload]
        # In Pydantic v2, for RootModel we always use "before" mode:
        # the custom validators transform the input value before the model is created.
        return cast(AnyCallable, pydantic.model_validator(mode="before")(func))  # type: ignore[attr-defined]

    return decorator
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def universal_field_validator(field_name: str, pre: bool = False) -> Callable[[AnyCallable], AnyCallable]:
    """Version-agnostic replacement for ``pydantic.validator`` on a single field."""

    def decorator(func: AnyCallable) -> AnyCallable:
        if not IS_PYDANTIC_V2:
            return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func))
        validation_mode = "before" if pre else "after"
        return cast(AnyCallable, pydantic.field_validator(field_name, mode=validation_mode)(func))  # type: ignore[attr-defined]

    return decorator
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
# A field-description object from either Pydantic major version.
PydanticField = Union[ModelField, pydantic.fields.FieldInfo]


def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
    """Return *model*'s declared fields keyed by name, across Pydantic versions."""
    if not IS_PYDANTIC_V2:
        return cast(Mapping[str, PydanticField], model.__fields__)
    return cast(Mapping[str, PydanticField], model.model_fields)  # type: ignore[attr-defined]
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _get_field_default(field: PydanticField) -> Any:
    """Return the default value of *field*, or None when no default exists.

    Handles both Pydantic v1 ``ModelField`` and v2 ``FieldInfo`` objects.
    """
    try:
        value = field.get_default()  # type: ignore[union-attr]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed. Some field objects lack get_default(); fall back to
        # the plain attribute.
        value = field.default
    if IS_PYDANTIC_V2:
        from pydantic_core import PydanticUndefined

        # Identity check: PydanticUndefined is a sentinel singleton, and `==`
        # can be hijacked (or raise) for exotic default values such as
        # array-likes with elementwise equality.
        if value is PydanticUndefined:
            return None
    return value
|