magic_hour 0.36.2-py3-none-any.whl → 0.37.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of magic_hour might be problematic.

Files changed (45)
  1. magic_hour/__init__.py +1 -1
  2. magic_hour/client.py +1 -1
  3. magic_hour/environment.py +1 -1
  4. magic_hour/resources/v1/ai_clothes_changer/client.py +8 -8
  5. magic_hour/resources/v1/ai_face_editor/client.py +8 -8
  6. magic_hour/resources/v1/ai_gif_generator/client.py +7 -7
  7. magic_hour/resources/v1/ai_headshot_generator/client.py +8 -8
  8. magic_hour/resources/v1/ai_image_editor/client.py +8 -8
  9. magic_hour/resources/v1/ai_image_generator/client.py +7 -7
  10. magic_hour/resources/v1/ai_image_upscaler/client.py +8 -8
  11. magic_hour/resources/v1/ai_meme_generator/client.py +7 -7
  12. magic_hour/resources/v1/ai_photo_editor/client.py +8 -8
  13. magic_hour/resources/v1/ai_qr_code_generator/client.py +7 -7
  14. magic_hour/resources/v1/ai_talking_photo/client.py +8 -8
  15. magic_hour/resources/v1/animation/client.py +8 -8
  16. magic_hour/resources/v1/auto_subtitle_generator/client.py +8 -8
  17. magic_hour/resources/v1/client.py +1 -1
  18. magic_hour/resources/v1/face_detection/client.py +5 -5
  19. magic_hour/resources/v1/face_swap/client.py +8 -8
  20. magic_hour/resources/v1/face_swap_photo/client.py +8 -8
  21. magic_hour/resources/v1/files/client.py +1 -1
  22. magic_hour/resources/v1/files/upload_urls/client.py +2 -2
  23. magic_hour/resources/v1/image_background_remover/client.py +8 -8
  24. magic_hour/resources/v1/image_projects/client.py +4 -4
  25. magic_hour/resources/v1/image_to_video/client.py +8 -8
  26. magic_hour/resources/v1/lip_sync/client.py +8 -8
  27. magic_hour/resources/v1/photo_colorizer/client.py +8 -8
  28. magic_hour/resources/v1/text_to_video/client.py +7 -7
  29. magic_hour/resources/v1/video_projects/client.py +4 -4
  30. magic_hour/resources/v1/video_to_video/client.py +8 -8
  31. magic_hour/types/params/v1_ai_talking_photo_create_body_style.py +4 -3
  32. {magic_hour-0.36.2.dist-info → magic_hour-0.37.0.dist-info}/METADATA +2 -5
  33. {magic_hour-0.36.2.dist-info → magic_hour-0.37.0.dist-info}/RECORD +35 -45
  34. magic_hour/core/__init__.py +0 -50
  35. magic_hour/core/api_error.py +0 -56
  36. magic_hour/core/auth.py +0 -354
  37. magic_hour/core/base_client.py +0 -627
  38. magic_hour/core/binary_response.py +0 -23
  39. magic_hour/core/query.py +0 -124
  40. magic_hour/core/request.py +0 -162
  41. magic_hour/core/response.py +0 -297
  42. magic_hour/core/type_utils.py +0 -28
  43. magic_hour/core/utils.py +0 -55
  44. {magic_hour-0.36.2.dist-info → magic_hour-0.37.0.dist-info}/LICENSE +0 -0
  45. {magic_hour-0.36.2.dist-info → magic_hour-0.37.0.dist-info}/WHEEL +0 -0
magic_hour/core/query.py DELETED
@@ -1,124 +0,0 @@
- import json
-
- from typing import Any, Dict, Union
- from typing_extensions import Literal, Sequence
-
- import httpx
-
-
- # Type alias for query parameters that can handle both primitive data and sequences
- QueryParams = Dict[
-     str, Union[httpx._types.PrimitiveData, Sequence[httpx._types.PrimitiveData]]
- ]
- QueryParamStyle = Literal["form", "spaceDelimited", "pipeDelimited", "deepObject"]
-
-
- def encode_query_param(
-     params: QueryParams,
-     name: str,
-     value: Any,
-     style: QueryParamStyle = "form",
-     explode: bool = True,
- ) -> None:
-     if style == "form":
-         _encode_form(params, name, value, explode)
-     elif style == "spaceDelimited":
-         _encode_spaced_delimited(params, name, value, explode)
-     elif style == "pipeDelimited":
-         _encode_pipe_delimited(params, name, value, explode)
-     elif style == "deepObject":
-         _encode_deep_object(params, name, value, explode)
-     else:
-         raise NotImplementedError(f"query param style '{style}' not implemented")
-
-
- def _query_str(val: Any) -> str:
-     """jsonify value without wrapping quotes for strings"""
-     if isinstance(val, str):
-         return val
-     return json.dumps(val)
-
-
- def _encode_form(params: QueryParams, name: str, value: Any, explode: bool) -> None:
-     """
-     Encodes query params in the `form` style as defined by OpenAPI with both explode and non-explode
-     variants.
-     """
-     if isinstance(value, list) and not explode:
-         # non-explode form lists should be encoded like /users?id=3,4,5
-         params[name] = ",".join(map(_query_str, value))
-     elif isinstance(value, dict):
-         if explode:
-             # explode form objects should be encoded like /users?key0=val0&key1=val1
-             # the input param name will be omitted
-             for k, v in value.items():
-                 params[k] = _query_str(v)
-         else:
-             # non-explode form objects should be encoded like /users?id=key0,val0,key1,val1
-             encoded_chunks = []
-             for k, v in value.items():
-                 encoded_chunks.extend([str(k), _query_str(v)])
-             params[name] = ",".join(encoded_chunks)
-     else:
-         params[name] = value
-
-
- def _encode_spaced_delimited(
-     params: QueryParams, name: str, value: Any, explode: bool
- ) -> None:
-     """
-     Encodes query params in the `spaceDelimited` style as defined by OpenAPI with both explode and non-explode
-     variants.
-     """
-     if isinstance(value, list) and not explode:
-         # non-explode spaceDelimited lists should be encoded like /users?id=3%204%205
-         params[name] = " ".join(map(_query_str, value))
-     else:
-         # according to the docs, spaceDelimited + explode=false only effects lists,
-         # all other encodings are marked as n/a or are the same as `form` style
-         # fall back on form style as it is the default for query params
-         _encode_form(params, name, value, explode)
-
-
- def _encode_pipe_delimited(
-     params: QueryParams, name: str, value: Any, explode: bool
- ) -> None:
-     """
-     Encodes query params in the `pipeDelimited` style as defined by OpenAPI with both explode and non-explode
-     variants.
-     """
-     if isinstance(value, list) and not explode:
-         # non-explode pipeDelimited lists should be encoded like /users?id=3|4|5
-         params[name] = "|".join(map(_query_str, value))
-     else:
-         # according to the docs, pipeDelimited + explode=false only effects lists,
-         # all other encodings are marked as n/a or are the same as `form` style
-         # fall back on form style as it is the default for query params
-         _encode_form(params, name, value, explode)
-
-
- def _encode_deep_object(
-     params: QueryParams, name: str, value: Any, explode: bool
- ) -> None:
-     """
-     Encodes query params in the `deepObject` style as defined by with both explode and non-explode
-     variants.
-     """
-     if isinstance(value, (dict, list)):
-         _encode_deep_object_key(params, name, value)
-     else:
-         # according to the docs, deepObject style only applies to
-         # object encodes, encodings for primitives are listed as n/a,
-         # fall back on form style as it is the default for query params
-         _encode_form(params, name, value, explode)
-
-
- def _encode_deep_object_key(params: QueryParams, key: str, value: Any) -> None:
-     if isinstance(value, dict):
-         for k, v in value.items():
-             _encode_deep_object_key(params, f"{key}[{k}]", v)
-     elif isinstance(value, list):
-         for i, v in enumerate(value):
-             _encode_deep_object_key(params, f"{key}[{i}]", v)
-     else:
-         params[key] = _query_str(value)
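For reference, here is a minimal sketch of how the removed encoder behaved, exercised against the 0.36.2 wheel (magic_hour.core.query no longer exists in 0.37.0); the parameter names and values are invented for illustration.

from magic_hour.core.query import QueryParams, encode_query_param

params: QueryParams = {}

# "form" style with explode=False joins list items with commas (?id=3,4,5).
encode_query_param(params, "id", [3, 4, 5], style="form", explode=False)
assert params == {"id": "3,4,5"}

# "pipeDelimited" with explode=False joins list items with "|" (?id=3|4|5).
params = {}
encode_query_param(params, "id", [3, 4, 5], style="pipeDelimited", explode=False)
assert params == {"id": "3|4|5"}

# "deepObject" expands nested keys into bracketed parameter names; non-string
# primitives are rendered through _query_str (json.dumps), hence "1".
params = {}
encode_query_param(params, "filter", {"status": "active", "page": 1}, style="deepObject")
assert params == {"filter[status]": "active", "filter[page]": "1"}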
magic_hour/core/request.py DELETED
@@ -1,162 +0,0 @@
- from typing import Any, Dict, Type, Union, List, Mapping
-
- import httpx
- from typing_extensions import TypedDict, Required, NotRequired
- from pydantic import TypeAdapter, BaseModel
-
- from .type_utils import NotGiven
- from .query import QueryParams, QueryParamStyle, encode_query_param
-
- """
- Request configuration and utility functions for handling HTTP requests.
- This module provides type definitions and helper functions for building
- and processing HTTP requests in a type-safe manner.
- """
-
-
- class RequestConfig(TypedDict):
-     """
-     Configuration for HTTP requests.
-
-     Defines all possible parameters that can be passed to an HTTP request,
-     including required method and URL, as well as optional parameters like
-     content, headers, authentication, etc.
-     """
-
-     method: Required[str]
-     url: Required[httpx._types.URLTypes]
-     content: NotRequired[httpx._types.RequestContent]
-     data: NotRequired[httpx._types.RequestData]
-     files: NotRequired[httpx._types.RequestFiles]
-     json: NotRequired[Any]
-     params: NotRequired[QueryParams]
-     headers: NotRequired[Dict[str, str]]
-     cookies: NotRequired[Dict[str, str]]
-     auth: NotRequired[httpx._types.AuthTypes]
-     follow_redirects: NotRequired[bool]
-     timeout: NotRequired[httpx._types.TimeoutTypes]
-     extensions: NotRequired[httpx._types.RequestExtensions]
-
-
- class RequestOptions(TypedDict):
-     """
-     Additional options for customizing request behavior.
-
-     Provides configuration for timeouts and additional headers/parameters
-     that should be included with requests.
-
-     Attributes:
-         timeout: Number of seconds to await an API call before timing out
-         additional_headers: Extra headers to include in the request
-         additional_params: Extra query parameters to include in the request
-     """
-
-     timeout: NotRequired[int]
-     additional_headers: NotRequired[Dict[str, str]]
-     additional_params: NotRequired[QueryParams]
-
-
- def default_request_options() -> RequestOptions:
-     """
-     Provides default request options.
-
-     Returns an empty dictionary as the base configuration, allowing defaults
-     to be handled by the underlying HTTP client.
-     """
-     return {}
-
-
- def model_dump(item: Any) -> Any:
-     """
-     Recursively converts Pydantic models to dictionaries.
-
-     Handles nested structures including lists and individual models,
-     preserving alias information and excluding unset values.
-     """
-     if isinstance(item, list):
-         return [model_dump(i) for i in item]
-     if isinstance(item, BaseModel):
-         return item.model_dump(exclude_unset=True, by_alias=True)
-     else:
-         return item
-
-
- def to_encodable(
-     *, item: Any, dump_with: Union[Type[Any], Union[Type[Any], Any], List[Type[Any]]]
- ) -> Any:
-     """
-     Validates and converts an item to an encodable format using a specified type.
-     Uses Pydantic's TypeAdapter for validation and converts the result
-     to a format suitable for encoding in requests.
-     """
-     filtered_item = filter_not_given(item)
-     adapter: TypeAdapter[Any] = TypeAdapter(dump_with)
-     validated_item = adapter.validate_python(filtered_item)
-     return model_dump(validated_item)
-
-
- def to_form_urlencoded(
-     *,
-     item: Any,
-     dump_with: Union[Type[Any], Union[Type[Any], Any]],
-     style: Mapping[str, QueryParamStyle],
-     explode: Mapping[str, bool],
- ) -> Mapping[str, Any]:
-     """
-     Encodes object as x-www-form-urlencoded according to style and explode options
-     """
-     encoded = to_encodable(item=item, dump_with=dump_with)
-
-     if not isinstance(encoded, dict):
-         raise TypeError("x-www-form-urlencoded data must be an object at the top level")
-
-     form_data: QueryParams = {}
-
-     for key, val in encoded.items():
-         key_style = style.get(key, "form")
-         key_explode = explode.get(key, key_style == "form")
-         encode_query_param(form_data, key, val, style=key_style, explode=key_explode)
-
-     return form_data
-
-
- def to_content(*, file: httpx._types.FileTypes) -> httpx._types.RequestContent:
-     """
-     Converts the various ways files can be provided to something that is accepted by
-     the httpx.request content kwarg
-     """
-     if isinstance(file, tuple):
-         file_content: httpx._types.FileContent = file[1]
-     else:
-         file_content = file
-
-     if hasattr(file_content, "read") and callable(file_content.read):
-         return file_content.read()
-     else:
-         return file_content
-
-
- def filter_not_given(value: Any) -> Any:
-     """Helper function to recursively filter out NotGiven values"""
-     if isinstance(value, NotGiven):
-         return None  # This will trigger filtering at the container level
-     elif isinstance(value, dict):
-         return {
-             k: filter_not_given(v)
-             for k, v in value.items()
-             if not isinstance(v, NotGiven)
-         }
-     elif isinstance(value, (list, tuple)):
-         return type(value)(
-             filter_not_given(item) for item in value if not isinstance(item, NotGiven)
-         )
-     return value
-
-
- def _get_default_for_type(value_type: Any) -> Any:
-     """Helper to provide appropriate default values for required fields"""
-     if value_type is dict or isinstance(value_type, dict):
-         return {}
-     elif value_type is list or isinstance(value_type, list):
-         return []
-     return None
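A short sketch of the request-side helpers dropped here, again run against the 0.36.2 wheel; the CreateBody TypedDict below is a made-up stand-in for the SDK's generated params types.

from typing_extensions import NotRequired, TypedDict

from magic_hour.core.request import default_request_options, to_encodable
from magic_hour.core.type_utils import NOT_GIVEN


class CreateBody(TypedDict):
    name: str
    seed: NotRequired[int]


# filter_not_given() strips NOT_GIVEN values before Pydantic validation,
# so omitted optional fields never reach the serialized request body.
body = to_encodable(item={"name": "demo", "seed": NOT_GIVEN}, dump_with=CreateBody)
assert body == {"name": "demo"}

# Request options default to an empty dict; the client merges them in later.
assert default_request_options() == {}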
magic_hour/core/response.py DELETED
@@ -1,297 +0,0 @@
- import json
- from typing import Any, Union, Dict, Type, TypeVar, List, Generic, Optional
- from pydantic import BaseModel
- import httpx
-
- """
- Provides functionality for handling Server-Sent Events (SSE) streams and response data encoding.
- Includes utilities for both synchronous and asynchronous stream processing.
- """
-
- EncodableT = TypeVar(
-     "EncodableT",
-     bound=Union[
-         object,
-         str,
-         int,
-         float,
-         None,
-         BaseModel,
-         List[Any],
-         Dict[str, Any],
-     ],
- )
-
-
- def from_encodable(*, data: Any, load_with: Type[EncodableT]) -> Any:
-     """
-     Converts raw data into a specified type using Pydantic validation.
-
-     Uses a dynamic Pydantic model to validate and convert incoming data
-     into the specified target type.
-     """
-
-     class Caster(BaseModel):
-         data: load_with  # type: ignore
-
-     return Caster(data=data).data
-
-
- T = TypeVar("T")
-
-
- class StreamResponse(Generic[T]):
-     """
-     Handles synchronous streaming of Server-Sent Events (SSE).
-
-     Processes a streaming HTTP response by buffering chunks of data
-     and parsing them according to SSE format, converting each event
-     into the specified type.
-     """
-
-     def __init__(
-         self, response: httpx.Response, stream_context: Any, cast_to: Type[T]
-     ) -> None:
-         """
-         Initialize the stream processor with response and conversion settings.
-
-         Args:
-             response: The HTTP response containing the SSE stream
-             stream_context: Context manager for the stream
-             cast_to: Target type for converting parsed events
-         """
-         self.response = response
-         self._context = stream_context
-         self.cast_to = cast_to
-         self.iterator = response.iter_bytes()
-         self.buffer = bytearray()
-         self.position = 0
-
-     def __iter__(self) -> "StreamResponse[T]":
-         """Enables iteration over the stream events."""
-         return self
-
-     def __next__(self) -> T:
-         """
-         Retrieves and processes the next event from the stream.
-
-         Buffers incoming data and processes it according to SSE format,
-         converting each complete event into the specified type.
-
-         Raises:
-             StopIteration: When the stream is exhausted
-         """
-         try:
-             while True:
-                 event = self._process_buffer()
-                 if event:
-                     return event
-
-                 chunk = next(self.iterator)
-                 self.buffer += chunk
-
-         except StopIteration:
-             event = self._process_buffer(final=True)
-             if event:
-                 return event
-             self._context.__exit__(None, None, None)
-             raise
-
-     def _process_buffer(self, final: bool = False) -> Optional[T]:
-         """
-         Processes the current buffer to extract complete SSE events.
-
-         Searches for event boundaries and parses complete events,
-         handling both JSON and non-JSON payloads.
-
-         Args:
-             final: Whether this is the final processing of the buffer
-         """
-         while self.position < len(self.buffer):
-             for boundary in [b"\r\n\r\n", b"\n\n", b"\r\r"]:
-                 if (self.position + len(boundary)) <= len(self.buffer):
-                     if (
-                         self.buffer[self.position : self.position + len(boundary)]
-                         == boundary
-                     ):
-                         message = self.buffer[: self.position].decode()
-                         self.buffer = self.buffer[self.position + len(boundary) :]
-                         self.position = 0
-
-                         data = self._parse_sse(message)
-                         if data:
-                             try:
-                                 parsed_data = json.loads(data)
-                                 if (
-                                     not isinstance(parsed_data, dict)
-                                     or "data" not in parsed_data
-                                 ):
-                                     parsed_data = {"data": parsed_data}
-                                 return from_encodable(  # type: ignore[no-any-return]
-                                     data=parsed_data, load_with=self.cast_to
-                                 )
-                             except json.JSONDecodeError:
-                                 return from_encodable(  # type: ignore[no-any-return]
-                                     data={"data": data}, load_with=self.cast_to
-                                 )
-                         return None
-
-             self.position += 1
-
-         if final and self.buffer:
-             message = self.buffer.decode()
-             data = self._parse_sse(message)
-             if data:
-                 try:
-                     parsed_data = json.loads(data)
-                     if not isinstance(parsed_data, dict) or "data" not in parsed_data:
-                         parsed_data = {"data": parsed_data}
-                     return from_encodable(data=parsed_data, load_with=self.cast_to)  # type: ignore[no-any-return]
-                 except json.JSONDecodeError:
-                     return from_encodable(data={"data": data}, load_with=self.cast_to)  # type: ignore[no-any-return]
-
-         return None
-
-     def _parse_sse(self, message: str) -> Optional[str]:
-         """
-         Parses an SSE message to extract the data field.
-
-         Handles multi-line data fields and empty data fields according
-         to the SSE specification.
-         """
-         data = []
-         for line in message.split("\n"):
-             if line.startswith("data:"):
-                 data.append(line[5:].strip())
-             elif line.strip() == "data:":  # Handle empty data field
-                 data.append("")
-
-         if data:
-             return "\n".join(data)
-         return None
-
-
- class AsyncStreamResponse(Generic[T]):
-     """
-     Handles asynchronous streaming of Server-Sent Events (SSE).
-
-     Asynchronous version of StreamResponse, providing the same functionality
-     but compatible with async/await syntax.
-     """
-
-     def __init__(
-         self, response: httpx.Response, stream_context: Any, cast_to: Type[T]
-     ) -> None:
-         """
-         Initialize the async stream processor.
-
-         Args:
-             response: The HTTP response containing the SSE stream
-             stream_context: Async context manager for the stream
-             cast_to: Target type for converting parsed events
-         """
-         self.response = response
-         self._context = stream_context
-         self.cast_to = cast_to
-         self.iterator = response.aiter_bytes()
-         self.buffer = bytearray()
-         self.position = 0
-
-     def __aiter__(self) -> "AsyncStreamResponse[T]":
-         """Enables async iteration over the stream events."""
-         return self
-
-     async def __anext__(self) -> T:
-         """
-         Asynchronously retrieves and processes the next event from the stream.
-
-         Similar to synchronous version but uses async/await syntax for
-         iteration and context management.
-
-         Raises:
-             StopAsyncIteration: When the stream is exhausted
-         """
-         try:
-             while True:
-                 event = self._process_buffer()
-                 if event:
-                     return event
-
-                 chunk = await self.iterator.__anext__()
-                 self.buffer += chunk
-
-         except StopAsyncIteration:
-             event = self._process_buffer(final=True)
-             if event:
-                 return event
-             await self._context.__aexit__(None, None, None)
-             raise
-
-     def _process_buffer(self, final: bool = False) -> Optional[T]:
-         """
-         Processes the current buffer to extract complete SSE events.
-
-         Identical to the synchronous version's buffer processing.
-         """
-         while self.position < len(self.buffer):
-             for boundary in [b"\r\n\r\n", b"\n\n", b"\r\r"]:
-                 if (self.position + len(boundary)) <= len(self.buffer):
-                     if (
-                         self.buffer[self.position : self.position + len(boundary)]
-                         == boundary
-                     ):
-                         message = self.buffer[: self.position].decode()
-                         self.buffer = self.buffer[self.position + len(boundary) :]
-                         self.position = 0
-
-                         data = self._parse_sse(message)
-                         if data:
-                             try:
-                                 parsed_data = json.loads(data)
-                                 if (
-                                     not isinstance(parsed_data, dict)
-                                     or "data" not in parsed_data
-                                 ):
-                                     parsed_data = {"data": parsed_data}
-                                 return from_encodable(  # type: ignore[no-any-return]
-                                     data=parsed_data, load_with=self.cast_to
-                                 )
-                             except json.JSONDecodeError:
-                                 return from_encodable(  # type: ignore[no-any-return]
-                                     data={"data": data}, load_with=self.cast_to
-                                 )
-                         return None
-
-             self.position += 1
-
-         if final and self.buffer:
-             message = self.buffer.decode()
-             data = self._parse_sse(message)
-             if data:
-                 try:
-                     parsed_data = json.loads(data)
-                     if not isinstance(parsed_data, dict) or "data" not in parsed_data:
-                         parsed_data = {"data": parsed_data}
-                     return from_encodable(data=parsed_data, load_with=self.cast_to)  # type: ignore[no-any-return]
-                 except json.JSONDecodeError:
-                     return from_encodable(data={"data": data}, load_with=self.cast_to)  # type: ignore[no-any-return]
-
-         return None
-
-     def _parse_sse(self, message: str) -> Optional[str]:
-         """
-         Parses an SSE message to extract the data field.
-
-         Identical to the synchronous version's SSE parsing.
-         """
-         data = []
-         for line in message.split("\n"):
-             line = line.strip()
-             if line.startswith("data:"):
-                 data.append(line[5:].strip())
-             elif line == "data:":  # Handle empty data field
-                 data.append("")
-
-         if data:
-             return "\n".join(data)
-         return None
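A sketch of the removed casting helper that both stream classes rely on, run against the 0.36.2 wheel; the Chunk model is invented for the demo.

from pydantic import BaseModel

from magic_hour.core.response import from_encodable


class Chunk(BaseModel):
    data: str


# _process_buffer wraps bare SSE payloads as {"data": ...} and then calls
# from_encodable, which validates them into the caller's type through a
# throwaway Pydantic wrapper model.
event = from_encodable(data={"data": "frame-0001"}, load_with=Chunk)
assert isinstance(event, Chunk)
assert event.data == "frame-0001"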
magic_hour/core/type_utils.py DELETED
@@ -1,28 +0,0 @@
- from typing import (
-     Union,
- )
- from typing_extensions import (
-     Literal,
-     TypeVar,
-     override,
- )
-
- _T = TypeVar("_T")
-
-
- class NotGiven:
-     """
-     Used to distinguish omitted keyword arguments from those passed explicitly
-     with the value None.
-     """
-
-     def __bool__(self) -> Literal[False]:
-         return False
-
-     @override
-     def __repr__(self) -> str:
-         return "NOT_GIVEN"
-
-
- NotGivenOr = Union[_T, NotGiven]
- NOT_GIVEN = NotGiven()
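The NOT_GIVEN sentinel let callers distinguish an omitted keyword argument from an explicit None; a sketch against the 0.36.2 wheel, with the describe() helper invented for the demo.

from typing import Optional

from magic_hour.core.type_utils import NOT_GIVEN, NotGiven, NotGivenOr


def describe(timeout: NotGivenOr[Optional[int]] = NOT_GIVEN) -> str:
    # NotGiven means "fall back to the default"; None means the caller
    # explicitly disabled the timeout.
    if isinstance(timeout, NotGiven):
        return "use the client default"
    if timeout is None:
        return "timeout disabled"
    return f"timeout={timeout}s"


assert describe() == "use the client default"
assert describe(None) == "timeout disabled"
assert describe(30) == "timeout=30s"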
magic_hour/core/utils.py DELETED
@@ -1,55 +0,0 @@
- import typing
- import re
- from typing_extensions import Literal
-
- import httpx
-
- from .binary_response import BinaryResponse
-
-
- def remove_none_from_dict(
-     original: typing.Dict[str, typing.Optional[typing.Any]],
- ) -> typing.Dict[str, typing.Any]:
-     new: typing.Dict[str, typing.Any] = {}
-     for key, value in original.items():
-         if value is not None:
-             new[key] = value
-     return new
-
-
- def get_response_type(headers: httpx.Headers) -> Literal["json", "text", "binary"]:
-     """Check response type based on content type"""
-     content_type = headers.get("content-type")
-
-     if re.search("^application/(.+[+])?json", content_type):
-         return "json"
-     elif re.search("^text/(.+)", content_type):
-         return "text"
-     else:
-         return "binary"
-
-
- def is_union_type(type_hint: typing.Any) -> bool:
-     """Check if a type hint is a Union type."""
-     return hasattr(type_hint, "__origin__") and type_hint.__origin__ is typing.Union
-
-
- def filter_binary_response(cast_to: typing.Type[typing.Any]) -> typing.Type[typing.Any]:
-     """
-     Filters out BinaryResponse from a Union type.
-     If cast_to is not a Union, returns it unchanged.
-     """
-     if not is_union_type(cast_to):
-         return cast_to
-
-     types = typing.get_args(cast_to)
-     filtered = tuple(t for t in types if t != BinaryResponse)
-
-     # If everything was filtered out, return original type
-     if not filtered:
-         return cast_to
-     # If only one type remains, return it directly
-     if len(filtered) == 1:
-         return typing.cast(typing.Type[typing.Any], filtered[0])
-     # Otherwise return new Union with filtered types
-     return typing.cast(typing.Type[typing.Any], typing.Union[filtered])
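Finally, a sketch of the content-type sniffing and Union-narrowing helpers removed above, run against the 0.36.2 wheel.

from typing import Union

import httpx

from magic_hour.core.binary_response import BinaryResponse
from magic_hour.core.utils import filter_binary_response, get_response_type

# The "^application/(.+[+])?json" pattern also matches vendor suffixes such
# as application/problem+json.
assert get_response_type(httpx.Headers({"content-type": "application/json"})) == "json"
assert get_response_type(httpx.Headers({"content-type": "application/problem+json"})) == "json"
assert get_response_type(httpx.Headers({"content-type": "text/plain"})) == "text"
assert get_response_type(httpx.Headers({"content-type": "image/png"})) == "binary"

# BinaryResponse is dropped from Union cast targets so that JSON parsing only
# sees the remaining type(s).
assert filter_binary_response(Union[dict, BinaryResponse]) is dict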