ccs-llmconnector 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ccs-llmconnector
3
- Version: 1.0.4
3
+ Version: 1.0.6
4
4
  Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
5
5
  Author: CCS
6
6
  License: MIT
@@ -91,10 +91,37 @@ vision_response = client.generate_response(
91
91
  | `reasoning_effort` | `Optional[str]` | No | Present for parity with the OpenAI client; currently ignored by the Gemini SDK. |
92
92
  | `images` | `Optional[Sequence[str \| Path]]` | No | Image references (local paths, URLs, or data URLs) read and forwarded to the Gemini SDK. |
93
93
 
94
- The method returns the generated model output as a plain string. Optional image
95
94
  references are automatically converted into the appropriate `types.Part` instances,
96
95
  allowing you to mix text and visuals in a single request.
97
96
 
97
+ ### Image Generation
98
+
99
+ Use `generate_image` to create images using Gemini's image generation models (e.g., `gemini-3-pro-image-preview`).
100
+
101
+ ```python
102
+ image_bytes = client.generate_image(
103
+ api_key="your-gemini-api-key",
104
+ prompt="Generate an infographic of the current weather in Tokyo.",
105
+ model="gemini-3-pro-image-preview",
106
+ image_size="2K", # Optional, defaults to "2K"
107
+ aspect_ratio="16:9", # Optional, e.g. "16:9", "4:3"
108
+ )
109
+
110
+ with open("weather_tokyo.png", "wb") as f:
111
+ f.write(image_bytes)
112
+ ```
113
+
114
+ You can also provide an input image for editing tasks:
115
+
116
+ ```python
117
+ image_bytes = client.generate_image(
118
+ api_key="your-gemini-api-key",
119
+ prompt="Make the background a sunset.",
120
+ model="gemini-3-pro-image-preview",
121
+ image="/path/to/original.png",
122
+ )
123
+ ```
124
+
98
125
  ### Listing models
99
126
 
100
127
  Use `list_models` to enumerate the Gemini models available to your account:
@@ -318,6 +345,15 @@ anthropic_response = llm_client.generate_response(
318
345
  # Additional providers can be registered at runtime:
319
346
  # llm_client.register_provider("custom", CustomProviderClient())
320
347
  # llm_client.generate_response(provider="custom", ...)
348
+
349
+ # Image generation (currently only supported by Gemini)
350
+ image_bytes = llm_client.generate_image(
351
+ provider="gemini",
352
+ api_key="your-gemini-api-key",
353
+ prompt="A futuristic city",
354
+ model="gemini-3-pro-image-preview",
355
+ aspect_ratio="16:9",
356
+ )
321
357
  ```
322
358
 
323
359
  ### Listing models
@@ -0,0 +1,14 @@
1
+ ccs_llmconnector-1.0.6.dist-info/licenses/LICENSE,sha256=rPcz2YmBB9VUWZTLJcRO_B4jKDpqmGRYi2eSI-unysg,1083
2
+ llmconnector/__init__.py,sha256=RIprtUKqu2SrUmPJ8C7lPpCpvknpJqd93CUyxcaXy1I,1213
3
+ llmconnector/anthropic_client.py,sha256=sBcJVmYbqTWeT_twcpDz-00XTreLjZlJ1ifVE4ik5TM,7889
4
+ llmconnector/client.py,sha256=t_vWLcL0QS7w1KNwVYc8KEmtmHih5elRMelY3RhApFg,6261
5
+ llmconnector/client_cli.py,sha256=cxu2NKix-9axNeY5jbfqR5rKPKJ-oqBSnJCY8PKMhYY,10660
6
+ llmconnector/gemini_client.py,sha256=ZdNf4teG0RiV95y3mRMgsjhS-1vrsrPPIEjP9CsKYKE,10893
7
+ llmconnector/grok_client.py,sha256=SXcufcsrYDQgx0tK7EOfIBybTZlEdhZc0MV6siUHyyQ,6453
8
+ llmconnector/openai_client.py,sha256=TeXfJq1YnQ9gegjpQyOj_7h9VY4tJk6dYvEw4KQIUU8,5993
9
+ llmconnector/py.typed,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2
10
+ ccs_llmconnector-1.0.6.dist-info/METADATA,sha256=msjO02kEy78WrivW8TL6g4ANV8VkFYnVyLFxmMQ9DYk,15041
11
+ ccs_llmconnector-1.0.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
12
+ ccs_llmconnector-1.0.6.dist-info/entry_points.txt,sha256=eFvLY3nHAG_QhaKlemhhK7echfezW0KiMdSNMZOStLc,60
13
+ ccs_llmconnector-1.0.6.dist-info/top_level.txt,sha256=Doer7TAUsN8UXQfPHPNsuBXVNCz2uV-Q0v4t4fwv_MM,13
14
+ ccs_llmconnector-1.0.6.dist-info/RECORD,,
@@ -1,22 +1,22 @@
1
- MIT License
2
-
3
- Copyright (c) 2025 CCS
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
22
-
1
+ MIT License
2
+
3
+ Copyright (c) 2025 CCS
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
llmconnector/__init__.py CHANGED
@@ -1,39 +1,39 @@
1
- """Public package interface for llmconnector."""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import TYPE_CHECKING, Any
6
-
7
- from .client import LLMClient
8
-
9
- if TYPE_CHECKING:
10
- from .anthropic_client import AnthropicClient
11
- from .gemini_client import GeminiClient
12
- from .grok_client import GrokClient
13
- from .openai_client import OpenAIResponsesClient
14
-
15
- __all__ = ["LLMClient", "OpenAIResponsesClient", "GeminiClient", "AnthropicClient", "GrokClient"]
16
-
17
-
18
- def __getattr__(name: str) -> Any:
19
- if name == "OpenAIResponsesClient":
20
- from .openai_client import OpenAIResponsesClient as _OpenAIResponsesClient
21
-
22
- return _OpenAIResponsesClient
23
- if name == "GeminiClient":
24
- from .gemini_client import GeminiClient as _GeminiClient
25
-
26
- return _GeminiClient
27
- if name == "AnthropicClient":
28
- from .anthropic_client import AnthropicClient as _AnthropicClient
29
-
30
- return _AnthropicClient
31
- if name == "GrokClient":
32
- from .grok_client import GrokClient as _GrokClient
33
-
34
- return _GrokClient
35
- raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
36
-
37
-
38
- def __dir__() -> list[str]:
39
- return sorted(__all__)
1
+ """Public package interface for llmconnector."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING, Any
6
+
7
+ from .client import LLMClient
8
+
9
+ if TYPE_CHECKING:
10
+ from .anthropic_client import AnthropicClient
11
+ from .gemini_client import GeminiClient
12
+ from .grok_client import GrokClient
13
+ from .openai_client import OpenAIResponsesClient
14
+
15
+ __all__ = ["LLMClient", "OpenAIResponsesClient", "GeminiClient", "AnthropicClient", "GrokClient"]
16
+
17
+
18
+ def __getattr__(name: str) -> Any:
19
+ if name == "OpenAIResponsesClient":
20
+ from .openai_client import OpenAIResponsesClient as _OpenAIResponsesClient
21
+
22
+ return _OpenAIResponsesClient
23
+ if name == "GeminiClient":
24
+ from .gemini_client import GeminiClient as _GeminiClient
25
+
26
+ return _GeminiClient
27
+ if name == "AnthropicClient":
28
+ from .anthropic_client import AnthropicClient as _AnthropicClient
29
+
30
+ return _AnthropicClient
31
+ if name == "GrokClient":
32
+ from .grok_client import GrokClient as _GrokClient
33
+
34
+ return _GrokClient
35
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
36
+
37
+
38
def __dir__() -> list[str]:
    """Advertise the public (including lazily-imported) names to ``dir()``."""
    public_names = list(__all__)
    public_names.sort()
    return public_names
@@ -1,217 +1,233 @@
1
- """Thin wrapper around the Anthropic Messages API via the `anthropic` SDK."""
2
-
3
- from __future__ import annotations
4
-
5
- import base64
6
- import mimetypes
7
- from pathlib import Path
8
- import logging
9
- from typing import Optional, Sequence, Union
10
- from urllib.error import URLError
11
- from urllib.request import urlopen
12
-
13
- from anthropic import APIError, Anthropic
14
-
15
- ImageInput = Union[str, Path]
16
- logger = logging.getLogger(__name__)
17
-
18
-
19
- class AnthropicClient:
20
- """Convenience wrapper around the Anthropic Messages API."""
21
-
22
- def generate_response(
23
- self,
24
- *,
25
- api_key: str,
26
- prompt: str,
27
- model: str,
28
- max_tokens: int = 32000,
29
- reasoning_effort: Optional[str] = None,
30
- images: Optional[Sequence[ImageInput]] = None,
31
- ) -> str:
32
- """Generate a response from the specified Anthropic model.
33
-
34
- Args:
35
- api_key: API key used to authenticate with Anthropic.
36
- prompt: Natural-language instruction or query for the model.
37
- model: Identifier of the Anthropic model to target (for example, ``"claude-3-5-sonnet-20241022"``).
38
- max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
39
- reasoning_effort: Included for API parity; currently unused by the Anthropic SDK.
40
- images: Optional collection of image references (local paths, URLs, or data URLs).
41
-
42
- Returns:
43
- The text output produced by the model.
44
-
45
- Raises:
46
- ValueError: If required arguments are missing or the request payload is empty.
47
- URLError: If an image URL cannot be retrieved.
48
- APIError: If the underlying Anthropic request fails.
49
- """
50
- if not api_key:
51
- raise ValueError("api_key must be provided.")
52
- if not prompt and not images:
53
- raise ValueError("At least one of prompt or images must be provided.")
54
- if not model:
55
- raise ValueError("model must be provided.")
56
-
57
- content_blocks: list[dict] = []
58
- if prompt:
59
- content_blocks.append({"type": "text", "text": prompt})
60
-
61
- if images:
62
- for image in images:
63
- content_blocks.append(self._to_image_block(image))
64
-
65
- if not content_blocks:
66
- raise ValueError("No content provided for response generation.")
67
-
68
- client = Anthropic(api_key=api_key)
69
-
70
- try:
71
- response = client.messages.create(
72
- model=model,
73
- max_tokens=max_tokens,
74
- messages=[{"role": "user", "content": content_blocks}],
75
- )
76
- except Exception as exc:
77
- logger.exception("Anthropic messages.create failed: %s", exc)
78
- raise
79
-
80
- text_blocks: list[str] = []
81
- for block in getattr(response, "content", []) or []:
82
- if getattr(block, "type", None) == "text":
83
- text = getattr(block, "text", None)
84
- if text:
85
- text_blocks.append(text)
86
-
87
- if text_blocks:
88
- result_text = "".join(text_blocks)
89
- logger.info(
90
- "Anthropic messages.create succeeded: model=%s images=%d text_len=%d",
91
- model,
92
- len(images or []),
93
- len(result_text or ""),
94
- )
95
- return result_text
96
-
97
- # Treat successful calls without textual content as a successful, empty response
98
- # rather than raising. This aligns with callers that handle empty outputs gracefully.
99
- logger.info(
100
- "Anthropic messages.create succeeded with no text: model=%s images=%d",
101
- model,
102
- len(images or []),
103
- )
104
- return ""
105
-
106
- def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
107
- """Return the models available to the authenticated Anthropic account."""
108
- if not api_key:
109
- raise ValueError("api_key must be provided.")
110
-
111
- client = Anthropic(api_key=api_key)
112
- models: list[dict[str, Optional[str]]] = []
113
-
114
- try:
115
- iterator = client.models.list()
116
- except Exception as exc:
117
- logger.exception("Anthropic list models failed: %s", exc)
118
- raise
119
-
120
- for model in iterator:
121
- model_id = getattr(model, "id", None)
122
- if model_id is None and isinstance(model, dict):
123
- model_id = model.get("id")
124
- if not model_id:
125
- continue
126
-
127
- display_name = getattr(model, "display_name", None)
128
- if display_name is None and isinstance(model, dict):
129
- display_name = model.get("display_name")
130
-
131
- models.append({"id": model_id, "display_name": display_name})
132
-
133
- logger.info("Anthropic list_models succeeded: count=%d", len(models))
134
- return models
135
-
136
- @staticmethod
137
- def _to_image_block(image: ImageInput) -> dict:
138
- """Convert an image reference into an Anthropic content block."""
139
- if isinstance(image, Path):
140
- return _block_from_path(image)
141
-
142
- if image.startswith("data:"):
143
- return _block_from_data_url(image)
144
-
145
- if image.startswith(("http://", "https://")):
146
- return _block_from_url(image)
147
-
148
- return _block_from_path(Path(image))
149
-
150
-
151
- def _block_from_path(path: Path) -> dict:
152
- """Create an image block from a local filesystem path."""
153
- expanded = path.expanduser()
154
- data = expanded.read_bytes()
155
- encoded = base64.b64encode(data).decode("utf-8")
156
- mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
157
-
158
- return {
159
- "type": "image",
160
- "source": {
161
- "type": "base64",
162
- "media_type": mime_type,
163
- "data": encoded,
164
- },
165
- }
166
-
167
-
168
- def _block_from_url(url: str) -> dict:
169
- """Create an image block by downloading content from a URL."""
170
- with urlopen(url) as response:
171
- data = response.read()
172
- mime_type = response.info().get_content_type()
173
-
174
- if not mime_type or mime_type == "application/octet-stream":
175
- mime_type = mimetypes.guess_type(url)[0] or "application/octet-stream"
176
-
177
- encoded = base64.b64encode(data).decode("utf-8")
178
-
179
- return {
180
- "type": "image",
181
- "source": {
182
- "type": "base64",
183
- "media_type": mime_type,
184
- "data": encoded,
185
- },
186
- }
187
-
188
-
189
- def _block_from_data_url(data_url: str) -> dict:
190
- """Create an image block from a data URL."""
191
- header, encoded = data_url.split(",", 1)
192
- metadata = header[len("data:") :]
193
- mime_type = "application/octet-stream"
194
- is_base64 = False
195
-
196
- if ";" in metadata:
197
- mime_type_part, _, remainder = metadata.partition(";")
198
- if mime_type_part:
199
- mime_type = mime_type_part
200
- is_base64 = "base64" in remainder
201
- elif metadata:
202
- mime_type = metadata
203
-
204
- if is_base64:
205
- data_b64 = encoded
206
- else:
207
- data_bytes = encoded.encode("utf-8")
208
- data_b64 = base64.b64encode(data_bytes).decode("utf-8")
209
-
210
- return {
211
- "type": "image",
212
- "source": {
213
- "type": "base64",
214
- "media_type": mime_type or "application/octet-stream",
215
- "data": data_b64,
216
- },
217
- }
1
+ """Thin wrapper around the Anthropic Messages API via the `anthropic` SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import base64
6
+ import mimetypes
7
+ from pathlib import Path
8
+ import logging
9
+ from typing import Optional, Sequence, Union
10
+ from urllib.error import URLError
11
+ from urllib.request import urlopen
12
+
13
+ from anthropic import APIError, Anthropic
14
+
15
+ ImageInput = Union[str, Path]
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
class AnthropicClient:
    """Convenience wrapper around the Anthropic Messages API."""

    def generate_response(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Generate a response from the specified Anthropic model.

        Args:
            api_key: API key used to authenticate with Anthropic.
            prompt: Natural-language instruction or query for the model.
            model: Identifier of the Anthropic model to target (for example, ``"claude-3-5-sonnet-20241022"``).
            max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
            reasoning_effort: Included for API parity; currently unused by the Anthropic SDK.
            images: Optional collection of image references (local paths, URLs, or data URLs).

        Returns:
            The text output produced by the model, or ``""`` when the call
            succeeds but yields no text blocks.

        Raises:
            ValueError: If required arguments are missing or the request payload is empty.
            URLError: If an image URL cannot be retrieved.
            APIError: If the underlying Anthropic request fails.
        """
        if not api_key:
            raise ValueError("api_key must be provided.")
        if not prompt and not images:
            raise ValueError("At least one of prompt or images must be provided.")
        if not model:
            raise ValueError("model must be provided.")

        # Build the user message: optional text block first, then one image
        # block per reference.
        content_blocks: list[dict] = []
        if prompt:
            content_blocks.append({"type": "text", "text": prompt})

        if images:
            for image in images:
                content_blocks.append(self._to_image_block(image))

        if not content_blocks:
            raise ValueError("No content provided for response generation.")

        client = Anthropic(api_key=api_key)

        try:
            response = client.messages.create(
                model=model,
                max_tokens=max_tokens,
                messages=[{"role": "user", "content": content_blocks}],
            )
        except Exception as exc:
            # Log with traceback, then re-raise the original exception
            # (typically an anthropic.APIError) for the caller to handle.
            logger.exception("Anthropic messages.create failed: %s", exc)
            raise

        # Concatenate every text block in the response; non-text blocks
        # (e.g. tool use) are ignored.
        text_blocks: list[str] = []
        for block in getattr(response, "content", []) or []:
            if getattr(block, "type", None) == "text":
                text = getattr(block, "text", None)
                if text:
                    text_blocks.append(text)

        if text_blocks:
            result_text = "".join(text_blocks)
            logger.info(
                "Anthropic messages.create succeeded: model=%s images=%d text_len=%d",
                model,
                len(images or []),
                len(result_text or ""),
            )
            return result_text

        # Treat successful calls without textual content as a successful, empty response
        # rather than raising. This aligns with callers that handle empty outputs gracefully.
        logger.info(
            "Anthropic messages.create succeeded with no text: model=%s images=%d",
            model,
            len(images or []),
        )
        return ""

    def generate_image(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        image_size: str = "2K",
        image: Optional[ImageInput] = None,
    ) -> bytes:
        """Generate an image using the Anthropic API.

        Present only for interface parity with the other provider clients
        (image generation is currently Gemini-only); all arguments are
        accepted but unused.

        Raises:
            NotImplementedError: This method is not yet implemented for Anthropic.
        """
        raise NotImplementedError("Image generation is not implemented for Anthropic.")

    def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
        """Return the models available to the authenticated Anthropic account.

        Each entry has an ``"id"`` key and a possibly-``None``
        ``"display_name"`` key.

        Raises:
            ValueError: If ``api_key`` is missing.
        """
        if not api_key:
            raise ValueError("api_key must be provided.")

        client = Anthropic(api_key=api_key)
        models: list[dict[str, Optional[str]]] = []

        try:
            iterator = client.models.list()
        except Exception as exc:
            logger.exception("Anthropic list models failed: %s", exc)
            raise

        for model in iterator:
            # Entries may be SDK objects or plain dicts; handle both shapes.
            model_id = getattr(model, "id", None)
            if model_id is None and isinstance(model, dict):
                model_id = model.get("id")
            if not model_id:
                continue

            display_name = getattr(model, "display_name", None)
            if display_name is None and isinstance(model, dict):
                display_name = model.get("display_name")

            models.append({"id": model_id, "display_name": display_name})

        logger.info("Anthropic list_models succeeded: count=%d", len(models))
        return models

    @staticmethod
    def _to_image_block(image: ImageInput) -> dict:
        """Convert an image reference into an Anthropic content block.

        Dispatches on the reference kind: Path objects and bare strings are
        treated as filesystem paths; ``data:`` and ``http(s)://`` strings are
        handled by the dedicated helpers.
        """
        if isinstance(image, Path):
            return _block_from_path(image)

        if image.startswith("data:"):
            return _block_from_data_url(image)

        if image.startswith(("http://", "https://")):
            return _block_from_url(image)

        # Any other string is assumed to be a local filesystem path.
        return _block_from_path(Path(image))
165
+
166
+
167
+ def _block_from_path(path: Path) -> dict:
168
+ """Create an image block from a local filesystem path."""
169
+ expanded = path.expanduser()
170
+ data = expanded.read_bytes()
171
+ encoded = base64.b64encode(data).decode("utf-8")
172
+ mime_type = mimetypes.guess_type(expanded.name)[0] or "application/octet-stream"
173
+
174
+ return {
175
+ "type": "image",
176
+ "source": {
177
+ "type": "base64",
178
+ "media_type": mime_type,
179
+ "data": encoded,
180
+ },
181
+ }
182
+
183
+
184
def _block_from_url(url: str) -> dict:
    """Create an image block by downloading content from a URL.

    Prefers the Content-Type reported by the server; when absent or generic
    (``application/octet-stream``), falls back to guessing from the URL.
    """
    with urlopen(url) as response:
        payload = response.read()
        media_type = response.info().get_content_type()

    if not media_type or media_type == "application/octet-stream":
        media_type = mimetypes.guess_type(url)[0] or "application/octet-stream"

    return {
        "type": "image",
        "source": {
            "type": "base64",
            "media_type": media_type,
            "data": base64.b64encode(payload).decode("utf-8"),
        },
    }
203
+
204
+
205
+ def _block_from_data_url(data_url: str) -> dict:
206
+ """Create an image block from a data URL."""
207
+ header, encoded = data_url.split(",", 1)
208
+ metadata = header[len("data:") :]
209
+ mime_type = "application/octet-stream"
210
+ is_base64 = False
211
+
212
+ if ";" in metadata:
213
+ mime_type_part, _, remainder = metadata.partition(";")
214
+ if mime_type_part:
215
+ mime_type = mime_type_part
216
+ is_base64 = "base64" in remainder
217
+ elif metadata:
218
+ mime_type = metadata
219
+
220
+ if is_base64:
221
+ data_b64 = encoded
222
+ else:
223
+ data_bytes = encoded.encode("utf-8")
224
+ data_b64 = base64.b64encode(data_bytes).decode("utf-8")
225
+
226
+ return {
227
+ "type": "image",
228
+ "source": {
229
+ "type": "base64",
230
+ "media_type": mime_type or "application/octet-stream",
231
+ "data": data_b64,
232
+ },
233
+ }