pygpt-net 2.7.6__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. pygpt_net/CHANGELOG.txt +6 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/remote_tools.py +3 -9
  4. pygpt_net/controller/chat/stream.py +2 -2
  5. pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +13 -35
  6. pygpt_net/core/debug/models.py +2 -2
  7. pygpt_net/data/config/config.json +14 -4
  8. pygpt_net/data/config/models.json +192 -4
  9. pygpt_net/data/config/settings.json +125 -35
  10. pygpt_net/data/locale/locale.de.ini +2 -0
  11. pygpt_net/data/locale/locale.en.ini +32 -8
  12. pygpt_net/data/locale/locale.es.ini +2 -0
  13. pygpt_net/data/locale/locale.fr.ini +2 -0
  14. pygpt_net/data/locale/locale.it.ini +2 -0
  15. pygpt_net/data/locale/locale.pl.ini +3 -1
  16. pygpt_net/data/locale/locale.uk.ini +2 -0
  17. pygpt_net/data/locale/locale.zh.ini +2 -0
  18. pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
  19. pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
  20. pygpt_net/provider/api/anthropic/__init__.py +8 -3
  21. pygpt_net/provider/api/anthropic/chat.py +259 -11
  22. pygpt_net/provider/api/anthropic/computer.py +844 -0
  23. pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
  24. pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
  25. pygpt_net/provider/api/anthropic/tools.py +32 -77
  26. pygpt_net/provider/api/anthropic/utils.py +30 -0
  27. pygpt_net/provider/api/google/chat.py +3 -7
  28. pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
  29. pygpt_net/provider/api/google/utils.py +185 -0
  30. pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
  31. pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
  32. pygpt_net/provider/api/llama_index/__init__.py +0 -0
  33. pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
  34. pygpt_net/provider/api/openai/image.py +2 -2
  35. pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
  36. pygpt_net/provider/api/openai/utils.py +69 -3
  37. pygpt_net/provider/api/x_ai/__init__.py +109 -10
  38. pygpt_net/provider/api/x_ai/chat.py +0 -0
  39. pygpt_net/provider/api/x_ai/image.py +149 -47
  40. pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
  41. pygpt_net/provider/api/x_ai/responses.py +507 -0
  42. pygpt_net/{controller/chat/handler/xai_stream.py → provider/api/x_ai/stream.py} +12 -1
  43. pygpt_net/provider/api/x_ai/tools.py +59 -8
  44. pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
  45. pygpt_net/provider/api/x_ai/vision.py +1 -4
  46. pygpt_net/provider/core/config/patch.py +22 -1
  47. pygpt_net/provider/core/model/patch.py +26 -1
  48. pygpt_net/tools/image_viewer/ui/dialogs.py +3 -2
  49. pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
  50. pygpt_net/tools/text_editor/ui/widgets.py +0 -0
  51. pygpt_net/ui/widget/dialog/base.py +16 -5
  52. pygpt_net/ui/widget/textarea/editor.py +0 -0
  53. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +8 -2
  54. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +54 -48
  55. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
  56. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
  57. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.05 00:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 from typing import Optional
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.12.31 16:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 import base64
@@ -177,7 +177,7 @@ class ImageWorker(QRunnable):

     def _is_gpt_image_model(self, model_id: Optional[str] = None) -> bool:
         mid = (model_id or self.model or "").lower()
-        return mid.startswith("gpt-image-1")
+        return mid.startswith("gpt-image-1") or mid.startswith("chatgpt-image")

     def _is_dalle2(self, model_id: Optional[str] = None) -> bool:
         mid = (model_id or self.model or "").lower()
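The hunk above (apparently pygpt_net/provider/api/openai/image.py, per the +2 -2 entry in the file list) only widens the prefix test so that `chatgpt-image*` model IDs get the same GPT-Image handling as `gpt-image-1*`. For illustration only, a standalone sketch of the same predicate (not pygpt-net code; the example IDs are arbitrary):

from typing import Optional

def is_gpt_image_model(model_id: Optional[str]) -> bool:
    # Case-insensitive prefix check, mirroring the method above
    mid = (model_id or "").lower()
    return mid.startswith("gpt-image-1") or mid.startswith("chatgpt-image")

assert is_gpt_image_model("GPT-Image-1")
assert is_gpt_image_model("chatgpt-image-test")   # arbitrary example ID
assert not is_gpt_image_model("dall-e-3")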
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.12.26 00:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 import base64
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2024.11.26 19:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 import re
-
+from typing import Optional, Any

 def sanitize_name(name: str) -> str:
     """
@@ -24,4 +24,70 @@ def sanitize_name(name: str) -> str:
     # allowed characters: a-z, A-Z, 0-9, _, and -
     name = name.strip().lower()
     sanitized_name = re.sub(r'[^a-z0-9_-]', '_', name)
-    return sanitized_name[:64]  # limit to 64 characters
+    return sanitized_name[:64]  # limit to 64 characters
+
+
+def capture_openai_usage(state, u_obj: Any):
+    """
+    Extract usage for OpenAI/xAI-compatible chunks.
+
+    :param state: Chat state
+    :param u_obj: Usage object/dict
+    """
+    if not u_obj:
+        return
+    state.usage_vendor = "openai"
+    in_tok = as_int(safe_get(u_obj, "input_tokens")) or as_int(safe_get(u_obj, "prompt_tokens"))
+    out_tok = as_int(safe_get(u_obj, "output_tokens")) or as_int(safe_get(u_obj, "completion_tokens"))
+    total = as_int(safe_get(u_obj, "total_tokens"))
+    reasoning = (
+        as_int(safe_get(u_obj, "output_tokens_details.reasoning_tokens")) or
+        as_int(safe_get(u_obj, "completion_tokens_details.reasoning_tokens")) or
+        as_int(safe_get(u_obj, "reasoning_tokens")) or
+        0
+    )
+    out_with_reason = (out_tok or 0) + (reasoning or 0)
+    state.usage_payload = {"in": in_tok, "out": out_with_reason, "reasoning": reasoning or 0, "total": total}
+
+def safe_get(obj: Any, path: str) -> Any:
+    """
+    Dot-path getter for dicts and objects.
+
+    :param obj: Source object or dict
+    :param path: Dot-separated path, e.g. 'a.b.0.c'
+    :return: Value at path or None
+    """
+    cur = obj
+    for seg in path.split("."):
+        if cur is None:
+            return None
+        if isinstance(cur, dict):
+            cur = cur.get(seg)
+        else:
+            if seg.isdigit() and isinstance(cur, (list, tuple)):
+                idx = int(seg)
+                if 0 <= idx < len(cur):
+                    cur = cur[idx]
+                else:
+                    return None
+            else:
+                cur = getattr(cur, seg, None)
+    return cur
+
+
+def as_int(val: Any) -> Optional[int]:
+    """
+    Coerce to int if possible, else None.
+
+    :param val: Input value
+    :return: int or None
+    """
+    if val is None:
+        return None
+    try:
+        return int(val)
+    except Exception:
+        try:
+            return int(float(val))
+        except Exception:
+            return None
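Per the file list, these helpers land in pygpt_net/provider/api/openai/utils.py; they let the stream worker read token usage from either dict-shaped or attribute-shaped usage chunks, and fold reasoning tokens into the output count. A minimal usage sketch, assuming that import path (inferred from the file list) and using a SimpleNamespace as a stand-in for the real stream state object, which this diff does not show:

from types import SimpleNamespace
from pygpt_net.provider.api.openai.utils import capture_openai_usage  # path inferred from the file list

# A Chat Completions style usage chunk (dict-shaped); attribute-shaped objects work the same via safe_get()
usage_chunk = {
    "prompt_tokens": 120,
    "completion_tokens": 80,
    "total_tokens": 200,
    "completion_tokens_details": {"reasoning_tokens": 32},
}

state = SimpleNamespace(usage_vendor=None, usage_payload=None)  # stand-in for the worker's state
capture_openai_usage(state, usage_chunk)
print(state.usage_vendor)   # "openai"
print(state.usage_payload)  # {'in': 120, 'out': 112, 'reasoning': 32, 'total': 200}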
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2026.01.03 17:00:00 #
+# Updated Date: 2026.01.04 19:00:00 #
 # ================================================== #

 from typing import Optional, Dict, Any
@@ -33,7 +33,8 @@ from .vision import Vision
 from .tools import Tools
 from .audio import Audio
 from .image import Image
-from .remote import Remote
+from .remote_tools import Remote
+from .responses import Responses


 class ApiXAI:
@@ -49,7 +50,8 @@
         self.tools = Tools(window)
         self.audio = Audio(window)
         self.image = Image(window)
-        self.remote = Remote(window)  # Live Search builder
+        self.remote = Remote(window)
+        self.responses = Responses(window)
         self.client: Optional[xai_sdk.Client] = None
         self.locked = False
         self.last_client_args: Optional[Dict[str, Any]] = None
@@ -101,8 +103,9 @@
         """
         Make an API call to xAI.

-        Supports chat (stream/non-stream), images (via REST),
-        and function-calling. Audio is not available in public xAI SDK at this time.
+        Uses old API and Chat Responses (stateful) via xai_sdk:
+        - Streaming: chat.stream() (tuples of (response, chunk))
+        - Non-stream: chat.sample()

         :param context: BridgeContext
         :param extra: Extra params (not used)
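The rewritten docstring names the two xai_sdk entry points the provider now relies on: chat.stream(), which yields (response, chunk) tuples, and chat.sample() for non-streaming calls. A minimal standalone sketch of that pattern, following xAI's published SDK examples rather than pygpt-net code (model ID and prompt are placeholders; XAI_API_KEY must be set):

import os
from xai_sdk import Client
from xai_sdk.chat import user

client = Client(api_key=os.getenv("XAI_API_KEY"))
chat = client.chat.create(model="grok-4")          # placeholder model ID
chat.append(user("Say hello in one sentence."))

# Streaming: each iteration yields (response, chunk); chunk.content carries the text delta
for response, chunk in chat.stream():
    print(chunk.content, end="", flush=True)
print()

# Non-streaming alternative: resp = chat.sample(); print(resp.content)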
@@ -113,9 +116,14 @@
         stream = context.stream
         ctx = context.ctx
         ai_name = (ctx.output_name if ctx else "assistant")
+        model = context.model  # model instance (item, not id)
         used_tokens = 0
         response = None
         ctx.chunk_type = ChunkType.XAI_SDK
+
+        use_responses_api = True
+        if model and model.id.startswith("grok-3"):
+            use_responses_api = False  # use old API

         if mode in (
             MODE_COMPLETION,
@@ -123,9 +131,15 @@
             MODE_AUDIO,
             MODE_RESEARCH
         ):
-            # There is no public realtime audio in SDK; treat MODE_AUDIO as chat (TTS not supported).
-            response = self.chat.send(context=context, extra=extra)
-            used_tokens = self.chat.get_used_tokens()
+            # Audio TTS is not exposed via public SDK; treat MODE_AUDIO as chat input.
+            # NOTE: for grok-3 use Chat completions, for > grok-4 use Chat responses
+            if use_responses_api:
+                response = self.responses.send(context=context, extra=extra)  # responses
+                used_tokens = self.responses.get_used_tokens()
+            else:
+                response = self.chat.send(context=context, extra=extra)  # completions
+                used_tokens = self.chat.get_used_tokens()
+
         if ctx:
             self.vision.append_images(ctx)

@@ -151,7 +165,10 @@

         if ctx:
             ctx.ai_name = ai_name
-            self.chat.unpack_response(context.mode, response, ctx)
+            if use_responses_api:
+                self.responses.unpack_response(context.mode, response, ctx)
+            else:
+                self.chat.unpack_response(context.mode, response, ctx)
             try:
                 for tc in getattr(ctx, "tool_calls", []) or []:
                     fn = tc.get("function") or {}
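Taken together, the three hunks above gate the new stateful Responses wrapper on the model ID: grok-3 era models stay on the legacy chat-completions path, while newer models go through self.responses for both send() and unpack_response(). Condensed into one hypothetical helper (no such method exists in the codebase; this only restates the dispatch shown above):

# Hypothetical condensation of the dispatch shown in the hunks above
def _pick_backend(self, model):
    use_responses_api = not (model and model.id.startswith("grok-3"))
    return self.responses if use_responses_api else self.chat

# ...inside call():
#   backend = self._pick_backend(context.model)
#   response = backend.send(context=context, extra=extra)
#   used_tokens = backend.get_used_tokens()
#   ...
#   backend.unpack_response(context.mode, response, ctx)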
@@ -175,6 +192,88 @@

         If context.request is set, makes a full call() instead (for consistency).

+        :param context: BridgeContext
+        :param extra: Extra params (not used)
+        :return: Output text or "" on error
+        """
+        model = context.model or self.window.core.models.from_defaults()
+        if model and model.id.startswith("grok-3"):
+            return self.quick_call_old(context, extra)  # grok-3 uses old path
+
+        if context.request:
+            context.stream = False
+            context.mode = MODE_CHAT
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
+            return context.ctx.output
+
+        self.locked = True
+        try:
+            ctx = context.ctx
+            prompt = context.prompt
+            system_prompt = context.system_prompt
+            history = context.history
+            functions = context.external_functions
+            attachments = context.attachments
+            multimodal_ctx = context.multimodal_ctx
+
+            # Prepare client-side tools for SDK (no server-side tools in quick_call)
+            client_tools = self.tools.prepare_sdk_tools(functions)
+
+            client = self.get_client(MODE_CHAT, model)
+            # store_messages: false for quick, and false if images present (SDK guidance)
+            store_messages = False
+            prev_id = None
+
+            # Create chat session
+            include = []
+            chat = client.chat.create(
+                model=model.id,
+                tools=(client_tools if client_tools else None),
+                include=(include if include else None),
+                store_messages=store_messages,
+                previous_response_id=prev_id,
+            )
+
+            # Append history if enabled and no previous_response_id is used
+            self.responses.append_history_sdk(
+                chat=chat,
+                system_prompt=system_prompt,
+                model=model,
+                history=history,
+            )
+
+            # Append current prompt with optional images
+            self.responses.append_current_user_sdk(
+                chat=chat,
+                prompt=prompt,
+                attachments=attachments,
+                multimodal_ctx=multimodal_ctx,
+            )
+
+            resp = chat.sample()
+            # Extract client-side tool calls if any (leave server-side out)
+            out = getattr(resp, "content", "") or ""
+            if ctx:
+                self.responses.quick_collect_response_id(resp, ctx)
+            return out.strip()
+        except Exception as e:
+            self.window.core.debug.log(e)
+            return ""
+        finally:
+            self.locked = False
+
+    def quick_call_old(
+        self,
+        context: BridgeContext,
+        extra: dict = None
+    ) -> str:
+        """
+        Quick non-streaming xAI chat call and return output text.
+
+        If context.request is set, makes a full call() instead (for consistency).
+
         :param context: BridgeContext
         :param extra: Extra params (not used)
         :return: Output text or "" on error
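Stripped of the pygpt-net plumbing (tool preparation, history, attachments), the SDK flow that the new quick_call() wraps is a one-shot, non-persisted chat session. A rough standalone sketch based on the calls visible in the hunk and xAI's public SDK examples (model ID, prompts and env-var handling are placeholders, not the application's configuration path):

import os
from xai_sdk import Client
from xai_sdk.chat import system, user

client = Client(api_key=os.getenv("XAI_API_KEY"))
chat = client.chat.create(
    model="grok-4",              # placeholder; quick_call() passes model.id
    tools=None,                  # client-side tools omitted in this sketch
    store_messages=False,        # quick calls are not stored server-side
    previous_response_id=None,   # no stateful continuation
)
chat.append(system("You are a helpful assistant."))
chat.append(user("Summarize the xAI SDK in one sentence."))

resp = chat.sample()             # non-streaming
print((getattr(resp, "content", "") or "").strip())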
@@ -202,7 +301,7 @@
             # If tools are present, prefer non-streaming HTTP Chat Completions path to extract tool calls reliably.
             # Otherwise use native SDK chat.sample().
            if tools:
-                out, calls, citations, usage = self.chat.call_http_nonstream(
+                out, calls, citations, usage = self.chat.call_http_nonstream(
                    model=model.id,
                    prompt=prompt,
                    system_prompt=system_prompt,
File without changes
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.12.31 16:00:00 #
+# Updated Date: 2026.01.04 19:00:00 #
 # ================================================== #

 import base64
 import datetime
 import os
-from typing import Optional, Dict, Any, List
+from typing import Optional, Dict, Any, List, Iterable

 import requests
 from PySide6.QtCore import QObject, Signal, QRunnable, Slot
@@ -35,7 +35,7 @@ class Image:
         sync: bool = True
     ) -> bool:
         """
-        Generate image(s) via xAI REST API /v1/images/generations (OpenAI-compatible).
+        Generate image(s) via xAI SDK image API.
         Model: grok-2-image (or -1212 variants).

         :param context: BridgeContext with prompt, model, ctx
@@ -60,7 +60,7 @@
         worker = ImageWorker()
         worker.window = self.window
         worker.ctx = ctx
-        worker.model = model.id or "grok-2-image"
+        worker.model = (model.id or "grok-2-image")
         worker.input_prompt = prompt
         worker.model_prompt = prompt_model
         worker.system_prompt = self.window.core.prompt.get('img')
@@ -108,8 +108,10 @@
         self.raw = False
         self.num = 1

-        # API
-        self.api_url = "https://api.x.ai/v1/images/generations"  # OpenAI-compatible endpoint
+        # SDK image_format:
+        # - "base64": returns raw image bytes in SDK response (preferred for local saving)
+        # - "url": returns URL on xAI managed storage (fallback: we download)
+        self.image_format = "base64"

     @Slot()
     def run(self):
@@ -143,48 +145,35 @@

             self.signals.status.emit(trans('img.status.generating') + f": {self.input_prompt}...")

-            cfg = self.window.core.config
-            api_key = cfg.get("api_key_xai") or os.environ.get("XAI_API_KEY") or ""
-            self.api_url = cfg.get("api_endpoint_xai") + "/images/generations"
-            if not api_key:
-                raise RuntimeError("Missing xAI API key. Set `api_key_xai` in config or XAI_API_KEY in env.")
-
-            headers = {
-                "Authorization": f"Bearer {api_key}",
-                "Content-Type": "application/json",
-            }
-            payload = {
-                "model": self.model or "grok-2-image",
-                "prompt": self.input_prompt or "",
-                "n": max(1, min(int(self.num), 10)),
-                "response_format": "b64_json",  # get base64 so we can save locally
-            }
-
-            r = requests.post(self.api_url, headers=headers, json=payload, timeout=180)
-            r.raise_for_status()
-            data = r.json()
-
-            images = []
-            for idx, img in enumerate((data.get("data") or [])[: self.num]):
-                b64 = img.get("b64_json")
-                if not b64:
-                    # fallback: url download
-                    url = img.get("url")
-                    if url:
-                        try:
-                            rr = requests.get(url, timeout=60)
-                            if rr.status_code == 200:
-                                images.append(rr.content)
-                        except Exception:
-                            pass
-                    continue
-                try:
-                    images.append(base64.b64decode(b64))
-                except Exception:
-                    continue
+            # use xAI SDK client
+            client = self.window.core.api.xai.get_client()
+
+            # enforce n range [1..10] as per docs
+            n = max(1, min(int(self.num or 1), 10))

+            images_bytes: List[bytes] = []
+            if n == 1:
+                # single image
+                resp = client.image.sample(
+                    model=self.model or "grok-2-image",
+                    prompt=self.input_prompt or "",
+                    image_format=("base64" if self.image_format == "base64" else "url"),
+                )
+                images_bytes = self._extract_bytes_from_single(resp)
+            else:
+                # batch images
+                resp_iter = client.image.sample_batch(
+                    model=self.model or "grok-2-image",
+                    prompt=self.input_prompt or "",
+                    n=n,
+                    image_format=("base64" if self.image_format == "base64" else "url"),
+                )
+                images_bytes = self._extract_bytes_from_batch(resp_iter)
+
+            # save images to files
             paths: List[str] = []
-            for i, content in enumerate(images):
+            for i, content in enumerate(images_bytes):
+                # generate filename
                 name = (
                     datetime.date.today().strftime("%Y-%m-%d") + "_" +
                     datetime.datetime.now().strftime("%H-%M-%S") + "-" +
@@ -192,7 +181,7 @@
                     str(i + 1) + ".jpg"
                 )
                 path = os.path.join(self.window.core.config.get_user_dir("img"), name)
-                self.signals.status.emit(trans('img.status.downloading') + f" ({i + 1} / {self.num}) -> {path}")
+                self.signals.status.emit(trans('img.status.downloading') + f" ({i + 1} / {len(images_bytes)}) -> {path}")

                 if self.window.core.image.save_image(path, content):
                     paths.append(path)
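With the REST call gone, generation goes through the SDK's image API: client.image.sample() for a single image and client.image.sample_batch() for up to 10. A standalone sketch of both call shapes, based on the hunk above and the helper docstrings below (prompt and output path are placeholders; per those docstrings resp.image is expected to hold raw bytes when image_format="base64", so a base64-string fallback is kept just in case):

import base64
import os
from xai_sdk import Client

client = Client(api_key=os.getenv("XAI_API_KEY"))

# Single image: image_format="base64" should put raw bytes on resp.image
resp = client.image.sample(
    model="grok-2-image",
    prompt="A watercolor fox in a winter forest",   # placeholder prompt
    image_format="base64",
)
data = resp.image if isinstance(resp.image, (bytes, bytearray)) else base64.b64decode(resp.image)
with open("fox.jpg", "wb") as f:                    # placeholder path
    f.write(data)

# Batch: an iterable of image responses; with image_format="url" each item carries a hosted URL
for i, item in enumerate(client.image.sample_batch(
    model="grok-2-image",
    prompt="A watercolor fox in a winter forest",
    n=3,
    image_format="url",
)):
    print(i, item.url)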
@@ -207,6 +196,119 @@
         finally:
             self._cleanup()

+    # ---------- SDK response helpers ----------
+
+    def _extract_bytes_from_single(self, resp) -> List[bytes]:
+        """
+        Normalize single-image SDK response to a list of bytes.
+        Accepts:
+        - resp.image -> bytes or base64 str (docs say raw bytes)
+        - resp.url -> download
+        - dict-like/legacy: {'b64_json': ..., 'url': ...}
+        """
+        out: List[bytes] = []
+        try:
+            # preferred path: raw bytes when image_format="base64"
+            img_bytes = getattr(resp, "image", None)
+            if isinstance(img_bytes, (bytes, bytearray)):
+                out.append(bytes(img_bytes))
+                return out
+            if isinstance(img_bytes, str):
+                try:
+                    out.append(base64.b64decode(img_bytes))
+                    return out
+                except Exception:
+                    pass
+
+            # url fallback
+            url = getattr(resp, "url", None)
+            if isinstance(url, str) and url:
+                try:
+                    r = requests.get(url, timeout=60)
+                    if r.status_code == 200:
+                        out.append(r.content)
+                        return out
+                except Exception:
+                    pass
+
+            # dict-like fallbacks
+            if isinstance(resp, dict):
+                if "b64_json" in resp and resp["b64_json"]:
+                    try:
+                        out.append(base64.b64decode(resp["b64_json"]))
+                        return out
+                    except Exception:
+                        pass
+                if "url" in resp and resp["url"]:
+                    try:
+                        r = requests.get(resp["url"], timeout=60)
+                        if r.status_code == 200:
+                            out.append(r.content)
+                            return out
+                    except Exception:
+                        pass
+        except Exception:
+            pass
+        return out
+
+    def _extract_bytes_from_batch(self, resp_iter: Iterable) -> List[bytes]:
+        """
+        Normalize batch SDK response (iterable of images) to a list of bytes.
+        Handles item.image (bytes/str), item.url, dict-like or bytes directly.
+        """
+        out: List[bytes] = []
+        if resp_iter is None:
+            return out
+        try:
+            for item in resp_iter:
+                # bytes directly
+                if isinstance(item, (bytes, bytearray)):
+                    out.append(bytes(item))
+                    continue
+
+                # preferred: raw bytes in item.image
+                img_bytes = getattr(item, "image", None)
+                if isinstance(img_bytes, (bytes, bytearray)):
+                    out.append(bytes(img_bytes))
+                    continue
+                if isinstance(img_bytes, str):
+                    try:
+                        out.append(base64.b64decode(img_bytes))
+                        continue
+                    except Exception:
+                        pass
+
+                # url fallback
+                url = getattr(item, "url", None)
+                if isinstance(url, str) and url:
+                    try:
+                        r = requests.get(url, timeout=60)
+                        if r.status_code == 200:
+                            out.append(r.content)
+                            continue
+                    except Exception:
+                        pass
+
+                # dict-like fallbacks
+                if isinstance(item, dict):
+                    if "b64_json" in item and item["b64_json"]:
+                        try:
+                            out.append(base64.b64decode(item["b64_json"]))
+                            continue
+                        except Exception:
+                            pass
+                    if "url" in item and item["url"]:
+                        try:
+                            r = requests.get(item["url"], timeout=60)
+                            if r.status_code == 200:
+                                out.append(r.content)
+                                continue
+                        except Exception:
+                            pass
+        except Exception:
+            pass
+        return out
+
     def _cleanup(self):
         """Cleanup signals to avoid multiple calls."""
         sig = self.signals