pygpt-net 2.7.6__py3-none-any.whl → 2.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +5 -1
  4. pygpt_net/controller/assistant/batch.py +2 -2
  5. pygpt_net/controller/assistant/files.py +7 -6
  6. pygpt_net/controller/assistant/threads.py +0 -0
  7. pygpt_net/controller/chat/command.py +0 -0
  8. pygpt_net/controller/chat/remote_tools.py +3 -9
  9. pygpt_net/controller/chat/stream.py +2 -2
  10. pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +13 -35
  11. pygpt_net/controller/dialogs/confirm.py +35 -58
  12. pygpt_net/controller/lang/mapping.py +9 -9
  13. pygpt_net/controller/remote_store/{google/batch.py → batch.py} +209 -252
  14. pygpt_net/controller/remote_store/remote_store.py +982 -13
  15. pygpt_net/core/command/command.py +0 -0
  16. pygpt_net/core/db/viewer.py +1 -1
  17. pygpt_net/core/debug/models.py +2 -2
  18. pygpt_net/core/realtime/worker.py +3 -1
  19. pygpt_net/{controller/remote_store/google → core/remote_store/anthropic}/__init__.py +0 -1
  20. pygpt_net/core/remote_store/anthropic/files.py +211 -0
  21. pygpt_net/core/remote_store/anthropic/store.py +208 -0
  22. pygpt_net/core/remote_store/openai/store.py +5 -4
  23. pygpt_net/core/remote_store/remote_store.py +5 -1
  24. pygpt_net/{controller/remote_store/openai → core/remote_store/xai}/__init__.py +0 -1
  25. pygpt_net/core/remote_store/xai/files.py +225 -0
  26. pygpt_net/core/remote_store/xai/store.py +219 -0
  27. pygpt_net/data/config/config.json +18 -5
  28. pygpt_net/data/config/models.json +193 -4
  29. pygpt_net/data/config/settings.json +179 -36
  30. pygpt_net/data/icons/folder_eye.svg +1 -0
  31. pygpt_net/data/icons/folder_eye_filled.svg +1 -0
  32. pygpt_net/data/icons/folder_open.svg +1 -0
  33. pygpt_net/data/icons/folder_open_filled.svg +1 -0
  34. pygpt_net/data/locale/locale.de.ini +6 -3
  35. pygpt_net/data/locale/locale.en.ini +46 -12
  36. pygpt_net/data/locale/locale.es.ini +6 -3
  37. pygpt_net/data/locale/locale.fr.ini +6 -3
  38. pygpt_net/data/locale/locale.it.ini +6 -3
  39. pygpt_net/data/locale/locale.pl.ini +7 -4
  40. pygpt_net/data/locale/locale.uk.ini +6 -3
  41. pygpt_net/data/locale/locale.zh.ini +6 -3
  42. pygpt_net/icons.qrc +4 -0
  43. pygpt_net/icons_rc.py +282 -138
  44. pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
  45. pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
  46. pygpt_net/provider/api/anthropic/__init__.py +10 -3
  47. pygpt_net/provider/api/anthropic/chat.py +342 -11
  48. pygpt_net/provider/api/anthropic/computer.py +844 -0
  49. pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
  50. pygpt_net/provider/api/anthropic/store.py +307 -0
  51. pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +99 -10
  52. pygpt_net/provider/api/anthropic/tools.py +32 -77
  53. pygpt_net/provider/api/anthropic/utils.py +30 -0
  54. pygpt_net/{controller/chat/handler → provider/api/anthropic/worker}/__init__.py +0 -0
  55. pygpt_net/provider/api/anthropic/worker/importer.py +278 -0
  56. pygpt_net/provider/api/google/chat.py +62 -9
  57. pygpt_net/provider/api/google/store.py +124 -3
  58. pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +92 -25
  59. pygpt_net/provider/api/google/utils.py +185 -0
  60. pygpt_net/provider/api/google/worker/importer.py +16 -28
  61. pygpt_net/provider/api/langchain/__init__.py +0 -0
  62. pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
  63. pygpt_net/provider/api/llama_index/__init__.py +0 -0
  64. pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
  65. pygpt_net/provider/api/openai/assistants.py +2 -2
  66. pygpt_net/provider/api/openai/image.py +2 -2
  67. pygpt_net/provider/api/openai/store.py +4 -1
  68. pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
  69. pygpt_net/provider/api/openai/utils.py +69 -3
  70. pygpt_net/provider/api/openai/worker/importer.py +19 -61
  71. pygpt_net/provider/api/openai/worker/importer_assistants.py +230 -0
  72. pygpt_net/provider/api/x_ai/__init__.py +138 -15
  73. pygpt_net/provider/api/x_ai/audio.py +43 -11
  74. pygpt_net/provider/api/x_ai/chat.py +92 -4
  75. pygpt_net/provider/api/x_ai/image.py +149 -47
  76. pygpt_net/provider/api/x_ai/realtime/__init__.py +12 -0
  77. pygpt_net/provider/api/x_ai/realtime/client.py +1825 -0
  78. pygpt_net/provider/api/x_ai/realtime/realtime.py +198 -0
  79. pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +183 -70
  80. pygpt_net/provider/api/x_ai/responses.py +507 -0
  81. pygpt_net/provider/api/x_ai/store.py +610 -0
  82. pygpt_net/{controller/chat/handler/xai_stream.py → provider/api/x_ai/stream.py} +42 -10
  83. pygpt_net/provider/api/x_ai/tools.py +59 -8
  84. pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
  85. pygpt_net/provider/api/x_ai/vision.py +1 -4
  86. pygpt_net/provider/api/x_ai/worker/importer.py +308 -0
  87. pygpt_net/provider/audio_input/xai_grok_voice.py +390 -0
  88. pygpt_net/provider/audio_output/xai_tts.py +325 -0
  89. pygpt_net/provider/core/config/patch.py +39 -3
  90. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +2 -2
  91. pygpt_net/provider/core/model/patch.py +39 -1
  92. pygpt_net/tools/image_viewer/tool.py +334 -34
  93. pygpt_net/tools/image_viewer/ui/dialogs.py +319 -22
  94. pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
  95. pygpt_net/tools/text_editor/ui/widgets.py +0 -0
  96. pygpt_net/ui/dialog/assistant.py +1 -1
  97. pygpt_net/ui/dialog/plugins.py +13 -5
  98. pygpt_net/ui/dialog/remote_store.py +552 -0
  99. pygpt_net/ui/dialogs.py +3 -5
  100. pygpt_net/ui/layout/ctx/ctx_list.py +58 -7
  101. pygpt_net/ui/menu/tools.py +6 -13
  102. pygpt_net/ui/widget/dialog/base.py +16 -5
  103. pygpt_net/ui/widget/dialog/{remote_store_google.py → remote_store.py} +10 -10
  104. pygpt_net/ui/widget/element/button.py +4 -4
  105. pygpt_net/ui/widget/image/display.py +2 -2
  106. pygpt_net/ui/widget/lists/context.py +2 -2
  107. pygpt_net/ui/widget/textarea/editor.py +0 -0
  108. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.8.dist-info}/METADATA +15 -2
  109. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.8.dist-info}/RECORD +107 -89
  110. pygpt_net/controller/remote_store/google/store.py +0 -615
  111. pygpt_net/controller/remote_store/openai/batch.py +0 -524
  112. pygpt_net/controller/remote_store/openai/store.py +0 -699
  113. pygpt_net/ui/dialog/remote_store_google.py +0 -539
  114. pygpt_net/ui/dialog/remote_store_openai.py +0 -539
  115. pygpt_net/ui/widget/dialog/remote_store_openai.py +0 -56
  116. pygpt_net/ui/widget/lists/remote_store_google.py +0 -248
  117. pygpt_net/ui/widget/lists/remote_store_openai.py +0 -317
  118. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.8.dist-info}/LICENSE +0 -0
  119. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.8.dist-info}/WHEEL +0 -0
  120. {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.8.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/api/anthropic/chat.py
@@ -6,12 +6,14 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.05 01:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #
 
-from typing import Optional, Dict, Any, List
+import json
+import os
+from typing import Optional, Dict, Any, List, Set
 
-from pygpt_net.core.types import MODE_CHAT, MODE_AUDIO
+from pygpt_net.core.types import MODE_CHAT, MODE_AUDIO, MODE_COMPUTER
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.attachment import AttachmentItem
 from pygpt_net.item.ctx import CtxItem
@@ -51,20 +53,43 @@ class Chat:
         api = self.window.core.api.anthropic
         client: anthropic.Anthropic = api.get_client(context.mode, model)
 
-        msgs = self.build_input(
-            prompt=prompt,
-            system_prompt=system_prompt,
+        # Tool result turn (Computer Use): if previous assistant emitted tool_use and we have tool output,
+        # build a minimal message triplet: [user(prompt that triggered tool), assistant(tool_use), user(tool_result + images)].
+        tool_result_messages = self._build_tool_result_messages_if_needed(
             model=model,
             history=context.history,
             attachments=attachments,
-            multimodal_ctx=multimodal_ctx,
         )
+        if tool_result_messages is not None:
+            msgs = tool_result_messages
+        else:
+            msgs = self.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=context.history,
+                attachments=attachments,
+                multimodal_ctx=multimodal_ctx,
+            )
 
         self.reset_tokens()
         count_msgs = self._build_count_messages(prompt, system_prompt, model, context.history)
         self.input_tokens += self.window.core.tokens.from_messages(count_msgs, model.id)
 
+        # Build tools (client tools + Anthropic server tools)
         tools = api.tools.get_all_tools(model, functions)
+
+        # Enable Computer Use tool in computer mode (use the official Tool/ComputerUse object)
+        if mode == MODE_COMPUTER or (model and isinstance(model.id, str) and "computer-use" in model.id.lower()):
+            tool = self.window.core.api.anthropic.computer.get_tool()
+            tools = [tool]  # reset tools to only Computer Use (multiple tools not supported together)
+
+        # MCP: servers from config
+        mcp_servers = self._load_mcp_servers_from_cfg()
+
+        # Compute betas required by selected server tools or MCP
+        betas = self._compute_required_betas(tools, mcp_servers)
+
         max_tokens = context.max_tokens if context.max_tokens else 1024
         temperature = self.window.core.config.get('temperature')
         top_p = self.window.core.config.get('top_p')
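The `mcp_servers` value loaded above comes from `_load_mcp_servers_from_cfg()` (added further down in this diff), which accepts the `remote_tools.anthropic.mcp.mcp_servers` setting as a JSON string, a list, or a single dict. A minimal sketch of a value that would round-trip through that loader, using the URL-style server entry from Anthropic's MCP connector docs; the URL, name, and token are placeholders:

```python
# Hypothetical value for the "remote_tools.anthropic.mcp.mcp_servers" setting.
# Shape follows Anthropic's MCP connector docs; URL, name and token are placeholders.
import json

mcp_servers_cfg = json.dumps([
    {
        "type": "url",                                   # remote MCP server reachable over HTTP
        "url": "https://example-mcp-server.local/sse",   # placeholder endpoint
        "name": "example-server",                        # label exposed to the model
        "authorization_token": "YOUR_TOKEN",             # optional, if the server requires auth
    }
])

# _load_mcp_servers_from_cfg() json.loads() this string back into a list,
# and the chat call passes the result verbatim as params["mcp_servers"].
print(json.loads(mcp_servers_cfg))
```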
@@ -83,14 +108,25 @@ class Chat:
             params["top_p"] = top_p
         if tools:  # only include when non-empty list
             params["tools"] = tools  # must be a valid list per API
+        if mcp_servers:
+            params["mcp_servers"] = mcp_servers  # MCP connector servers per docs
 
         if mode == MODE_AUDIO:
             stream = False  # no native TTS
 
+        # Decide whether to call stable or beta endpoint
+        use_beta = len(betas) > 0
+
         if stream:
-            return client.messages.create(stream=True, **params)
+            if use_beta:
+                return client.beta.messages.create(stream=True, betas=list(betas), **params)
+            else:
+                return client.messages.create(stream=True, **params)
         else:
-            return client.messages.create(**params)
+            if use_beta:
+                return client.beta.messages.create(betas=list(betas), **params)
+            else:
+                return client.messages.create(**params)
 
     def unpack_response(self, mode: str, response: Message, ctx: CtxItem):
         """
@@ -115,7 +151,19 @@
             ctx.set_tokens(p, c)
             if not isinstance(ctx.extra, dict):
                 ctx.extra = {}
-            ctx.extra["usage"] = {"vendor": "anthropic", "input_tokens": p, "output_tokens": c}
+            # include server_tool_use counts if present
+            server_tool_use = {}
+            if hasattr(usage, "server_tool_use"):
+                try:
+                    server_tool_use = dict(getattr(usage, "server_tool_use"))
+                except Exception:
+                    server_tool_use = {}
+            ctx.extra["usage"] = {
+                "vendor": "anthropic",
+                "input_tokens": p,
+                "output_tokens": c,
+                "server_tool_use": server_tool_use,
+            }
         except Exception:
             pass
 
@@ -125,6 +173,18 @@
         except Exception:
             pass
 
+        # Collect fetched URLs from web_fetch_tool_result blocks
+        try:
+            self._collect_web_fetch_urls(response, ctx)
+        except Exception:
+            pass
+
+        # Download files referenced by code execution results (Files API)
+        try:
+            self._maybe_download_files_from_response(response, ctx)
+        except Exception:
+            pass
+
     def extract_text(self, response: Message) -> str:
         """
         Extract text from response content blocks.
@@ -210,6 +270,39 @@
                 if u not in ctx.urls:
                     ctx.urls.append(u)
 
+    def _collect_web_fetch_urls(self, response: Message, ctx: CtxItem):
+        """
+        Collect URLs from web_fetch_tool_result blocks and attach to ctx.urls.
+
+        :param response: Message response from API
+        :param ctx: CtxItem to update
+        """
+        urls: List[str] = []
+        try:
+            for blk in getattr(response, "content", []) or []:
+                if getattr(blk, "type", "") == "web_fetch_tool_result":
+                    content = getattr(blk, "content", {}) or {}
+                    if isinstance(content, dict):
+                        if content.get("type") == "web_fetch_result":
+                            u = (content.get("url") or "").strip()
+                            if u.startswith("http://") or u.startswith("https://"):
+                                urls.append(u)
+                        # citations may embed multiple URLs
+                        if content.get("type") == "web_fetch_result" and isinstance(content.get("citations"), list):
+                            for cit in content["citations"]:
+                                u = (cit.get("url") or "").strip()
+                                if u.startswith("http://") or u.startswith("https://"):
+                                    urls.append(u)
+        except Exception:
+            pass
+
+        if urls:
+            if ctx.urls is None:
+                ctx.urls = []
+            for u in urls:
+                if u not in ctx.urls:
+                    ctx.urls.append(u)
+
     def build_input(
         self,
         prompt: str,
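To make the parsing above concrete, here is a tiny self-contained run over a fake response. `SimpleNamespace` only mimics attribute access on SDK content blocks, and the block fields are limited to what `_collect_web_fetch_urls()` actually reads; the real payload carries more:

```python
# Illustrative only: a fake response whose content blocks carry the fields
# the helper reads (block.type, content["url"], content["citations"]).
from types import SimpleNamespace

fake_response = SimpleNamespace(content=[
    SimpleNamespace(
        type="web_fetch_tool_result",
        content={
            "type": "web_fetch_result",
            "url": "https://example.com/article",
            "citations": [{"url": "https://example.com/source"}],
        },
    ),
    SimpleNamespace(type="text", content="Summary of the page..."),
])

urls = []
for blk in getattr(fake_response, "content", []) or []:
    if getattr(blk, "type", "") == "web_fetch_tool_result":
        content = getattr(blk, "content", {}) or {}
        if isinstance(content, dict) and content.get("type") == "web_fetch_result":
            u = (content.get("url") or "").strip()
            if u.startswith(("http://", "https://")):
                urls.append(u)
            for cit in content.get("citations") or []:
                cu = (cit.get("url") or "").strip()
                if cu.startswith(("http://", "https://")):
                    urls.append(cu)

print(urls)  # ['https://example.com/article', 'https://example.com/source']
```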
@@ -271,6 +364,7 @@
         if attachments:
             img_parts = self.window.core.api.anthropic.vision.build_blocks(content, attachments)
             parts.extend(img_parts)
+            content = ""  # image-first; do not duplicate text if build_blocks already added it
         if content:
             parts.append({"type": "text", "text": str(content)})
 
@@ -338,4 +432,241 @@
 
         :return: used input tokens
         """
-        return self.input_tokens
+        return self.input_tokens
+
+    def _load_mcp_servers_from_cfg(self) -> List[dict]:
+        """
+        Load MCP servers definition from config JSON string(s).
+
+        Supports keys:
+        - remote_tools.anthropic.mcp.mcp_servers
+
+        :return: list of MCP servers dicts
+        """
+        cfg = self.window.core.config
+        enabled = cfg.get("remote_tools.anthropic.mcp", False)
+        if not enabled:
+            return []  # do not load if MCP is disabled
+        raw = cfg.get("remote_tools.anthropic.mcp.mcp_servers")
+        if not raw:
+            return []
+        try:
+            if isinstance(raw, (list, dict)):
+                # ensure list
+                return list(raw) if isinstance(raw, list) else [raw]
+            return json.loads(raw) or []
+        except Exception:
+            return []
+
+    def _compute_required_betas(self, tools: List[dict], mcp_servers: List[dict]) -> Set[str]:
+        """
+        Compute required beta headers for enabled server tools.
+
+        :param tools: Final tools list
+        :param mcp_servers: MCP servers list
+        :return: set of beta header strings
+        """
+        betas: Set[str] = set()
+        is_mcp = False
+        for t in tools or []:
+            ttype = str(t.get("type") or "")
+            if ttype.startswith("web_fetch_"):
+                betas.add("web-fetch-2025-09-10")
+            elif ttype.startswith("code_execution_"):
+                betas.add("code-execution-2025-08-25")
+            elif ttype in ("tool_search_tool_regex_20251119", "tool_search_tool_bm25_20251119"):
+                betas.add("advanced-tool-use-2025-11-20")
+            elif ttype == "mcp_toolset":
+                is_mcp = True
+                betas.add("mcp-client-2025-11-20")
+            elif ttype.startswith("computer_"):
+                betas.add("computer-use-2025-01-24")
+        if is_mcp and mcp_servers:
+            betas.add("mcp-client-2025-11-20")
+        return betas
+
+    # -------------------------- Tool Result helpers (Anthropic Computer Use) -------------------------- #
+
+    def _build_tool_result_messages_if_needed(
+            self,
+            model: ModelItem,
+            history: Optional[List[CtxItem]],
+            attachments: Optional[Dict[str, AttachmentItem]],
+    ) -> Optional[List[dict]]:
+        """
+        If the previous assistant turn emitted tool_use (computer) and the plugin produced tool_output,
+        construct a minimal continuation with tool_result content block(s), as required by Anthropic.
+
+        Returns a messages list or None.
+        """
+        if not self.window.core.config.get('use_context'):
+            return None
+
+        items: List[CtxItem] = self.window.core.ctx.get_history(
+            history,
+            model.id,
+            MODE_CHAT,
+            self.window.core.tokens.from_user("", ""),
+            self._fit_ctx(model),
+        )
+        if not items:
+            return None
+
+        last = items[-1]
+        if not isinstance(getattr(last, "extra", None), dict):
+            return None
+
+        tool_uses = last.extra.get("anthropic_tool_uses") or []
+        tool_output = last.extra.get("tool_output") or []
+        if not tool_uses or not tool_output:
+            return None
+
+        # Build prior user message (the one that triggered the tool use)
+        prior_user_text = ""
+        if len(items) >= 2 and getattr(items[-2], "final_input", None):
+            prior_user_text = str(items[-2].final_input)
+        elif getattr(last, "input", None):
+            prior_user_text = str(last.input)
+
+        user_msg_1 = None
+        if prior_user_text:
+            user_msg_1 = {"role": "user", "content": [{"type": "text", "text": prior_user_text}]}
+
+        # Recreate assistant tool_use block(s)
+        assistant_parts: List[dict] = []
+        for tu in tool_uses:
+            tid = str(tu.get("id", "") or "")
+            name = str(tu.get("name", "") or "computer")
+            inp = tu.get("input", {}) or {}
+            assistant_parts.append({
+                "type": "tool_use",
+                "id": tid,
+                "name": name,
+                "input": inp,
+            })
+        assistant_msg = {"role": "assistant", "content": assistant_parts} if assistant_parts else None
+
+        # Build tool_result with last tool output; attach screenshot images (if any) as additional blocks
+        result_text = self._best_tool_result_text(tool_output)
+        last_tool_use_id = str(tool_uses[-1].get("id", "") or "")
+
+        tool_result_block = {
+            "type": "tool_result",
+            "tool_use_id": last_tool_use_id,
+            "content": [{"type": "text", "text": result_text}],
+        }
+
+        # Convert current attachments to image blocks and append after tool_result in the same user message
+        image_blocks: List[dict] = []
+        if attachments:
+            img_parts = self.window.core.api.anthropic.vision.build_blocks("", attachments)
+            for part in img_parts:
+                if isinstance(part, dict) and part.get("type") in ("image", "input_image", "document"):
+                    image_blocks.append(part)
+
+        user_msg_2 = {"role": "user", "content": [tool_result_block] + image_blocks}
+
+        out: List[dict] = []
+        if user_msg_1:
+            out.append(user_msg_1)
+        if assistant_msg:
+            out.append(assistant_msg)
+        out.append(user_msg_2)
+        return out
+
+    @staticmethod
+    def _best_tool_result_text(tool_output: List[dict]) -> str:
+        """
+        Build a compact text payload from plugin tool_output list.
+        Prefers the last item with 'result' field; falls back to the last dict JSON.
+        """
+        if not tool_output:
+            return "ok"
+        last = tool_output[-1]
+        try:
+            if isinstance(last, dict):
+                if "result" in last:
+                    val = last["result"]
+                    if isinstance(val, (dict, list)):
+                        return json.dumps(val, ensure_ascii=False)
+                    return str(val)
+                return json.dumps(last, ensure_ascii=False)
+            return str(last)
+        except Exception:
+            return "ok"
+
+    # -------------------------- Files download helpers -------------------------- #
+
+    def _maybe_download_files_from_response(self, response: Message, ctx: CtxItem) -> None:
+        """
+        Scan response content blocks for Files API file_ids and download them.
+        Works for code execution tool results that output files.
+        """
+        file_ids: List[str] = []
+
+        def _to_plain(obj):
+            try:
+                if hasattr(obj, "model_dump"):
+                    return obj.model_dump()
+                if hasattr(obj, "to_dict"):
+                    return obj.to_dict()
+            except Exception:
+                pass
+            if isinstance(obj, dict):
+                return {k: _to_plain(v) for k, v in obj.items()}
+            if isinstance(obj, (list, tuple)):
+                return [_to_plain(x) for x in obj]
+            return obj
+
+        def _walk(o):
+            if o is None:
+                return
+            if isinstance(o, dict):
+                for k, v in o.items():
+                    if k == "file_id" and isinstance(v, str) and v.startswith("file_"):
+                        if v not in file_ids:
+                            file_ids.append(v)
+                    else:
+                        _walk(v)
+            elif isinstance(o, (list, tuple)):
+                for it in o:
+                    _walk(it)
+
+        try:
+            for blk in getattr(response, "content", []) or []:
+                btype = getattr(blk, "type", "") or ""
+                # code_execution results appear as *_tool_result with nested 'content'
+                if btype.endswith("_tool_result"):
+                    content = getattr(blk, "content", None)
+                    _walk(_to_plain(content))
+        except Exception:
+            pass
+
+        if not file_ids:
+            return
+
+        saved: List[str] = []
+        for fid in file_ids:
+            try:
+                path = self.window.core.api.anthropic.store.download_to_dir(fid)
+                if path:
+                    saved.append(path)
+            except Exception:
+                continue
+
+        if saved:
+            saved = self.window.core.filesystem.make_local_list(saved)
+            if not isinstance(ctx.files, list):
+                ctx.files = []
+            for p in saved:
+                if p not in ctx.files:
+                    ctx.files.append(p)
+            images = []
+            for path in saved:
+                ext = os.path.splitext(path)[1].lower().lstrip(".")
+                if ext in ["png", "jpg", "jpeg", "gif", "bmp", "tiff", "webp"]:
+                    images.append(path)
+            if images:
+                if not isinstance(ctx.images, list):
+                    ctx.images = []
+                ctx.images += images
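For reference, the continuation that `_build_tool_result_messages_if_needed()` returns has roughly this shape; the prompt text, tool_use id, and input below are placeholders, while the real values are recovered from the previous turn's `ctx.extra`:

```python
# Rough shape of the message triplet the helper assembles (placeholder values).
continuation = [
    {   # 1) the user prompt that originally triggered the tool call
        "role": "user",
        "content": [{"type": "text", "text": "Open the browser and take a screenshot"}],
    },
    {   # 2) the assistant turn replayed with its tool_use block(s)
        "role": "assistant",
        "content": [{
            "type": "tool_use",
            "id": "toolu_01ExampleId",           # placeholder id
            "name": "computer",
            "input": {"action": "screenshot"},
        }],
    },
    {   # 3) the tool_result for that tool_use, plus any screenshot image blocks
        "role": "user",
        "content": [{
            "type": "tool_result",
            "tool_use_id": "toolu_01ExampleId",  # must match the tool_use id above
            "content": [{"type": "text", "text": "ok"}],
        }],
    },
]
```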
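And a stand-alone version of the recursive `file_id` scan used by `_maybe_download_files_from_response()`, run over a made-up tool-result payload; only dict keys named `file_id` with a `file_` prefix are collected:

```python
# Stand-alone version of the nested file_id scan (the input structure is made up).
file_ids = []

def walk(o):
    if isinstance(o, dict):
        for k, v in o.items():
            if k == "file_id" and isinstance(v, str) and v.startswith("file_"):
                if v not in file_ids:
                    file_ids.append(v)
            else:
                walk(v)
    elif isinstance(o, (list, tuple)):
        for it in o:
            walk(it)

tool_result_content = {
    "type": "code_execution_result",
    "content": [
        {"type": "code_execution_output", "file_id": "file_011ExampleA"},
        {"stdout": "done", "files": [{"file_id": "file_011ExampleB"}]},
    ],
}

walk(tool_result_content)
print(file_ids)  # ['file_011ExampleA', 'file_011ExampleB']
```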