pygpt-net 2.7.5__py3-none-any.whl → 2.7.7__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (82)
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +4 -4
  3. pygpt_net/controller/chat/remote_tools.py +3 -9
  4. pygpt_net/controller/chat/stream.py +2 -2
  5. pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +20 -64
  6. pygpt_net/controller/debug/fixtures.py +3 -2
  7. pygpt_net/controller/files/files.py +65 -4
  8. pygpt_net/core/debug/models.py +2 -2
  9. pygpt_net/core/filesystem/url.py +4 -1
  10. pygpt_net/core/render/web/body.py +3 -2
  11. pygpt_net/core/types/chunk.py +27 -0
  12. pygpt_net/data/config/config.json +14 -4
  13. pygpt_net/data/config/models.json +192 -4
  14. pygpt_net/data/config/settings.json +126 -36
  15. pygpt_net/data/js/app/template.js +1 -1
  16. pygpt_net/data/js/app.min.js +2 -2
  17. pygpt_net/data/locale/locale.de.ini +5 -0
  18. pygpt_net/data/locale/locale.en.ini +35 -8
  19. pygpt_net/data/locale/locale.es.ini +5 -0
  20. pygpt_net/data/locale/locale.fr.ini +5 -0
  21. pygpt_net/data/locale/locale.it.ini +5 -0
  22. pygpt_net/data/locale/locale.pl.ini +5 -0
  23. pygpt_net/data/locale/locale.uk.ini +5 -0
  24. pygpt_net/data/locale/locale.zh.ini +5 -0
  25. pygpt_net/data/locale/plugin.cmd_mouse_control.en.ini +2 -2
  26. pygpt_net/item/ctx.py +3 -5
  27. pygpt_net/js_rc.py +2449 -2447
  28. pygpt_net/plugin/cmd_mouse_control/config.py +8 -7
  29. pygpt_net/plugin/cmd_mouse_control/plugin.py +3 -4
  30. pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
  31. pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
  32. pygpt_net/provider/api/anthropic/__init__.py +16 -9
  33. pygpt_net/provider/api/anthropic/chat.py +259 -11
  34. pygpt_net/provider/api/anthropic/computer.py +844 -0
  35. pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
  36. pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
  37. pygpt_net/provider/api/anthropic/tools.py +32 -77
  38. pygpt_net/provider/api/anthropic/utils.py +30 -0
  39. pygpt_net/provider/api/google/__init__.py +6 -5
  40. pygpt_net/provider/api/google/chat.py +3 -8
  41. pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
  42. pygpt_net/provider/api/google/utils.py +185 -0
  43. pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
  44. pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
  45. pygpt_net/provider/api/llama_index/__init__.py +0 -0
  46. pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
  47. pygpt_net/provider/api/openai/__init__.py +7 -3
  48. pygpt_net/provider/api/openai/image.py +2 -2
  49. pygpt_net/provider/api/openai/responses.py +0 -0
  50. pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
  51. pygpt_net/provider/api/openai/utils.py +69 -3
  52. pygpt_net/provider/api/x_ai/__init__.py +117 -17
  53. pygpt_net/provider/api/x_ai/chat.py +272 -102
  54. pygpt_net/provider/api/x_ai/image.py +149 -47
  55. pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
  56. pygpt_net/provider/api/x_ai/responses.py +507 -0
  57. pygpt_net/provider/api/x_ai/stream.py +715 -0
  58. pygpt_net/provider/api/x_ai/tools.py +59 -8
  59. pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
  60. pygpt_net/provider/api/x_ai/vision.py +1 -4
  61. pygpt_net/provider/core/config/patch.py +22 -1
  62. pygpt_net/provider/core/model/patch.py +26 -1
  63. pygpt_net/tools/image_viewer/ui/dialogs.py +300 -13
  64. pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
  65. pygpt_net/tools/text_editor/ui/widgets.py +5 -1
  66. pygpt_net/ui/base/context_menu.py +44 -1
  67. pygpt_net/ui/layout/toolbox/indexes.py +22 -19
  68. pygpt_net/ui/layout/toolbox/model.py +28 -5
  69. pygpt_net/ui/widget/dialog/base.py +16 -5
  70. pygpt_net/ui/widget/image/display.py +25 -8
  71. pygpt_net/ui/widget/tabs/output.py +9 -1
  72. pygpt_net/ui/widget/textarea/editor.py +14 -1
  73. pygpt_net/ui/widget/textarea/input.py +20 -7
  74. pygpt_net/ui/widget/textarea/notepad.py +24 -1
  75. pygpt_net/ui/widget/textarea/output.py +23 -1
  76. pygpt_net/ui/widget/textarea/web.py +16 -1
  77. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +16 -2
  78. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +80 -73
  79. pygpt_net/controller/chat/handler/xai_stream.py +0 -135
  80. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
  81. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
  82. {pygpt_net-2.7.5.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
pygpt_net/plugin/cmd_mouse_control/config.py
@@ -136,7 +136,7 @@ class Config(BaseConfig):
             "sandbox_path",
             type="text",
             value="",
-            label="Sandbox: Playwright browsers path",
+            label="Browsers directory",
             description="Path to Playwright browsers installation - leave empty to use default",
             tab="Sandbox (Playwright)"
         )
@@ -144,7 +144,7 @@ class Config(BaseConfig):
             "sandbox_engine",
             type="text",
             value="chromium",
-            label="Sandbox: Playwright engine (chromium|firefox|webkit)",
+            label="Engine",
             description="Playwright browser engine to use (chromium, firefox, webkit) - must be installed",
             tab="Sandbox (Playwright)"
         )
@@ -152,7 +152,7 @@ class Config(BaseConfig):
             "sandbox_headless",
             type="bool",
             value=False,
-            label="Sandbox: headless mode",
+            label="Headless mode",
             description="Run Playwright browser in headless mode (default: False)",
             tab="Sandbox (Playwright)"
         )
@@ -160,14 +160,15 @@ class Config(BaseConfig):
             "sandbox_args",
             type="textarea",
             value="--disable-extensions,\n--disable-file-system",
-            label="Sandbox: Playwright browsers args",
+            label="Browser args",
             description="Additional Playwright browser arguments (comma-separated)",
+            tab="Sandbox (Playwright)"
         )
         plugin.add_option(
             "sandbox_home",
             type="text",
             value="https://duckduckgo.com",
-            label="Sandbox: home URL",
+            label="Home URL",
             description="Playwright browser home URL",
             tab="Sandbox (Playwright)"
         )
@@ -175,7 +176,7 @@ class Config(BaseConfig):
             "sandbox_viewport_w",
             type="int",
             value=1440,
-            label="Sandbox: viewport width",
+            label="Viewport width",
             description="Playwright viewport width in pixels",
             tab="Sandbox (Playwright)"
         )
@@ -183,7 +184,7 @@ class Config(BaseConfig):
             "sandbox_viewport_h",
             type="int",
             value=900,
-            label="Sandbox: viewport height",
+            label="Viewport height",
             description="Playwright viewport height in pixels",
             tab="Sandbox (Playwright)"
         )
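These hunks only retitle the existing Playwright sandbox options. For orientation, a minimal sketch of how the `sandbox_*` options could drive a Playwright launch; the option keys and defaults come from the hunks above, while `plugin.get_option()` and the launch flow itself are assumptions for illustration, not the plugin's actual worker code.

# Hypothetical sketch only: map the sandbox_* options above onto a Playwright session.
# The option keys/defaults are from the diff; get_option() and this flow are assumed.
import os
from playwright.sync_api import sync_playwright

def launch_sandbox_browser(plugin):
    path = plugin.get_option("sandbox_path")            # "Browsers directory"
    engine = plugin.get_option("sandbox_engine")        # chromium | firefox | webkit
    headless = bool(plugin.get_option("sandbox_headless"))
    args = [a.strip() for a in (plugin.get_option("sandbox_args") or "").split(",") if a.strip()]
    home = plugin.get_option("sandbox_home")             # "Home URL"
    viewport = {
        "width": int(plugin.get_option("sandbox_viewport_w")),
        "height": int(plugin.get_option("sandbox_viewport_h")),
    }
    if path:
        # Playwright honors this env var for a non-default browsers install directory
        os.environ["PLAYWRIGHT_BROWSERS_PATH"] = path
    p = sync_playwright().start()
    browser = getattr(p, engine).launch(headless=headless, args=args)
    page = browser.new_context(viewport=viewport).new_page()
    page.goto(home)
    return p, browser, page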
pygpt_net/plugin/cmd_mouse_control/plugin.py
@@ -223,7 +223,6 @@ class Plugin(BasePlugin):
                 and response["result"]["no_screenshot"]):
             with_screenshot = False
         if ctx is not None:
-            print("APPEND RESPONSE", response)
             self.prepare_reply_ctx(response, ctx)
             ctx.reply = True
             self.handle_delayed(ctx, with_screenshot)
@@ -356,14 +355,14 @@ class Plugin(BasePlugin):
                            f"1) Please install Playwright browser(s) on host machine: \n\n"
                            f"pip install playwright && playwright install {engine}\n\n"
                            f"2) Set path to browsers directory in `Mouse And Keyboard` plugin settings option: "
-                           f"`Sandbox: Playwright browsers path`.")
+                           f"`Sandbox (Playwright): Browsers directory`.")
         else:
             if os.environ.get("APPIMAGE") and not cfg_path:  # set path is required for AppImage version
-                err_msg = (f"Playwright browsers path is not set - "
+                err_msg = (f"Playwright browsers path is not set:\n\n "
                            f"1) Please install Playwright browser(s) on host machine: \n\n"
                            f"pip install playwright && playwright install {engine}\n\n"
                            f"2) Set path to browsers directory in `Mouse And Keyboard` plugin settings option: "
-                           f"`Sandbox: Playwright browsers path`.")
+                           f"`Sandbox (Playwright): Browsers directory`.")

         if err_msg is not None:
             self.error(err_msg)
pygpt_net/plugin/cmd_mouse_control/worker.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2026.01.02 02:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 import sys
@@ -782,6 +782,7 @@ class Worker(BaseWorker):
         mouse = MouseController()
         mouse_pos_x, mouse_pos_y = mouse.position
         return {
+            "result": "success",
            'current_step': current_step,
            'screen_w': screen_x,
            'screen_h': screen_y,
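Both workers (the host worker above and the sandbox variant in the next hunk) now prepend an explicit "result" key to the status payload they return, presumably so downstream handling sees an unambiguous success marker. An illustrative shape, with placeholder values:

# Illustrative only: shape of the status dict returned by the get-status step,
# with the newly added "result" key; values are placeholders.
status_example = {
    "result": "success",
    "current_step": "open the browser",
    "screen_w": 1440,
    "screen_h": 900,
    # ...plus the remaining position fields in the original return dict
}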
pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2026.01.02 02:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 import time
@@ -231,6 +231,7 @@ class Worker(BaseWorker):
         current_step = self.get_param(item, "current_step", "")
         screen_w, screen_h = self.viewport_w, self.viewport_h
         return {
+            "result": "success",
            "current_step": current_step,
            "screen_w": screen_w,
            "screen_h": screen_h,
pygpt_net/provider/api/anthropic/__init__.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.15 01:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

 from typing import Optional, Dict, Any
@@ -18,9 +18,10 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_IMAGE,
-    MODE_RESEARCH,
+    MODE_RESEARCH, MODE_COMPUTER,
 )
 from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.core.types.chunk import ChunkType
 from pygpt_net.item.model import ModelItem

 from .chat import Chat
@@ -28,6 +29,8 @@ from .tools import Tools
 from .vision import Vision
 from .audio import Audio
 from .image import Image
+from .remote_tools import RemoteTools
+from .computer import Computer


 class ApiAnthropic:
@@ -43,6 +46,8 @@ class ApiAnthropic:
         self.vision = Vision(window)
         self.audio = Audio(window)  # stub helpers (no official audio out/in in SDK as of now)
         self.image = Image(window)  # stub: no image generation in Anthropic
+        self.remote_tools = RemoteTools(window)
+        self.computer = Computer(window)
         self.client: Optional[anthropic.Anthropic] = None
         self.locked = False
         self.last_client_args: Optional[Dict[str, Any]] = None
@@ -94,15 +99,17 @@ class ApiAnthropic:
         stream = context.stream
         ctx = context.ctx
         ai_name = ctx.output_name if ctx else "assistant"
-
-        # Anthropic: no Responses API; stream events are custom to Anthropic
-        if ctx:
-            ctx.use_responses_api = False
-
         used_tokens = 0
         response = None
-
-        if mode in (MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH):
+        ctx.chunk_type = ChunkType.ANTHROPIC
+
+        if mode in (
+            MODE_COMPLETION,
+            MODE_CHAT,
+            MODE_AUDIO,
+            MODE_RESEARCH,
+            MODE_COMPUTER
+        ):
             # MODE_AUDIO fallback: treat as normal chat (no native audio API)
             response = self.chat.send(context=context, extra=extra)
             used_tokens = self.chat.get_used_tokens()
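`ChunkType` lives in the new `pygpt_net/core/types/chunk.py`, and tagging `ctx.chunk_type = ChunkType.ANTHROPIC` presumably lets the unified stream worker (`controller/chat/stream_worker.py`, renamed from `handler/worker.py`) pick the provider-specific event parser. A minimal sketch of that idea, assuming a dispatch-by-chunk-type design; the `DEFAULT` member and the dispatch function are guesses, not the actual implementation:

# Sketch only: dispatch stream parsing by the chunk type tagged on the ctx item.
# ChunkType.ANTHROPIC is referenced in the diff; everything else here is assumed.
from enum import Enum, auto

class ChunkType(Enum):
    DEFAULT = auto()     # assumed fallback member
    ANTHROPIC = auto()   # set by ApiAnthropic.call() above

def extract_text_delta(chunk_type: ChunkType, event) -> str:
    if chunk_type is ChunkType.ANTHROPIC:
        # Anthropic streams content_block_delta events carrying delta.text
        if getattr(event, "type", "") == "content_block_delta":
            return getattr(getattr(event, "delta", None), "text", "") or ""
        return ""
    # default: assume the event is already plain text
    return str(event)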
pygpt_net/provider/api/anthropic/chat.py
@@ -6,12 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.05 01:00:00 #
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #

-from typing import Optional, Dict, Any, List
+import json
+from typing import Optional, Dict, Any, List, Set

-from pygpt_net.core.types import MODE_CHAT, MODE_AUDIO
+from pygpt_net.core.types import MODE_CHAT, MODE_AUDIO, MODE_COMPUTER
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.attachment import AttachmentItem
 from pygpt_net.item.ctx import CtxItem
@@ -51,20 +52,43 @@ class Chat:
         api = self.window.core.api.anthropic
         client: anthropic.Anthropic = api.get_client(context.mode, model)

-        msgs = self.build_input(
-            prompt=prompt,
-            system_prompt=system_prompt,
+        # Tool result turn (Computer Use): if previous assistant emitted tool_use and we have tool output,
+        # build a minimal message triplet: [user(prompt that triggered tool), assistant(tool_use), user(tool_result + images)].
+        tool_result_messages = self._build_tool_result_messages_if_needed(
             model=model,
             history=context.history,
             attachments=attachments,
-            multimodal_ctx=multimodal_ctx,
         )
+        if tool_result_messages is not None:
+            msgs = tool_result_messages
+        else:
+            msgs = self.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=context.history,
+                attachments=attachments,
+                multimodal_ctx=multimodal_ctx,
+            )

         self.reset_tokens()
         count_msgs = self._build_count_messages(prompt, system_prompt, model, context.history)
         self.input_tokens += self.window.core.tokens.from_messages(count_msgs, model.id)

+        # Build tools (client tools + Anthropic server tools)
         tools = api.tools.get_all_tools(model, functions)
+
+        # Enable Computer Use tool in computer mode (use the official Tool/ComputerUse object)
+        if mode == MODE_COMPUTER or (model and isinstance(model.id, str) and "computer-use" in model.id.lower()):
+            tool = self.window.core.api.anthropic.computer.get_tool()
+            tools = [tool]  # reset tools to only Computer Use (multiple tools not supported together)
+
+        # MCP: servers from config
+        mcp_servers = self._load_mcp_servers_from_cfg()
+
+        # Compute betas required by selected server tools or MCP
+        betas = self._compute_required_betas(tools, mcp_servers)
+
         max_tokens = context.max_tokens if context.max_tokens else 1024
         temperature = self.window.core.config.get('temperature')
         top_p = self.window.core.config.get('top_p')
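`computer.get_tool()` comes from the new `provider/api/anthropic/computer.py`, which this diff does not show in full. For reference, Anthropic's Computer Use tool declaration looks roughly like the following; this matches the `computer_` type prefix and the `computer-use-2025-01-24` beta handled in `_compute_required_betas` below, but the exact object returned by the helper may differ, and the viewport values simply mirror the plugin defaults shown earlier:

# Illustrative only: a Computer Use tool declaration in Anthropic's documented shape.
# The real computer.get_tool() may return an SDK object instead of a plain dict.
def example_computer_use_tool(width: int = 1440, height: int = 900) -> dict:
    return {
        "type": "computer_20250124",   # tool version paired with the computer-use-2025-01-24 beta
        "name": "computer",
        "display_width_px": width,
        "display_height_px": height,
    }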
@@ -83,14 +107,25 @@ class Chat:
             params["top_p"] = top_p
         if tools:  # only include when non-empty list
             params["tools"] = tools  # must be a valid list per API
+        if mcp_servers:
+            params["mcp_servers"] = mcp_servers  # MCP connector servers per docs

         if mode == MODE_AUDIO:
             stream = False  # no native TTS

+        # Decide whether to call stable or beta endpoint
+        use_beta = len(betas) > 0
+
         if stream:
-            return client.messages.create(stream=True, **params)
+            if use_beta:
+                return client.beta.messages.create(stream=True, betas=list(betas), **params)
+            else:
+                return client.messages.create(stream=True, **params)
         else:
-            return client.messages.create(**params)
+            if use_beta:
+                return client.beta.messages.create(betas=list(betas), **params)
+            else:
+                return client.messages.create(**params)

     def unpack_response(self, mode: str, response: Message, ctx: CtxItem):
         """
@@ -115,7 +150,19 @@ class Chat:
             ctx.set_tokens(p, c)
             if not isinstance(ctx.extra, dict):
                 ctx.extra = {}
-            ctx.extra["usage"] = {"vendor": "anthropic", "input_tokens": p, "output_tokens": c}
+            # include server_tool_use counts if present
+            server_tool_use = {}
+            if hasattr(usage, "server_tool_use"):
+                try:
+                    server_tool_use = dict(getattr(usage, "server_tool_use"))
+                except Exception:
+                    server_tool_use = {}
+            ctx.extra["usage"] = {
+                "vendor": "anthropic",
+                "input_tokens": p,
+                "output_tokens": c,
+                "server_tool_use": server_tool_use,
+            }
         except Exception:
             pass
@@ -125,6 +172,12 @@ class Chat:
         except Exception:
             pass

+        # Collect fetched URLs from web_fetch_tool_result blocks
+        try:
+            self._collect_web_fetch_urls(response, ctx)
+        except Exception:
+            pass
+
     def extract_text(self, response: Message) -> str:
         """
         Extract text from response content blocks.
@@ -210,6 +263,39 @@ class Chat:
                 if u not in ctx.urls:
                     ctx.urls.append(u)

+    def _collect_web_fetch_urls(self, response: Message, ctx: CtxItem):
+        """
+        Collect URLs from web_fetch_tool_result blocks and attach to ctx.urls.
+
+        :param response: Message response from API
+        :param ctx: CtxItem to update
+        """
+        urls: List[str] = []
+        try:
+            for blk in getattr(response, "content", []) or []:
+                if getattr(blk, "type", "") == "web_fetch_tool_result":
+                    content = getattr(blk, "content", {}) or {}
+                    if isinstance(content, dict):
+                        if content.get("type") == "web_fetch_result":
+                            u = (content.get("url") or "").strip()
+                            if u.startswith("http://") or u.startswith("https://"):
+                                urls.append(u)
+                        # citations may embed multiple URLs
+                        if content.get("type") == "web_fetch_result" and isinstance(content.get("citations"), list):
+                            for cit in content["citations"]:
+                                u = (cit.get("url") or "").strip()
+                                if u.startswith("http://") or u.startswith("https://"):
+                                    urls.append(u)
+        except Exception:
+            pass
+
+        if urls:
+            if ctx.urls is None:
+                ctx.urls = []
+            for u in urls:
+                if u not in ctx.urls:
+                    ctx.urls.append(u)
+
     def build_input(
         self,
         prompt: str,
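`_collect_web_fetch_urls` only inspects a few fields of the `web_fetch_tool_result` block; reconstructed from those checks, the block it expects is shaped roughly like this (URLs are placeholders):

# Reconstructed from the checks in _collect_web_fetch_urls above; URLs are placeholders.
web_fetch_block_example = {
    "type": "web_fetch_tool_result",
    "content": {
        "type": "web_fetch_result",
        "url": "https://example.com/article",
        "citations": [                     # optional; each citation may carry its own URL
            {"url": "https://example.com/source"},
        ],
    },
}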
@@ -271,6 +357,7 @@ class Chat:
         if attachments:
             img_parts = self.window.core.api.anthropic.vision.build_blocks(content, attachments)
             parts.extend(img_parts)
+            content = ""  # image-first; do not duplicate text if build_blocks already added it
         if content:
             parts.append({"type": "text", "text": str(content)})

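The next hunk adds the MCP and Computer Use helpers. The `mcp_servers` list that `_load_mcp_servers_from_cfg` returns below (and that is passed as `params["mcp_servers"]` above) is expected in Anthropic's MCP connector entry format; one plausible value for the `remote_tools.anthropic.mcp.mcp_servers` config key, with a placeholder URL and name:

# Plausible config value, following Anthropic's MCP connector entry format;
# the URL and name are placeholders, not values shipped with pygpt-net.
example_mcp_servers = [
    {
        "type": "url",
        "url": "https://mcp.example.com/sse",
        "name": "example-server",
        # "authorization_token": "...",  # optional, when the server requires auth
    }
]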
@@ -338,4 +425,165 @@ class Chat:

         :return: used input tokens
         """
-        return self.input_tokens
+        return self.input_tokens
+
+    def _load_mcp_servers_from_cfg(self) -> List[dict]:
+        """
+        Load MCP servers definition from config JSON string(s).
+
+        Supports keys:
+        - remote_tools.anthropic.mcp.mcp_servers
+
+        :return: list of MCP servers dicts
+        """
+        cfg = self.window.core.config
+        enabled = cfg.get("remote_tools.anthropic.mcp", False)
+        if not enabled:
+            return []  # do not load if MCP is disabled
+        raw = cfg.get("remote_tools.anthropic.mcp.mcp_servers")
+        if not raw:
+            return []
+        try:
+            if isinstance(raw, (list, dict)):
+                # ensure list
+                return list(raw) if isinstance(raw, list) else [raw]
+            return json.loads(raw) or []
+        except Exception:
+            return []
+
+    def _compute_required_betas(self, tools: List[dict], mcp_servers: List[dict]) -> Set[str]:
+        """
+        Compute required beta headers for enabled server tools.
+
+        :param tools: Final tools list
+        :param mcp_servers: MCP servers list
+        :return: set of beta header strings
+        """
+        betas: Set[str] = set()
+        is_mcp = False
+        for t in tools or []:
+            ttype = str(t.get("type") or "")
+            if ttype.startswith("web_fetch_"):
+                betas.add("web-fetch-2025-09-10")
+            elif ttype.startswith("code_execution_"):
+                betas.add("code-execution-2025-08-25")
+            elif ttype in ("tool_search_tool_regex_20251119", "tool_search_tool_bm25_20251119"):
+                betas.add("advanced-tool-use-2025-11-20")
+            elif ttype == "mcp_toolset":
+                is_mcp = True
+                betas.add("mcp-client-2025-11-20")
+            elif ttype.startswith("computer_"):
+                betas.add("computer-use-2025-01-24")
+        if is_mcp and mcp_servers:
+            betas.add("mcp-client-2025-11-20")
+        return betas
+
+    # -------------------------- Tool Result helpers (Anthropic Computer Use) -------------------------- #
+
+    def _build_tool_result_messages_if_needed(
+        self,
+        model: ModelItem,
+        history: Optional[List[CtxItem]],
+        attachments: Optional[Dict[str, AttachmentItem]],
+    ) -> Optional[List[dict]]:
+        """
+        If the previous assistant turn emitted tool_use (computer) and the plugin produced tool_output,
+        construct a minimal continuation with tool_result content block(s), as required by Anthropic.
+
+        Returns a messages list or None.
+        """
+        if not self.window.core.config.get('use_context'):
+            return None
+
+        items: List[CtxItem] = self.window.core.ctx.get_history(
+            history,
+            model.id,
+            MODE_CHAT,
+            self.window.core.tokens.from_user("", ""),
+            self._fit_ctx(model),
+        )
+        if not items:
+            return None
+
+        last = items[-1]
+        if not isinstance(getattr(last, "extra", None), dict):
+            return None
+
+        tool_uses = last.extra.get("anthropic_tool_uses") or []
+        tool_output = last.extra.get("tool_output") or []
+        if not tool_uses or not tool_output:
+            return None
+
+        # Build prior user message (the one that triggered the tool use)
+        prior_user_text = ""
+        if len(items) >= 2 and getattr(items[-2], "final_input", None):
+            prior_user_text = str(items[-2].final_input)
+        elif getattr(last, "input", None):
+            prior_user_text = str(last.input)
+
+        user_msg_1 = None
+        if prior_user_text:
+            user_msg_1 = {"role": "user", "content": [{"type": "text", "text": prior_user_text}]}
+
+        # Recreate assistant tool_use block(s)
+        assistant_parts: List[dict] = []
+        for tu in tool_uses:
+            tid = str(tu.get("id", "") or "")
+            name = str(tu.get("name", "") or "computer")
+            inp = tu.get("input", {}) or {}
+            assistant_parts.append({
+                "type": "tool_use",
+                "id": tid,
+                "name": name,
+                "input": inp,
+            })
+        assistant_msg = {"role": "assistant", "content": assistant_parts} if assistant_parts else None
+
+        # Build tool_result with last tool output; attach screenshot images (if any) as additional blocks
+        result_text = self._best_tool_result_text(tool_output)
+        last_tool_use_id = str(tool_uses[-1].get("id", "") or "")
+
+        tool_result_block = {
+            "type": "tool_result",
+            "tool_use_id": last_tool_use_id,
+            "content": [{"type": "text", "text": result_text}],
+        }
+
+        # Convert current attachments to image blocks and append after tool_result in the same user message
+        image_blocks: List[dict] = []
+        if attachments:
+            img_parts = self.window.core.api.anthropic.vision.build_blocks("", attachments)
+            for part in img_parts:
+                if isinstance(part, dict) and part.get("type") in ("image", "input_image", "document"):
+                    image_blocks.append(part)
+
+        user_msg_2 = {"role": "user", "content": [tool_result_block] + image_blocks}
+
+        out: List[dict] = []
+        if user_msg_1:
+            out.append(user_msg_1)
+        if assistant_msg:
+            out.append(assistant_msg)
+        out.append(user_msg_2)
+        return out
+
+    @staticmethod
+    def _best_tool_result_text(tool_output: List[dict]) -> str:
+        """
+        Build a compact text payload from plugin tool_output list.
+        Prefers the last item with 'result' field; falls back to the last dict JSON.
+        """
+        if not tool_output:
+            return "ok"
+        last = tool_output[-1]
+        try:
+            if isinstance(last, dict):
+                if "result" in last:
+                    val = last["result"]
+                    if isinstance(val, (dict, list)):
+                        return json.dumps(val, ensure_ascii=False)
+                    return str(val)
+                return json.dumps(last, ensure_ascii=False)
+            return str(last)
+        except Exception:
+            return "ok"
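Put together, a Computer Use continuation turn built by `_build_tool_result_messages_if_needed` has this shape; ids, the action, and the texts are placeholders, and screenshot image blocks, when present, are appended after the `tool_result` block in the final user message:

# Illustrative shape of the [user, assistant(tool_use), user(tool_result)] triplet
# built above; ids, action and text are placeholders.
continuation_example = [
    {
        "role": "user",
        "content": [{"type": "text", "text": "Open the browser and take a screenshot"}],
    },
    {
        "role": "assistant",
        "content": [{
            "type": "tool_use",
            "id": "toolu_example_id",
            "name": "computer",
            "input": {"action": "screenshot"},
        }],
    },
    {
        "role": "user",
        "content": [{
            "type": "tool_result",
            "tool_use_id": "toolu_example_id",
            "content": [{"type": "text", "text": "success"}],
        }],
    },
]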