pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +29 -34
  17. pygpt_net/data/config/config.json +10 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +105 -0
  20. pygpt_net/data/css/style.dark.css +2 -3
  21. pygpt_net/data/css/style.light.css +2 -3
  22. pygpt_net/data/locale/locale.de.ini +3 -1
  23. pygpt_net/data/locale/locale.en.ini +19 -1
  24. pygpt_net/data/locale/locale.es.ini +3 -1
  25. pygpt_net/data/locale/locale.fr.ini +3 -1
  26. pygpt_net/data/locale/locale.it.ini +3 -1
  27. pygpt_net/data/locale/locale.pl.ini +4 -2
  28. pygpt_net/data/locale/locale.uk.ini +3 -1
  29. pygpt_net/data/locale/locale.zh.ini +3 -1
  30. pygpt_net/provider/api/__init__.py +5 -3
  31. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  32. pygpt_net/provider/api/anthropic/audio.py +30 -0
  33. pygpt_net/provider/api/anthropic/chat.py +341 -0
  34. pygpt_net/provider/api/anthropic/image.py +25 -0
  35. pygpt_net/provider/api/anthropic/tools.py +266 -0
  36. pygpt_net/provider/api/anthropic/vision.py +142 -0
  37. pygpt_net/provider/api/google/chat.py +2 -2
  38. pygpt_net/provider/api/google/tools.py +58 -48
  39. pygpt_net/provider/api/google/vision.py +7 -1
  40. pygpt_net/provider/api/openai/chat.py +1 -0
  41. pygpt_net/provider/api/openai/vision.py +6 -0
  42. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  43. pygpt_net/provider/api/x_ai/audio.py +32 -0
  44. pygpt_net/provider/api/x_ai/chat.py +968 -0
  45. pygpt_net/provider/api/x_ai/image.py +208 -0
  46. pygpt_net/provider/api/x_ai/remote.py +262 -0
  47. pygpt_net/provider/api/x_ai/tools.py +120 -0
  48. pygpt_net/provider/api/x_ai/vision.py +119 -0
  49. pygpt_net/provider/core/config/patch.py +28 -0
  50. pygpt_net/provider/llms/anthropic.py +4 -2
  51. pygpt_net/ui/base/config_dialog.py +5 -11
  52. pygpt_net/ui/dialog/models.py +2 -4
  53. pygpt_net/ui/dialog/plugins.py +40 -43
  54. pygpt_net/ui/widget/element/labels.py +19 -3
  55. pygpt_net/ui/widget/textarea/web.py +1 -1
  56. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
  57. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
  58. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  59. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,341 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ from typing import Optional, Dict, Any, List
+
+ from pygpt_net.core.types import MODE_CHAT, MODE_AUDIO
+ from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
+ from pygpt_net.item.attachment import AttachmentItem
+ from pygpt_net.item.ctx import CtxItem
+ from pygpt_net.item.model import ModelItem
+
+ import anthropic
+ from anthropic.types import Message
+
+
+ class Chat:
+     def __init__(self, window=None):
+         """
+         Anthropic chat / multimodal API wrapper.
+
+         :param window: Window instance
+         """
+         self.window = window
+         self.input_tokens = 0
+
+     def send(self, context: BridgeContext, extra: Optional[Dict[str, Any]] = None):
+         """
+         Call Anthropic Messages API for chat / multimodal.
+
+         :param context: BridgeContext
+         :param extra: Extra parameters (not used)
+         :return: Message or generator of Message (if streaming)
+         """
+         prompt = context.prompt
+         stream = context.stream
+         system_prompt = context.system_prompt
+         model = context.model
+         functions = context.external_functions
+         attachments = context.attachments
+         multimodal_ctx = context.multimodal_ctx
+         mode = context.mode
+         ctx = context.ctx or CtxItem()
+         api = self.window.core.api.anthropic
+         client: anthropic.Anthropic = api.get_client(context.mode, model)
+
+         msgs = self.build_input(
+             prompt=prompt,
+             system_prompt=system_prompt,
+             model=model,
+             history=context.history,
+             attachments=attachments,
+             multimodal_ctx=multimodal_ctx,
+         )
+
+         self.reset_tokens()
+         count_msgs = self._build_count_messages(prompt, system_prompt, model, context.history)
+         self.input_tokens += self.window.core.tokens.from_messages(count_msgs, model.id)
+
+         tools = api.tools.get_all_tools(model, functions)
+         max_tokens = context.max_tokens if context.max_tokens else 1024
+         temperature = self.window.core.config.get('temperature')
+         top_p = self.window.core.config.get('top_p')
+
+         params: Dict[str, Any] = {
+             "model": model.id,
+             "messages": msgs,
+             "max_tokens": max_tokens,
+         }
+         # Add optional fields only if provided
+         if system_prompt:
+             params["system"] = system_prompt  # SDK expects string or blocks, not None
+         if temperature is not None:
+             params["temperature"] = temperature  # keep as-is; upstream config controls the type
+         if top_p is not None:
+             params["top_p"] = top_p
+         if tools:  # only include when non-empty list
+             params["tools"] = tools  # must be a valid list per API
+
+         if mode == MODE_AUDIO:
+             stream = False  # no native TTS
+
+         if stream:
+             return client.messages.create(stream=True, **params)
+         else:
+             return client.messages.create(**params)
+
+     def unpack_response(self, mode: str, response: Message, ctx: CtxItem):
+         """
+         Unpack non-streaming response and set context.
+
+         :param mode: Mode (chat/audio)
+         :param response: Message response from API
+         :param ctx: CtxItem to update
+         """
+         ctx.output = self.extract_text(response)
+
+         calls = self.extract_tool_calls(response)
+         if calls:
+             ctx.tool_calls = calls
+
+         # Usage
+         try:
+             usage = getattr(response, "usage", None)
+             if usage:
+                 p = getattr(usage, "input_tokens", 0) or 0
+                 c = getattr(usage, "output_tokens", 0) or 0
+                 ctx.set_tokens(p, c)
+                 if not isinstance(ctx.extra, dict):
+                     ctx.extra = {}
+                 ctx.extra["usage"] = {"vendor": "anthropic", "input_tokens": p, "output_tokens": c}
+         except Exception:
+             pass
+
+         # Collect web search citations (web_search_tool_result blocks)
+         try:
+             self._collect_web_search_urls(response, ctx)
+         except Exception:
+             pass
+
+     def extract_text(self, response: Message) -> str:
+         """
+         Extract text from response content blocks.
+
+         Join all text blocks into a single string.
+
+         :param response: Message response from API
+         :return: Extracted text
+         """
+         out: List[str] = []
+         try:
+             for blk in getattr(response, "content", []) or []:
+                 if getattr(blk, "type", "") == "text" and getattr(blk, "text", None):
+                     out.append(str(blk.text))
+         except Exception:
+             pass
+         return "".join(out).strip()
+
+     def extract_tool_calls(self, response: Message) -> List[dict]:
+         """
+         Extract tool_use blocks as app tool calls.
+
+         Each tool call is a dict with keys: id (str), type="function", function (dict with name and arguments).
+
+         :param response: Message response from API
+         :return: List of tool calls
+         """
+         out: List[dict] = []
+
+         def to_plain(obj):
+             try:
+                 if hasattr(obj, "model_dump"):
+                     return obj.model_dump()
+                 if hasattr(obj, "to_dict"):
+                     return obj.to_dict()
+             except Exception:
+                 pass
+             if isinstance(obj, dict):
+                 return {k: to_plain(v) for k, v in obj.items()}
+             if isinstance(obj, (list, tuple)):
+                 return [to_plain(x) for x in obj]
+             return obj
+
+         try:
+             for blk in getattr(response, "content", []) or []:
+                 if getattr(blk, "type", "") == "tool_use":
+                     out.append({
+                         "id": getattr(blk, "id", "") or "",
+                         "type": "function",
+                         "function": {
+                             "name": getattr(blk, "name", "") or "",
+                             "arguments": to_plain(getattr(blk, "input", {}) or {}),
+                         }
+                     })
+         except Exception:
+             pass
+         return out
+
+     def _collect_web_search_urls(self, response: Message, ctx: CtxItem):
+         """
+         Collect URLs from web_search_tool_result blocks and attach to ctx.urls.
+
+         :param response: Message response from API
+         :param ctx: CtxItem to update
+         """
+         urls: List[str] = []
+         try:
+             for blk in getattr(response, "content", []) or []:
+                 if getattr(blk, "type", "") == "web_search_tool_result":
+                     content = getattr(blk, "content", None) or []
+                     for item in content:
+                         if isinstance(item, dict) and item.get("type") == "web_search_result":
+                             u = (item.get("url") or "").strip()
+                             if u.startswith("http://") or u.startswith("https://"):
+                                 urls.append(u)
+         except Exception:
+             pass
+
+         if urls:
+             if ctx.urls is None:
+                 ctx.urls = []
+             for u in urls:
+                 if u not in ctx.urls:
+                     ctx.urls.append(u)
+
+     def build_input(
+             self,
+             prompt: str,
+             system_prompt: str,
+             model: ModelItem,
+             history: Optional[List[CtxItem]] = None,
+             attachments: Optional[Dict[str, AttachmentItem]] = None,
+             multimodal_ctx: Optional[MultimodalContext] = None) -> List[dict]:
+         """
+         Build Anthropic messages list.
+
+         :param prompt: User prompt
+         :param system_prompt: System prompt
+         :param model: ModelItem
+         :param history: Optional list of CtxItem for context
+         :param attachments: Optional dict of attachments (id -> AttachmentItem)
+         :param multimodal_ctx: Optional MultimodalContext
+         :return: List of messages for API
+         """
+         messages: List[dict] = []
+
+         if self.window.core.config.get('use_context'):
+             items = self.window.core.ctx.get_history(
+                 history,
+                 model.id,
+                 MODE_CHAT,
+                 self.window.core.tokens.from_user(prompt, system_prompt),
+                 self._fit_ctx(model),
+             )
+             for item in items:
+                 if item.final_input:
+                     messages.append({"role": "user", "content": str(item.final_input)})
+                 if item.final_output:
+                     messages.append({"role": "assistant", "content": str(item.final_output)})
+
+         parts = self._build_user_parts(
+             content=str(prompt or ""),
+             attachments=attachments,
+             multimodal_ctx=multimodal_ctx,
+         )
+         messages.append({"role": "user", "content": parts if parts else [{"type": "text", "text": str(prompt or "")}]})
+         return messages
+
+     def _build_user_parts(
+             self,
+             content: str,
+             attachments: Optional[Dict[str, AttachmentItem]] = None,
+             multimodal_ctx: Optional[MultimodalContext] = None) -> List[dict]:
+         """
+         Build user content blocks (image + text).
+
+         :param content: Text content
+         :param attachments: Optional dict of attachments (id -> AttachmentItem)
+         :param multimodal_ctx: Optional MultimodalContext
+         :return: List of content blocks
+         """
+         parts: List[dict] = []
+         self.window.core.api.anthropic.vision.reset()
+         if attachments:
+             img_parts = self.window.core.api.anthropic.vision.build_blocks(content, attachments)
+             parts.extend(img_parts)
+         if content:
+             parts.append({"type": "text", "text": str(content)})
+
+         # No input_audio supported in SDK at the time of writing
+         if multimodal_ctx and getattr(multimodal_ctx, "is_audio_input", False):
+             pass
+
+         return parts
+
+     def _fit_ctx(self, model: ModelItem) -> int:
+         """
+         Fit context length to model limits.
+
+         :param model: ModelItem
+         :return: Max context tokens
+         """
+         max_ctx_tokens = self.window.core.config.get('max_total_tokens')
+         if model and model.ctx and 0 < model.ctx < max_ctx_tokens:
+             max_ctx_tokens = model.ctx
+         return max_ctx_tokens
+
+     def _build_count_messages(
+             self,
+             prompt: str,
+             system_prompt: str,
+             model: ModelItem,
+             history: Optional[List[CtxItem]] = None) -> List[dict]:
+         """
+         Build messages for token counting (without attachments).
+
+         :param prompt: User prompt
+         :param system_prompt: System prompt
+         :param model: ModelItem
+         :param history: Optional list of CtxItem for context
+         :return: List of messages for token counting
+         """
+         messages = []
+         if system_prompt:
+             messages.append({"role": "system", "content": system_prompt})
+         if self.window.core.config.get('use_context'):
+             used_tokens = self.window.core.tokens.from_user(prompt, system_prompt)
+             items = self.window.core.ctx.get_history(
+                 history,
+                 model.id,
+                 MODE_CHAT,
+                 used_tokens,
+                 self._fit_ctx(model),
+             )
+             for item in items:
+                 if item.final_input:
+                     messages.append({"role": "user", "content": str(item.final_input)})
+                 if item.final_output:
+                     messages.append({"role": "assistant", "content": str(item.final_output)})
+
+         messages.append({"role": "user", "content": str(prompt or "")})
+         return messages
+
+     def reset_tokens(self):
+         """Reset input tokens counter."""
+         self.input_tokens = 0
+
+     def get_used_tokens(self) -> int:
+         """
+         Get used input tokens count.
+
+         :return: used input tokens
+         """
+         return self.input_tokens
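Editor's note: the Chat wrapper added above delegates to the Anthropic Python SDK's Messages API. The standalone sketch below (not part of the package; the model ID and prompt are placeholder assumptions) shows the same call shape the wrapper builds: a params dict passed to client.messages.create, with stream=True returning raw events and the non-streaming path returning a Message whose text blocks are joined, as extract_text() does.

    # Minimal sketch of the call shape used by Chat.send() above.
    # Assumes ANTHROPIC_API_KEY is set; the model ID is a placeholder.
    import anthropic

    client = anthropic.Anthropic()

    params = {
        "model": "claude-3-5-sonnet-latest",  # placeholder model ID
        "max_tokens": 1024,
        "system": "You are a helpful assistant.",
        "messages": [{"role": "user", "content": "Hello!"}],
    }

    # Non-streaming: join the text blocks, mirroring extract_text().
    msg = client.messages.create(**params)
    text = "".join(b.text for b in msg.content if b.type == "text")
    print(text, msg.usage.input_tokens, msg.usage.output_tokens)

    # Streaming: stream=True yields raw events with incremental text deltas.
    for event in client.messages.create(stream=True, **params):
        if event.type == "content_block_delta" and event.delta.type == "text_delta":
            print(event.delta.text, end="")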
@@ -0,0 +1,25 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ from typing import Optional, Dict
+ from pygpt_net.core.bridge.context import BridgeContext
+
+
+ class Image:
+     def __init__(self, window=None):
+         self.window = window
+
+     def generate(self, context: BridgeContext, extra: Optional[Dict] = None, sync: bool = True) -> bool:
+         """
+         Anthropic does not support image generation; only vision input.
+         """
+         # Inform handlers that nothing was generated
+         return False
@@ -0,0 +1,266 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.09.05 01:00:00 #
+ # ================================================== #
+
+ import json
+ from typing import List, Any, Dict, Optional
+
+ from pygpt_net.item.model import ModelItem
+
+
+ class Tools:
+     def __init__(self, window=None):
+         """
+         Tools mapper for Anthropic Messages API.
+
+         :param window: Window instance
+         """
+         self.window = window
+
+     def _sanitize_schema(self, schema: Any) -> Any:
+         """
+         Sanitize JSON Schema dict for Anthropic input_schema.
+
+         - Remove unsupported or risky keywords.
+         - Normalize 'type'.
+         - Ensure properties/items recursively valid.
+
+         :param schema: JSON Schema (dict or list)
+         :return: Sanitized JSON Schema (dict)
+         """
+         # 1) entry point: if list, take the first element
+         if isinstance(schema, list):
+             # If it's a list of schemas/types, take the first one (after filtering out empty ones)
+             return self._sanitize_schema(schema[0]) if schema else {}
+
+         if not isinstance(schema, dict):
+             return schema
+
+         # 2) remove unsupported keys
+         banned = {
+             "unevaluatedProperties",
+             "$defs", "$ref", "$schema", "$id",
+             "examples", "readOnly", "writeOnly", "nullable",
+             "dependentSchemas", "dependentRequired",
+             "oneOf", "anyOf", "allOf", "patternProperties", "dependencies",
+             "additional_properties",  # underscore
+             "additionalProperties",  # camelCase
+         }
+         for k in list(schema.keys()):
+             if k in banned:
+                 schema.pop(k, None)
+
+         # 3) normalize 'type'
+         t = schema.get("type")
+
+         # a) list of types -> take the first non-null
+         if isinstance(t, list):
+             t_no_null = [x for x in t if isinstance(x, str) and x.lower() != "null"]
+             schema["type"] = t_no_null[0] if t_no_null else "object"
+             t = schema["type"]
+
+         # b) if 'type' is not a string (e.g., dict), try to infer or remove it
+         if not isinstance(t, str):
+             if "properties" in schema:
+                 schema["type"] = "object"
+             elif "items" in schema:
+                 schema["type"] = "array"
+             elif "enum" in schema and isinstance(schema["enum"], list) and all(
+                     isinstance(x, str) for x in schema["enum"]):
+                 schema["type"] = "string"
+             else:
+                 # no reasonable type — leave without 'type' and continue
+                 schema.pop("type", None)
+         else:
+             schema["type"] = t.lower()
+
+         # Safe form of type for further comparisons
+         t_val = schema.get("type")
+         type_l = t_val.lower() if isinstance(t_val, str) else ""
+
+         # 4) enum only for string
+         if "enum" in schema and type_l != "string":
+             schema.pop("enum", None)
+
+         # 5) Object
+         if type_l == "object":
+             props = schema.get("properties")
+             if not isinstance(props, dict):
+                 props = {}
+             clean_props: Dict[str, Any] = {}
+             for pname, pval in props.items():
+                 clean_props[pname] = self._sanitize_schema(pval)
+             schema["properties"] = clean_props
+
+             req = schema.get("required")
+             if not (isinstance(req, list) and all(isinstance(x, str) for x in req) and len(req) > 0):
+                 schema.pop("required", None)
+
+         # 6) Array
+         elif type_l == "array":
+             items = schema.get("items")
+             if isinstance(items, list):
+                 items = items[0] if items else {"type": "string"}
+             if not isinstance(items, dict):
+                 items = {"type": "string"}
+             schema["items"] = self._sanitize_schema(items)
+
+         # 7) Recursion over remaining nestings,
+         # but skip 'properties' and 'items' — we've already sanitized them
+         for k, v in list(schema.items()):
+             if k in ("properties", "items"):
+                 continue
+             if isinstance(v, dict):
+                 schema[k] = self._sanitize_schema(v)
+             elif isinstance(v, list):
+                 schema[k] = [self._sanitize_schema(x) for x in v]
+
+         return schema
+
+     def prepare(self, model: ModelItem, functions: list) -> List[dict]:
+         """
+         Prepare Anthropic tool definitions: [{"name","description","input_schema"}].
+
+         :param model: ModelItem
+         :param functions: List of app function dicts
+         :return: List of tool dicts for Anthropic
+         """
+         if not functions or not isinstance(functions, list):
+             return []
+
+         tools: List[dict] = []
+         for fn in functions:
+             name = str(fn.get("name") or "").strip()
+             if not name:
+                 continue
+             desc = fn.get("desc") or ""
+
+             params: Optional[dict] = {}
+             if fn.get("params"):
+                 try:
+                     params = json.loads(fn["params"])
+                 except Exception:
+                     params = {}
+             params = self._sanitize_schema(params or {})
+             if not params.get("type"):
+                 params["type"] = "object"
+
+             tools.append({
+                 "name": name,
+                 "description": desc,
+                 "input_schema": params or {"type": "object"},
+             })
+
+         return tools
+
+     def build_remote_tools(self, model: ModelItem = None) -> List[dict]:
+         """
+         Build Anthropic server tools (remote tools) based on config flags.
+         Currently supports: Web Search tool.
+
+         Returns a list of tool dicts to be appended to 'tools' in messages.create.
+
+         :param model: ModelItem
+         :return: List of remote tool dicts
+         """
+         cfg = self.window.core.config
+         tools: List[dict] = []
+
+         # sonnet-3.5 is not supported
+         if model and model.id and model.id.startswith("sonnet-3.5"):
+             return tools
+
+         # Web Search tool
+         if cfg.get("remote_tools.anthropic.web_search"):
+             ttype = cfg.get("remote_tools.anthropic.web_search.type", "web_search_20250305")  # stable as of docs
+             tname = "web_search"
+
+             tool_def: Dict[str, Any] = {
+                 "type": ttype,
+                 "name": tname,
+             }
+
+             # Optional params
+             max_uses = cfg.get("remote_tools.anthropic.web_search.max_uses")
+             if isinstance(max_uses, int) and max_uses > 0:
+                 tool_def["max_uses"] = max_uses
+
+             def parse_csv_list(key: str) -> list:
+                 raw = cfg.get(key, "")
+                 if not raw:
+                     return []
+                 if isinstance(raw, list):
+                     return [str(x).strip() for x in raw if str(x).strip()]
+                 return [s.strip() for s in str(raw).split(",") if s.strip()]
+
+             allowed = parse_csv_list("remote_tools.anthropic.web_search.allowed_domains")
+             blocked = parse_csv_list("remote_tools.anthropic.web_search.blocked_domains")
+             if allowed:
+                 tool_def["allowed_domains"] = allowed
+             elif blocked:
+                 tool_def["blocked_domains"] = blocked
+
+             # Location (approximate)
+             loc_city = cfg.get("remote_tools.anthropic.web_search.user_location.city")
+             loc_region = cfg.get("remote_tools.anthropic.web_search.user_location.region")
+             loc_country = cfg.get("remote_tools.anthropic.web_search.user_location.country")
+             loc_tz = cfg.get("remote_tools.anthropic.web_search.user_location.timezone")
+             if any([loc_city, loc_region, loc_country, loc_tz]):
+                 tool_def["user_location"] = {
+                     "type": "approximate",
+                     "city": str(loc_city) if loc_city else None,
+                     "region": str(loc_region) if loc_region else None,
+                     "country": str(loc_country) if loc_country else None,
+                     "timezone": str(loc_tz) if loc_tz else None,
+                 }
+                 # remove None fields
+                 tool_def["user_location"] = {k: v for k, v in tool_def["user_location"].items() if v is not None}
+
+             tools.append(tool_def)
+
+         return tools
+
+     def merge_tools_dedup(self, primary: List[dict], secondary: List[dict]) -> List[dict]:
+         """
+         Remove duplicate tools by name, preserving order:
+
+         - First from primary list
+         - Then from secondary list if name not already present
+
+         :param primary: Primary list of tool dicts
+         :param secondary: Secondary list of tool dicts
+         :return: Merged list of tool dicts without duplicates
+         """
+         result: List[dict] = []
+         seen = set()
+         for t in primary or []:
+             n = t.get("name")
+             if n and n not in seen:
+                 seen.add(n)
+                 result.append(t)
+         for t in secondary or []:
+             n = t.get("name")
+             if not n or n in seen:
+                 continue
+             seen.add(n)
+             result.append(t)
+         return result
+
+     def get_all_tools(self, model: ModelItem, functions: list) -> List[dict]:
+         """
+         Get combined list of all tools (app functions + remote tools) for Anthropic.
+
+         :param model: ModelItem
+         :param functions: List of app function dicts
+         :return: Combined list of tool dicts
+         """
+         base_tools = self.prepare(model, functions)
+         remote_tools = self.build_remote_tools(model)
+         return self.merge_tools_dedup(base_tools, remote_tools)
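Editor's note: the sketch below is illustrative only and not part of the diff. It shows the shape of the tool dicts that prepare() and build_remote_tools() return (the function name, schema, and config values are made-up examples) and replays the same keep-first-by-name rule that merge_tools_dedup() applies.

    # Illustrative shapes only; "get_weather" and its schema are hypothetical.
    function_tools = [
        {
            "name": "get_weather",                      # from an app function definition
            "description": "Get current weather",
            "input_schema": {                           # sanitized JSON Schema
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    ]

    remote_tools = [
        {
            "type": "web_search_20250305",              # server tool type from config
            "name": "web_search",
            "max_uses": 3,                              # example optional params
            "allowed_domains": ["example.com"],
        },
    ]

    # Same de-duplication rule as merge_tools_dedup(): keep the first occurrence by name.
    seen, merged = set(), []
    for tool in function_tools + remote_tools:
        name = tool.get("name")
        if name and name not in seen:
            seen.add(name)
            merged.append(tool)

    print([t["name"] for t in merged])  # ['get_weather', 'web_search']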