lybic-guiagents 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lybic-guiagents might be problematic.
- gui_agents/__init__.py +1 -1
- gui_agents/agents/Backend/LybicBackend.py +25 -19
- gui_agents/agents/agent_s.py +292 -97
- gui_agents/agents/grounding.py +43 -6
- gui_agents/agents/manager.py +113 -18
- gui_agents/agents/stream_manager.py +163 -0
- gui_agents/agents/worker.py +60 -35
- gui_agents/cli_app.py +16 -5
- gui_agents/core/knowledge.py +36 -5
- gui_agents/grpc_app.py +784 -0
- gui_agents/proto/__init__.py +3 -0
- gui_agents/proto/pb/__init__.py +4 -0
- gui_agents/tools/model.md +351 -0
- gui_agents/tools/tools.py +80 -39
- gui_agents/tools/tools_config.json +101 -0
- gui_agents/tools/tools_config_cn.json +101 -0
- gui_agents/tools/tools_config_en.json +101 -0
- {lybic_guiagents-0.2.2.dist-info → lybic_guiagents-0.2.3.dist-info}/METADATA +86 -8
- {lybic_guiagents-0.2.2.dist-info → lybic_guiagents-0.2.3.dist-info}/RECORD +23 -16
- lybic_guiagents-0.2.3.dist-info/entry_points.txt +3 -0
- gui_agents/lybic_client/__init__.py +0 -0
- gui_agents/lybic_client/lybic_client.py +0 -88
- {lybic_guiagents-0.2.2.dist-info → lybic_guiagents-0.2.3.dist-info}/WHEEL +0 -0
- {lybic_guiagents-0.2.2.dist-info → lybic_guiagents-0.2.3.dist-info}/licenses/LICENSE +0 -0
- {lybic_guiagents-0.2.2.dist-info → lybic_guiagents-0.2.3.dist-info}/top_level.txt +0 -0
gui_agents/tools/model.md ADDED

@@ -0,0 +1,351 @@

# Supported Model Providers and Model Lists

## LLM Model Providers

### 1. OpenAI

**Provider**

- `openai`

**Supported Models:**

- `gpt-4.1`
- `gpt-4.1-mini`
- `gpt-4.1-nano`
- `gpt-4.5-preview`
- `gpt-4o`
- `gpt-4o-realtime-preview`
- `gpt-4o-mini`
- `o1`
- `o1-pro`
- `o1-mini`
- `o3`
- `o3-pro`
- `o3-mini`
- `o4-mini`

**Embedding Models:**

- `text-embedding-3-small`
- `text-embedding-3-large`
- `text-embedding-ada-002`

📚 **Reference Link:** <https://platform.openai.com/docs/pricing>

---

### 2. Anthropic Claude

**Provider**

- `anthropic`

**Supported Models:**

- `claude-opus-4-20250514`
- `claude-sonnet-4-20250514`
- `claude-3-7-sonnet-20250219`
- `claude-3-5-sonnet-20241022`
- `claude-3-5-haiku-20241022`

📚 **Reference Link:** <https://www.anthropic.com/api>

---

### 3. AWS Bedrock

**Provider**

- `bedrock`

**Supported Claude Models:**

- `Claude-Opus-4`
- `Claude-Sonnet-4`
- `Claude-Sonnet-3.7`
- `Claude-Sonnet-3.5`

📚 **Reference Link:** <https://aws.amazon.com/bedrock/>

---

### 4. Google Gemini

**Provider**

- `gemini`

**Supported Models:**

- `gemini-2.5-pro`
- `gemini-2.5-flash`
- `gemini-2.0-flash`
- `gemini-1.5-pro`
- `gemini-1.5-flash`

**Embedding Models:**

- `gemini-embedding-001`

📚 **Reference Link:** <https://ai.google.dev/gemini-api/docs/pricing>

---

### 5. Groq

**Provider**

- `groq`

**Supported Models:**

- `Kimi-K2-Instruct`
- `Llama-4-Scout-17B-16E-Instruct`
- `Llama-4-Maverick-17B-128E-Instruct`
- `Llama-Guard-4-12B`
- `DeepSeek-R1-Distill-Llama-70B`
- `Qwen3-32B`
- `Llama-3.3-70B-Instruct`

📚 **Reference Link:** <https://groq.com/pricing>

---

### 6. Monica (Proxy Platform)

**Provider**

- `monica`

**OpenAI Models:**

- `gpt-4.1`
- `gpt-4.1-mini`
- `gpt-4.1-nano`
- `gpt-4o-2024-11-20`
- `gpt-4o-mini-2024-07-18`
- `o4-mini`
- `o3`

**Anthropic Claude Models:**

- `claude-opus-4-20250514`
- `claude-sonnet-4-20250514`
- `claude-3-7-sonnet-latest`
- `claude-3-5-sonnet-20241022`
- `claude-3-5-sonnet-20240620`
- `claude-3-5-haiku-20241022`
- `claude-3-opus-20240229`
- `claude-3-haiku-20240307`

**Google Gemini Models:**

- `gemini-2.5-pro-preview-03-25`
- `gemini-2.5-flash-lite`
- `gemini-2.5-flash-preview-05-20`
- `gemini-2.0-flash-001`
- `gemini-1.5-pro-002`
- `gemini-1.5-flash-002`

**DeepSeek Models:**

- `deepseek-reasoner`
- `deepseek-chat`

**Meta Llama Models:**

- `llama-3-8b-instruct`
- `llama-3.1-8b-instruct`
- `llama-3.3-70b-instruct`
- `llama-3-70b-instruct`
- `llama-3.1-405b-instruct`

**xAI Grok Models:**

- `grok-3-beta`
- `grok-beta`

📚 **Reference Link:** <https://platform.monica.im/docs/en/models-and-pricing>

---

### 7. OpenRouter (Proxy Platform)

**Provider**

- `openrouter`

**OpenAI Models:**

- `gpt-4.1`
- `gpt-4.1-mini`
- `o1`
- `o1-pro`
- `o1-mini`
- `o3`
- `o3-pro`
- `o3-mini`
- `o4-mini`

**xAI Grok Models:**

- `grok-4`
- `grok-3`
- `grok-3-mini`

**Anthropic Claude Models:**

- `claude-opus-4`
- `claude-sonnet-4`

**Google Gemini Models:**

- `gemini-2.5-flash`
- `gemini-2.5-pro`

📚 **Reference Link:** <https://openrouter.ai/models>

---

### 8. Azure OpenAI

**Provider**

- `azure`

**Supported Models:**

- `gpt-4.1`
- `gpt-4.1-mini`
- `gpt-4.1-nano`
- `o1`
- `o3`
- `o4-mini`

📚 **Reference Link:** <https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/>

---

### 9. DeepSeek

**Provider**

- `deepseek`

**Supported Models:**

- `deepseek-chat`
- `deepseek-reasoner`

📚 **Reference Link:** <https://platform.deepseek.com/>

---

### 10. Alibaba Cloud Qwen

**Supported Models:**

- `qwen-max-latest`
- `qwen-plus-latest`
- `qwen-turbo-latest`
- `qwen-vl-max-latest` (Grounding)
- `qwen-vl-plus-latest` (Grounding)

**Embedding Models:**

- `text-embedding-v4`
- `text-embedding-v3`

📚 **Reference Link:** <https://bailian.console.aliyun.com/?tab=doc#/doc/?type=model&url=https%3A%2F%2Fhelp.aliyun.com%2Fdocument_detail%2F2840914.html&renderType=iframe>

---

### 11. ByteDance Doubao

**Supported Models:**

- `doubao-seed-1-6-flash-250615`
- `doubao-seed-1-6-thinking-250715`
- `doubao-seed-1-6-250615`
- `doubao-1.5-vision-pro-250328` (Grounding)
- `doubao-1-5-thinking-vision-pro-250428` (Grounding)
- `doubao-1-5-ui-tars-250428` (Grounding)

**Embedding Models:**

- `doubao-embedding-large-text-250515`
- `doubao-embedding-text-240715`

📚 **Reference Link:** <https://console.volcengine.com/ark/region:ark+cn-beijing/model?vendor=Bytedance&view=LIST_VIEW>

---

### 12. Zhipu GLM

**Supported Models:**

- `GLM-4-Plus`
- `GLM-4-Air-250414`
- `GLM-4-AirX` (Grounding)
- `GLM-4V-Plus-0111` (Grounding)

**Embedding Models:**

- `Embedding-3`
- `Embedding-2`

📚 **Reference Link:** <https://open.bigmodel.cn/pricing>

---

### 13. SiliconFlow

**Supported Models:**

- `Kimi-K2-Instruct`
- `DeepSeek-V3`
- `DeepSeek-R1`
- `Qwen3-32B`

📚 **Reference Link:** <https://cloud.siliconflow.cn/sft-d1pi8rbk20jc73c62gm0/models>

---

## 🔤 Dedicated Embedding Providers

### 14. Jina AI

**Embedding Models:**

- `jina-embeddings-v4`
- `jina-embeddings-v3`

📚 **Reference Link:** <https://jina.ai/embeddings>

---

## 🔍 AI Search Engines

### 15. Bocha AI

**Service Type:** AI Research & Search

📚 **Reference Link:** <https://open.bochaai.com/overview>

---

### 16. Exa

**Service Type:** AI Research & Search

**Pricing Model:**

- $5.00 / 1k agent searches
- $5.00 / 1k exa-research agent page reads
- $10.00 / 1k exa-research-pro agent page reads
- $5.00 / 1M reasoning tokens

📚 **Reference Link:** <https://dashboard.exa.ai/home>
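The provider identifiers and model names listed above are the values that end up in a tool's engine parameters (see the tools.py changes below, which build `engine_type`/`model` and accept optional auth keys). A minimal illustrative sketch; the API key and endpoint values are placeholders, not real configuration:

```python
# Illustrative only: a provider/model pair from the table above, expressed as the
# engine parameters assembled by BaseTool in gui_agents/tools/tools.py.
engine_params = {
    "engine_type": "doubao",                   # provider identifier from the list above
    "model": "doubao-seed-1-6-250615",         # model name from the list above
    "api_key": "YOUR_API_KEY",                 # placeholder; optional auth key
    "base_url": "https://example.invalid/v3",  # placeholder; optional custom endpoint
}
print(engine_params)
```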
gui_agents/tools/tools.py CHANGED

@@ -23,6 +23,11 @@ class BaseTool(ABC):
 
     @classmethod
     def _load_prompts_dict(cls):
+        """
+        Lazily load and cache the prompts dictionary on the class using thread-safe double-checked locking.
+
+        If the prompts module cannot be loaded, sets `_prompts_dict` to an empty dict and logs an error.
+        """
         if cls._prompts_dict is None:
             with cls._prompts_dict_lock:
                 if cls._prompts_dict is None:
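The new docstring describes double-checked locking around the class-level prompts cache. A standalone sketch of that pattern with hypothetical names (`PromptCache`, a dummy payload); the package itself loads its prompts from prompts.py:

```python
# Standalone sketch of the double-checked locking pattern described above (hypothetical names).
import threading


class PromptCache:
    _prompts_dict = None
    _prompts_dict_lock = threading.Lock()

    @classmethod
    def load(cls) -> dict:
        # First check avoids taking the lock on the common, already-initialized path.
        if cls._prompts_dict is None:
            with cls._prompts_dict_lock:
                # Second check: another thread may have filled the cache while we waited.
                if cls._prompts_dict is None:
                    try:
                        cls._prompts_dict = {"grounding": "example system prompt"}
                    except Exception:
                        cls._prompts_dict = {}
        return cls._prompts_dict


print(PromptCache.load())
```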
@@ -33,13 +38,22 @@ class BaseTool(ABC):
                     logger.error(f"Failed to load prompts from prompts.py: {e}")
                     cls._prompts_dict = {}
 
-    def __init__(self, provider: str, model_name: str, tool_name: str):
+    def __init__(self, provider: str, model_name: str, tool_name: str, **kwargs):
         """
-        Initialize the base tool.
-
-
-
-
+        Initialize the base tool, populate engine parameters from provided arguments, load the tool prompt, and create the LLMAgent instance used for LLM calls.
+
+        Parameters:
+            provider (str): API provider identifier (e.g., "gemini", "openai"); used as the engine_type in engine parameters.
+            model_name (str): Model identifier to use (e.g., "gemini-2.5-pro"); stored as the model in engine parameters.
+            tool_name (str): Tool key used to look up the system prompt template.
+
+        Keyword Arguments:
+            api_key, base_url, endpoint_url, azure_endpoint, api_version: If present, each is copied into engine parameters and logged as set for the tool.
+            Any other kwargs: Forwarded into engine parameters as-is.
+
+        Notes:
+            - Loads the prompt template for the tool and stores it on the instance.
+            - Constructs self.engine_params and instantiates self.llm_agent with the system prompt.
         """
         self.provider = provider
         self.model_name = model_name
@@ -51,9 +65,26 @@ class BaseTool(ABC):
             "engine_type": provider,
             "model": model_name
         }
+
+        auth_keys = ['api_key', 'base_url', 'endpoint_url', 'azure_endpoint', 'api_version']
+        for key in auth_keys:
+            if key in kwargs:
+                self.engine_params[key] = kwargs[key]
+                logger.info(f"Setting {key} for tool '{tool_name}' with provider '{provider}'")
+
+        for key, value in kwargs.items():
+            if key not in auth_keys:
+                self.engine_params[key] = value
+
         self.llm_agent = LLMAgent(engine_params=self.engine_params, system_prompt=self._prompt_template)
 
     def _get_prompt_template(self) -> str:
+        """
+        Return the prompt template associated with this tool from the class-level prompts cache.
+
+        Returns:
+            prompt_template (str): The prompt template for this tool's name, or an empty string if the tool has no name or no template is available.
+        """
         if self.tool_name is None:
             return ""
         prompts = self.__class__._prompts_dict
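The new kwargs handling splits keyword arguments into known auth/endpoint keys (copied into `engine_params` and logged) and everything else (forwarded as-is). A minimal standalone sketch that mirrors the added lines rather than importing the package:

```python
# Standalone sketch mirroring the kwargs routing added above (not the package's own code).
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

AUTH_KEYS = ["api_key", "base_url", "endpoint_url", "azure_endpoint", "api_version"]


def build_engine_params(provider: str, model_name: str, tool_name: str, **kwargs) -> dict:
    params = {"engine_type": provider, "model": model_name}
    # Auth/endpoint keys are copied over and logged.
    for key in AUTH_KEYS:
        if key in kwargs:
            params[key] = kwargs[key]
            logger.info("Setting %s for tool '%s' with provider '%s'", key, tool_name, provider)
    # All remaining kwargs pass through unchanged.
    for key, value in kwargs.items():
        if key not in AUTH_KEYS:
            params[key] = value
    return params


# Non-auth kwargs such as enable_search flow into the engine parameters as-is.
print(build_engine_params("doubao", "doubao-seed-1-6-250615", "action_generator",
                          api_key="YOUR_API_KEY", enable_search=False))
```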
@@ -170,14 +201,16 @@ class ToolFactory:
 class WebSearchTool(BaseTool):
     """Tool for performing web searches."""
 
-    def __init__(self, provider: str, model_name: str, tool_name: str):
+    def __init__(self, provider: str, model_name: str, tool_name: str, base_url='', api_key=''):
         """
-        Initialize the
+        Initialize the WebSearchTool and configure its WebSearchAgent.
 
-
-        provider: API provider
-        model_name: Model
-        tool_name:
+        Parameters:
+            provider (str): Identifier of the search API provider (e.g., "bocha", "exa").
+            model_name (str): Model identifier to include in engine configuration.
+            tool_name (str): Tool name or prompt key associated with this tool.
+            base_url (str, optional): Custom endpoint URL for the search service.
+            api_key (str, optional): API key or credential for authenticating with the search service.
         """
         self.provider = provider
 
@@ -445,18 +478,21 @@ class ActionGeneratorTool(BaseTool):
 
     def __init__(self, provider: str, model_name: str, tool_name: str, **kwargs):
         """
-
+        Create an ActionGeneratorTool and configure optional web search support.
 
-
-        provider: API provider
-        model_name: Model
-        tool_name:
-        **kwargs: Additional
-            enable_search:
-            search_provider: Provider for web search
-            search_model: Model for web search
+        Parameters:
+            provider (str): Name of the API provider to use for the tool.
+            model_name (str): Model identifier used by the underlying LLM engine.
+            tool_name (str): Tool key used to select the prompt template.
+            **kwargs: Additional configuration options:
+                enable_search (bool): If True, a WebSearchTool will be created and attached to the tool as `self.search_tool`. Defaults to False.
+                search_provider (str): Provider to use for the optional web search. Defaults to "bocha".
+                search_model (str): Model identifier to use for the optional web search. Defaults to an empty string.
+
+        Side effects:
+            Sets `self.enable_search` and, when `enable_search` is True, initializes `self.search_tool` with a WebSearchTool instance and logs the enabling of web search.
         """
-        super().__init__(provider, model_name, tool_name)
+        super().__init__(provider, model_name, tool_name, **kwargs)
 
         # Extract search-related parameters
         self.enable_search = kwargs.get("enable_search", False)
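Based on the docstring above, a hypothetical construction call might look like the following; the import path, credentials, and the presence of a configured prompt template are assumptions, not shown in this diff:

```python
# Hypothetical usage sketch for the constructor signature shown in the diff above.
from gui_agents.tools.tools import ActionGeneratorTool  # import path assumed

tool = ActionGeneratorTool(
    provider="doubao",
    model_name="doubao-seed-1-6-250615",
    tool_name="action_generator",
    enable_search=True,        # attaches a WebSearchTool as tool.search_tool
    search_provider="bocha",   # default per the docstring
    search_model="",
    api_key="YOUR_API_KEY",    # placeholder; forwarded to BaseTool and into engine_params
)
```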
@@ -511,18 +547,19 @@ class FastActionGeneratorTool(BaseTool):
 
     def __init__(self, provider: str, model_name: str, tool_name: str, **kwargs):
         """
-        Initialize the
+        Initialize the FastActionGeneratorTool and optionally enable web search augmentation.
 
-
-        provider: API provider name
-        model_name: Model
-        tool_name:
-        **kwargs: Additional
-            enable_search:
-            search_provider: Provider for web search (
-            search_model: Model for web search (
+        Parameters:
+            provider (str): API provider name used to configure the underlying LLM/engine.
+            model_name (str): Model identifier to use for generation.
+            tool_name (str): Tool key used to select the prompt template.
+            **kwargs: Additional keyword arguments. Recognized keys:
+                enable_search (bool): If true, instantiate a WebSearchTool to augment requests with search results.
+                search_provider (str): Provider name for the optional web search (default "bocha").
+                search_model (str): Model name for the optional web search (default "").
+            Any other kwargs are forwarded to BaseTool for engine/auth configuration.
         """
-        super().__init__(provider, model_name, tool_name)
+        super().__init__(provider, model_name, tool_name, **kwargs)
 
         # Extract search-related parameters
         self.enable_search = kwargs.get("enable_search", False)
@@ -592,14 +629,16 @@ class FastActionGeneratorTool(BaseTool):
 class EmbeddingTool(BaseTool):
     """Tool for generating text embeddings."""
 
-    def __init__(self, provider: str, model_name: str, tool_name: str):
+    def __init__(self, provider: str, model_name: str, tool_name: str, base_url='', api_key=''):
         """
-
+        Create and configure an EmbeddingTool backed by an EmbeddingAgent.
 
-
-        provider:
-        model_name:
-        tool_name:
+        Parameters:
+            provider (str): Name of the embedding service provider (e.g., "openai", "gemini").
+            model_name (str): Embedding model identifier to use.
+            tool_name (str): Tool key used to look up prompts or register the tool.
+            base_url (str, optional): Custom endpoint URL for the provider; defaults to ''.
+            api_key (str, optional): API key or credential for authenticating with the provider; defaults to ''.
         """
         self.provider = provider
         self.model_name = model_name
@@ -608,7 +647,9 @@ class EmbeddingTool(BaseTool):
         # Create EmbeddingAgent instance
         self.engine_params = {
             "engine_type": provider,
-            "embedding_model": model_name
+            "embedding_model": model_name,
+            "base_url": base_url,
+            "api_key": api_key
         }
 
         # Initialize EmbeddingAgent
gui_agents/tools/tools_config.json ADDED

@@ -0,0 +1,101 @@

{
  "tools": [
    {
      "tool_name": "websearch",
      "provider": "bocha",
      "model_name": ""
    },
    {
      "tool_name": "context_fusion",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "subtask_planner",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-250615"
    },
    {
      "tool_name": "traj_reflector",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "memory_retrival",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "grounding",
      "provider": "doubao",
      "model_name": "doubao-1-5-ui-tars-250428"
    },
    {
      "tool_name": "evaluator",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "action_generator",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-250615",
      "enable_search": false,
      "search_provider": "bocha",
      "search_model": ""
    },
    {
      "tool_name": "action_generator_with_takeover",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-250615",
      "enable_search": false,
      "search_provider": "bocha",
      "search_model": ""
    },
    {
      "tool_name": "fast_action_generator",
      "provider": "doubao",
      "model_name": "doubao-1-5-ui-tars-250428",
      "enable_search": false,
      "search_provider": "bocha",
      "search_model": ""
    },
    {
      "tool_name": "fast_action_generator_with_takeover",
      "provider": "doubao",
      "model_name": "doubao-1-5-ui-tars-250428",
      "enable_search": false,
      "search_provider": "bocha",
      "search_model": ""
    },
    {
      "tool_name": "dag_translator",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-250615"
    },
    {
      "tool_name": "embedding",
      "provider": "doubao",
      "model_name": "doubao-embedding-text-240715"
    },
    {
      "tool_name": "query_formulator",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "narrative_summarization",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "text_span",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    },
    {
      "tool_name": "episode_summarization",
      "provider": "doubao",
      "model_name": "doubao-seed-1-6-flash-250615"
    }
  ]
}
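A rough sketch of how a tools_config file like this could be consumed; the package's actual loading code is not shown in this diff and may differ, and the file path here is an assumption based on the file list at the top:

```python
# Rough sketch of reading a tools_config JSON like the one above (illustrative only).
import json

with open("gui_agents/tools/tools_config.json", encoding="utf-8") as f:
    config = json.load(f)

for entry in config["tools"]:
    tool_name = entry["tool_name"]
    provider = entry["provider"]
    model_name = entry["model_name"]
    # Remaining keys (enable_search, search_provider, search_model, ...) would be passed
    # as **kwargs to the tool constructors shown in the tools.py diff above.
    extra = {k: v for k, v in entry.items() if k not in ("tool_name", "provider", "model_name")}
    print(tool_name, provider, model_name, extra)
```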