pygpt-net 2.6.22__py3-none-any.whl → 2.6.24__py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (55)
  1. pygpt_net/CHANGELOG.txt +16 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/agent/llama.py +3 -0
  4. pygpt_net/controller/chat/response.py +6 -1
  5. pygpt_net/controller/files/files.py +24 -55
  6. pygpt_net/controller/theme/theme.py +3 -3
  7. pygpt_net/core/agents/observer/evaluation.py +2 -2
  8. pygpt_net/core/agents/runners/loop.py +1 -0
  9. pygpt_net/core/attachments/context.py +4 -4
  10. pygpt_net/core/bridge/bridge.py +2 -0
  11. pygpt_net/core/filesystem/opener.py +261 -0
  12. pygpt_net/core/filesystem/url.py +13 -10
  13. pygpt_net/core/idx/chat.py +1 -1
  14. pygpt_net/core/idx/indexing.py +3 -3
  15. pygpt_net/core/idx/llm.py +61 -2
  16. pygpt_net/core/platforms/platforms.py +5 -4
  17. pygpt_net/data/config/config.json +21 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +18 -0
  20. pygpt_net/data/css/web-blocks.dark.css +7 -1
  21. pygpt_net/data/css/web-blocks.light.css +5 -2
  22. pygpt_net/data/css/web-chatgpt.dark.css +7 -1
  23. pygpt_net/data/css/web-chatgpt.light.css +3 -0
  24. pygpt_net/data/css/web-chatgpt_wide.dark.css +7 -1
  25. pygpt_net/data/css/web-chatgpt_wide.light.css +3 -0
  26. pygpt_net/data/locale/locale.de.ini +47 -0
  27. pygpt_net/data/locale/locale.en.ini +50 -1
  28. pygpt_net/data/locale/locale.es.ini +47 -0
  29. pygpt_net/data/locale/locale.fr.ini +47 -0
  30. pygpt_net/data/locale/locale.it.ini +47 -0
  31. pygpt_net/data/locale/locale.pl.ini +47 -0
  32. pygpt_net/data/locale/locale.uk.ini +47 -0
  33. pygpt_net/data/locale/locale.zh.ini +47 -0
  34. pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
  35. pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
  36. pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
  37. pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
  38. pygpt_net/provider/agents/openai/agent_planner.py +29 -29
  39. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
  40. pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
  41. pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
  42. pygpt_net/provider/agents/openai/evolve.py +37 -39
  43. pygpt_net/provider/agents/openai/supervisor.py +16 -18
  44. pygpt_net/provider/core/config/patch.py +20 -1
  45. pygpt_net/provider/llms/anthropic.py +5 -4
  46. pygpt_net/provider/llms/google.py +2 -2
  47. pygpt_net/ui/layout/toolbox/agent_llama.py +2 -3
  48. pygpt_net/ui/widget/tabs/layout.py +6 -4
  49. pygpt_net/ui/widget/tabs/output.py +348 -13
  50. pygpt_net/ui/widget/textarea/input.py +74 -8
  51. {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/METADATA +34 -25
  52. {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/RECORD +55 -54
  53. {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/LICENSE +0 -0
  54. {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/WHEEL +0 -0
  55. {pygpt_net-2.6.22.dist-info → pygpt_net-2.6.24.dist-info}/entry_points.txt +0 -0
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #

 from dataclasses import dataclass
@@ -15,8 +15,6 @@ from typing import Dict, Any, Tuple, Literal, Optional
 from agents import (
     Agent as OpenAIAgent,
     Runner,
-    RunConfig,
-    ModelSettings,
     TResponseInputItem,
 )

@@ -31,9 +29,9 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem

-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
-from pygpt_net.provider.gpt.agents.remote_tools import get_remote_tools, is_computer_tool, append_tools
+from pygpt_net.provider.gpt.agents.remote_tools import append_tools
 from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.utils import trans

 from ..base import BaseAgent
 from ...gpt.agents.experts import get_experts
@@ -284,9 +282,9 @@ class Agent(BaseAgent):
             evaluator_result = await Runner.run(evaluator, input_items)
             result: EvaluationFeedback = evaluator_result.final_output

-            info = f"\n___\n**Evaluator score: {result.score}**\n\n"
+            info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"
             if result.score == "pass":
-                info += "\n\n**Response is good enough, exiting.**\n"
+                info += f"\n\n**{trans('agent.eval.score.good')}**\n"
                 if use_partial_ctx:
                     ctx = bridge.on_next_ctx(
                         ctx=ctx,
@@ -302,7 +300,7 @@ class Agent(BaseAgent):
                 final_output += info
                 break

-            info += "\n\n**Re-running with feedback**\n\n" + f"Feedback: {result.feedback}\n___\n"
+            info += f"\n\n**{trans('agent.eval.next')}**\n\nFeedback: {result.feedback}\n___\n"
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})

             if use_partial_ctx:
@@ -330,53 +328,53 @@ class Agent(BaseAgent):
         """
         return {
             "base": {
-                "label": "Base agent",
+                "label": trans("agent.option.section.base"),
                 "options": {
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for base agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.base.desc"),
                         "default": self.PROMPT,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "feedback": {
-                "label": "Feedback",
+                "label": trans("agent.option.section.feedback"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for feedback evaluation",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.feedback.desc"),
                         "default": self.PROMPT_FEEDBACK,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
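The hunks above (apparently from one of the two feedback agents, agent_with_experts_feedback.py or agent_with_feedback.py) swap hardcoded English strings for trans() lookups; the roughly 47 lines added to each locale.*.ini presumably supply the matching keys. As a rough, hypothetical illustration of such a key-based lookup (not the actual pygpt_net.utils.trans implementation), with the English fallbacks taken from the literals removed above:

import configparser
from typing import Dict, Optional

# English fallbacks mirroring the strings removed in the hunks above.
FALLBACKS: Dict[str, str] = {
    "agent.eval.score": "Evaluator score",
    "agent.eval.score.good": "Response is good enough, exiting.",
    "agent.eval.next": "Re-running with feedback",
    "agent.option.prompt": "Prompt",
    "agent.option.tools.local": "Allow local tools",
    "agent.option.tools.remote": "Allow remote tools",
}

def load_locale(path: str) -> Dict[str, str]:
    """Read key/value pairs from a locale .ini file (a single [LOCALE] section is assumed here)."""
    parser = configparser.ConfigParser()
    parser.read(path, encoding="utf-8")
    return dict(parser["LOCALE"]) if parser.has_section("LOCALE") else {}

def trans(key: str, locale: Optional[Dict[str, str]] = None) -> str:
    """Return the localized string for key, falling back to English, then to the key itself."""
    return (locale or {}).get(key, FALLBACKS.get(key, key))

# trans("agent.eval.score") -> "Evaluator score" when no locale entry overrides it.

The next set of hunks is nearly identical: both feedback-agent files (agent_with_feedback.py and agent_with_experts_feedback.py, +21 -23 each) appear to receive the same treatment.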
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #

 from dataclasses import dataclass
@@ -15,8 +15,6 @@ from typing import Dict, Any, Tuple, Literal, Optional
 from agents import (
     Agent as OpenAIAgent,
     Runner,
-    RunConfig,
-    ModelSettings,
     TResponseInputItem,
 )

@@ -31,9 +29,9 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem

-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
-from pygpt_net.provider.gpt.agents.remote_tools import get_remote_tools, is_computer_tool, append_tools
+from pygpt_net.provider.gpt.agents.remote_tools import append_tools
 from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.utils import trans

 from ..base import BaseAgent
 from ...gpt.agents.experts import get_experts
@@ -284,9 +282,9 @@ class Agent(BaseAgent):
             evaluator_result = await Runner.run(evaluator, input_items)
             result: EvaluationFeedback = evaluator_result.final_output

-            info = f"\n___\n**Evaluator score: {result.score}**\n\n"
+            info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"
             if result.score == "pass":
-                info += "\n\n**Response is good enough, exiting.**\n"
+                info += f"\n\n**{trans('agent.eval.score.good')}**\n"
                 if use_partial_ctx:
                     ctx = bridge.on_next_ctx(
                         ctx=ctx,
@@ -302,7 +300,7 @@ class Agent(BaseAgent):
                 final_output += info
                 break

-            info += "\n\n**Re-running with feedback**\n\n" + f"Feedback: {result.feedback}\n___\n"
+            info += f"\n\n**{trans('agent.eval.next')}**\n\nFeedback: {result.feedback}\n___\n"
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})

             if use_partial_ctx:
@@ -330,53 +328,53 @@ class Agent(BaseAgent):
         """
         return {
             "base": {
-                "label": "Base agent",
+                "label": trans("agent.option.section.base"),
                 "options": {
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for base agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.base.desc"),
                         "default": self.PROMPT,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "feedback": {
-                "label": "Feedback",
+                "label": trans("agent.option.section.feedback"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for feedback evaluation",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.feedback.desc"),
                         "default": self.PROMPT_FEEDBACK,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
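Both feedback agents expose the same get_options() schema: named sections, each holding typed options (textarea, bool, combo) with a label, description, and default. A minimal sketch of how a caller might resolve an effective value from that schema, assuming a hypothetical per-preset overrides mapping rather than any real pygpt-net API:

from typing import Any, Dict

def resolve_option(
    schema: Dict[str, Any],
    overrides: Dict[str, Dict[str, Any]],
    section: str,
    option: str,
) -> Any:
    """Return the user-supplied value for section/option, else the schema default.

    `schema` is the dict returned by get_options(); `overrides` is a hypothetical
    mapping of section -> option -> value coming from a preset or settings UI.
    """
    default = schema[section]["options"][option].get("default")
    return overrides.get(section, {}).get(option, default)

# Example against the "feedback" section shown above:
# resolve_option(agent.get_options(), {}, "feedback", "model") -> "gpt-4o"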
@@ -6,16 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #

 from typing import Dict, Any, Tuple, Union, Optional

 from agents import (
     Agent as OpenAIAgent,
-    Runner,
-    RunConfig,
-    ModelSettings,
 )

 from pygpt_net.core.agents.bridge import ConnectionContext
@@ -27,11 +24,9 @@ from pygpt_net.core.types import (

 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
-from pygpt_net.item.preset import PresetItem

-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
 from pygpt_net.provider.gpt.agents.remote_tools import append_tools
-from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.utils import trans

 from ..base import BaseAgent
 from .bots.research_bot.manager import ResearchManager
@@ -207,82 +202,82 @@ class Agent(BaseAgent):
         """
         return {
             "writer": {
-                "label": "Base agent",
+                "label": trans("agent.option.section.base"),
                 "options": {
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for base agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.base.desc"),
                         "default": self.PROMPT_WRITER,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "planner": {
-                "label": "Planner",
+                "label": trans("agent.option.section.planner"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for planner agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.planner.desc"),
                         "default": self.PROMPT_PLANNER,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "search": {
-                "label": "Search",
+                "label": trans("agent.option.section.search"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for search agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.search.desc"),
                         "default": self.PROMPT_SEARCH,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": True,
                     },
                 }
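This block appears to come from bot_researcher.py (note the ResearchManager import). The writer, planner, and search sections each carry their own prompt default and, where present, a model default, while the direct Runner and StreamHandler imports drop out, presumably because ResearchManager now drives the sub-agents. A hedged sketch of building one openai-agents Agent per section from such a schema; the wiring below is illustrative only, not the ResearchManager implementation:

from typing import Any, Dict

from agents import Agent  # imported as OpenAIAgent in the diffs above

def build_role_agents(schema: Dict[str, Any]) -> Dict[str, Agent]:
    """Create one Agent per schema section using that section's prompt/model defaults."""
    role_agents: Dict[str, Agent] = {}
    for role, section in schema.items():
        options = section["options"]
        role_agents[role] = Agent(
            name=role,
            instructions=options["prompt"]["default"],
            # The "writer" section defines no model option; assume a fallback for it.
            model=options.get("model", {}).get("default", "gpt-4o"),
        )
    return role_agents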
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.08.26 01:00:00 #
 # ================================================== #

 import copy
@@ -16,8 +16,6 @@ from typing import Dict, Any, Tuple, Literal, Optional
 from agents import (
     Agent as OpenAIAgent,
     Runner,
-    RunConfig,
-    ModelSettings,
     TResponseInputItem,
 )

@@ -32,9 +30,9 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem

-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
-from pygpt_net.provider.gpt.agents.remote_tools import get_remote_tools, is_computer_tool, append_tools
+from pygpt_net.provider.gpt.agents.remote_tools import append_tools
 from pygpt_net.provider.gpt.agents.response import StreamHandler
+from pygpt_net.utils import trans

 from ..base import BaseAgent
 from ...gpt.agents.experts import get_experts
@@ -379,7 +377,7 @@ class Agent(BaseAgent):
         )

         if num_generation >= max_generations > 0:
-            info = f"\n\n**Max generations reached ({max_generations}), exiting.**\n"
+            info = f"\n\n**{trans('agent.evolve.maxgen_limit')}**\n"
             ctx.stream = info
             bridge.on_step(ctx, False)
             final_output += info
@@ -390,7 +388,7 @@ class Agent(BaseAgent):
         handler = StreamHandler(window, bridge)
         begin = True
         while True:
-            ctx.stream = f"\n\n\n\n**Generation {num_generation}**\n\n"
+            ctx.stream = f"\n\n\n\n**{trans('agent.evolve.generation')} {num_generation}**\n\n"
             bridge.on_step(ctx, begin)
             handler.begin = False
             begin = False
@@ -403,7 +401,7 @@ class Agent(BaseAgent):
                     parents[j],
                     **parent_kwargs
                 )
-                ctx.stream = f"\n\n**Running agent {j} ...**\n\n"
+                ctx.stream = f"\n\n**{trans('agent.evolve.running')} {j} ...**\n\n"
                 bridge.on_step(ctx)
                 handler.reset()
                 async for event in results[j].stream_events():
@@ -432,7 +430,7 @@ class Agent(BaseAgent):

             handler.to_buffer(results[choose].final_output)
             final_output = handler.buffer
-            ctx.stream = f"**Winner: agent {result.answer_number}**\n\n"
+            ctx.stream = f"**{trans('agent.evolve.winner')} {result.answer_number}**\n\n"
             bridge.on_step(ctx)

             if bridge.stopped():
@@ -445,9 +443,9 @@ class Agent(BaseAgent):
             evaluator_result = await Runner.run(evaluator, input_items)
             result: EvaluationFeedback = evaluator_result.final_output

-            info = f"\n___\n**Evaluator score: {result.score}**\n\n"
+            info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"
             if result.score == "pass":
-                info += "\n\n**Response is good enough, exiting.**\n"
+                info += f"\n\n**{trans('agent.eval.score.good')}.**\n"
                 if use_partial_ctx:
                     ctx = bridge.on_next_ctx(
                         ctx=ctx,
@@ -463,9 +461,9 @@ class Agent(BaseAgent):
                 final_output += info
                 break
             else:
-                info = f"\n___\n**Evaluator score: {result.score}**\n\n"
+                info = f"\n___\n**{trans('agent.eval.score')}: {result.score}**\n\n"

-            info += "\n\n**Re-running with feedback**\n\n" + f"Feedback: {result.feedback}\n___\n"
+            info += f"\n\n**{trans('agent.eval.next')}**\n\nFeedback: {result.feedback}\n___\n"
             input_items.append({"content": f"Feedback: {result.feedback}", "role": "user"})

             if use_partial_ctx:
@@ -483,7 +481,7 @@ class Agent(BaseAgent):
             handler.to_buffer(info)

             if num_generation >= max_generations > 0:
-                info = f"\n\n**Max generations reached ({max_generations}), exiting.**\n"
+                info = f"\n\n**{trans('agent.evolve.maxgen_limit')}**\n"
                 ctx.stream = info
                 bridge.on_step(ctx, False)
                 final_output += info
@@ -502,94 +500,94 @@ class Agent(BaseAgent):
         """
         return {
             "base": {
-                "label": "Base agent",
+                "label": trans("agent.option.section.base"),
                 "options": {
                     "num_parents": {
                         "type": "int",
-                        "label": "Num of parents",
+                        "label": trans("agent.evolve.option.num_parents"),
                         "min": 1,
                         "default": 2,
                     },
                     "max_generations": {
                         "type": "int",
-                        "label": "Max generations",
+                        "label": trans("agent.evolve.option.max_generations"),
                         "min": 1,
                         "default": 10,
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for base agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.desc"),
                         "default": self.PROMPT,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "chooser": {
-                "label": "Chooser",
+                "label": trans("agent.option.section.chooser"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                         "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for chooser agent",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.chooser.desc"),
                         "default": self.PROMPT_CHOOSE,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
             },
             "feedback": {
-                "label": "Feedback",
+                "label": trans("agent.option.section.feedback"),
                 "options": {
                     "model": {
-                        "label": "Model",
+                        "label": trans("agent.option.model"),
                        "type": "combo",
                         "use": "models",
                         "default": "gpt-4o",
                     },
                     "prompt": {
                         "type": "textarea",
-                        "label": "Prompt",
-                        "description": "Prompt for feedback evaluation",
+                        "label": trans("agent.option.prompt"),
+                        "description": trans("agent.option.prompt.feedback.desc"),
                         "default": self.PROMPT_FEEDBACK,
                     },
                     "allow_local_tools": {
                         "type": "bool",
-                        "label": "Allow local tools",
-                        "description": "Allow usage of local tools for this agent",
+                        "label": trans("agent.option.tools.local"),
+                        "description": trans("agent.option.tools.local.desc"),
                         "default": False,
                     },
                     "allow_remote_tools": {
                         "type": "bool",
-                        "label": "Allow remote tools",
-                        "description": "Allow usage of remote tools for this agent",
+                        "label": trans("agent.option.tools.remote"),
+                        "description": trans("agent.option.tools.remote.desc"),
                         "default": False,
                     },
                 }
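The evolve.py hunks keep the same evaluate-and-retry shape as the feedback agents: run the candidates, score the chosen output as an EvaluationFeedback, stop on a "pass" or when the generation cap is hit, otherwise append the feedback and run again. A condensed, self-contained sketch of that loop pattern, leaving out the bridge/streaming plumbing; the EvaluationFeedback fields and the non-"pass" score values are inferred from usage above, and the evaluator is assumed to have been constructed with output_type=EvaluationFeedback:

from dataclasses import dataclass
from typing import List, Literal

from agents import Agent, Runner

@dataclass
class EvaluationFeedback:
    # Stand-in for the structured evaluator output; values other than "pass" are assumed.
    score: Literal["pass", "needs_improvement", "fail"]
    feedback: str

async def run_with_feedback(worker: Agent, evaluator: Agent, task: str, max_rounds: int = 3) -> str:
    """Run the worker, score its output, and retry with feedback until it passes."""
    input_items: List[dict] = [{"content": task, "role": "user"}]
    final_output = ""
    for _ in range(max_rounds):
        result = await Runner.run(worker, input_items)
        final_output = result.final_output
        review = await Runner.run(
            evaluator,
            input_items + [{"content": final_output, "role": "assistant"}],
        )
        feedback: EvaluationFeedback = review.final_output
        if feedback.score == "pass":
            break
        # As in the diff: feed the evaluator's feedback back in as a user message.
        input_items.append({"content": f"Feedback: {feedback.feedback}", "role": "user"})
    return final_output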