pygpt-net 2.6.23__py3-none-any.whl → 2.6.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/response.py +6 -5
  4. pygpt_net/controller/config/placeholder.py +3 -1
  5. pygpt_net/controller/model/importer.py +28 -5
  6. pygpt_net/core/agents/runners/loop.py +36 -3
  7. pygpt_net/core/attachments/context.py +4 -4
  8. pygpt_net/core/idx/chat.py +1 -1
  9. pygpt_net/core/idx/indexing.py +3 -3
  10. pygpt_net/core/idx/llm.py +61 -2
  11. pygpt_net/data/config/config.json +41 -4
  12. pygpt_net/data/config/models.json +3 -3
  13. pygpt_net/data/config/settings.json +56 -1
  14. pygpt_net/data/locale/locale.de.ini +46 -0
  15. pygpt_net/data/locale/locale.en.ini +53 -1
  16. pygpt_net/data/locale/locale.es.ini +46 -0
  17. pygpt_net/data/locale/locale.fr.ini +46 -0
  18. pygpt_net/data/locale/locale.it.ini +46 -0
  19. pygpt_net/data/locale/locale.pl.ini +47 -1
  20. pygpt_net/data/locale/locale.uk.ini +46 -0
  21. pygpt_net/data/locale/locale.zh.ini +46 -0
  22. pygpt_net/provider/agents/llama_index/codeact_workflow.py +8 -7
  23. pygpt_net/provider/agents/llama_index/planner_workflow.py +11 -10
  24. pygpt_net/provider/agents/llama_index/supervisor_workflow.py +9 -8
  25. pygpt_net/provider/agents/openai/agent_b2b.py +30 -17
  26. pygpt_net/provider/agents/openai/agent_planner.py +29 -29
  27. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +21 -23
  28. pygpt_net/provider/agents/openai/agent_with_feedback.py +21 -23
  29. pygpt_net/provider/agents/openai/bot_researcher.py +25 -30
  30. pygpt_net/provider/agents/openai/evolve.py +37 -39
  31. pygpt_net/provider/agents/openai/supervisor.py +16 -18
  32. pygpt_net/provider/core/config/patch.py +45 -1
  33. pygpt_net/provider/llms/anthropic.py +38 -7
  34. pygpt_net/provider/llms/azure_openai.py +9 -4
  35. pygpt_net/provider/llms/deepseek_api.py +36 -3
  36. pygpt_net/provider/llms/google.py +9 -3
  37. pygpt_net/provider/llms/hugging_face_api.py +9 -3
  38. pygpt_net/provider/llms/hugging_face_router.py +17 -3
  39. pygpt_net/provider/llms/llama_index/x_ai/__init__.py +0 -0
  40. pygpt_net/provider/llms/llama_index/x_ai/embedding.py +71 -0
  41. pygpt_net/provider/llms/local.py +25 -1
  42. pygpt_net/provider/llms/mistral.py +29 -1
  43. pygpt_net/provider/llms/ollama.py +3 -1
  44. pygpt_net/provider/llms/openai.py +7 -2
  45. pygpt_net/provider/llms/x_ai.py +19 -3
  46. pygpt_net/ui/widget/textarea/input.py +3 -3
  47. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/METADATA +54 -28
  48. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/RECORD +51 -49
  49. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/LICENSE +0 -0
  50. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/WHEEL +0 -0
  51. {pygpt_net-2.6.23.dist-info → pygpt_net-2.6.25.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,17 @@
1
+ 2.6.25 (2025-08-26)
2
+
3
+ - Fixed the empty agent ID issue in OpenAI Agents evaluation.
4
+ - Added the ability to select a custom model for evaluation.
5
+ - Added embedding providers: Anthropic, Deepseek, MistralAI, xAI, VoyageAI.
6
+
7
+ 2.6.24 (2025-08-26)
8
+
9
+ - Added a new option: LlamaIndex -> Embeddings -> Default embedding providers for attachments.
10
+ - The same model provider is now used for both embedding and RAG query in attachment indexing.
11
+ - Translations have been added to Agents.
12
+ - Fixed fetching Anthropic models list.
13
+ - Added Google GenAI Embeddings.
14
+
1
15
  2.6.23 (2025-08-25)
2
16
 
3
17
  - Added an inline "Add a new chat" button to the right of the tabs.
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.25 00:00:00 #
9
+ # Updated Date: 2025.08.26 00:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  __author__ = "Marcin Szczygliński"
13
13
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
14
14
  __credits__ = ["Marcin Szczygliński"]
15
15
  __license__ = "MIT"
16
- __version__ = "2.6.23"
17
- __build__ = "2025-08-25"
16
+ __version__ = "2.6.25"
17
+ __build__ = "2025-08-26"
18
18
  __maintainer__ = "Marcin Szczygliński"
19
19
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
20
20
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 02:00:00 #
9
+ # Updated Date: 2025.08.26 01:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Dict, Any
@@ -264,15 +264,16 @@ class Response:
264
264
  if global_mode not in self.AGENT_MODES_ALLOWED:
265
265
  return # no agent mode, nothing to do
266
266
 
267
+ # agent evaluation finish
268
+ if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_eval_finish" in ctx.extra):
269
+ controller.agent.llama.on_end(ctx)
270
+ return
271
+
267
272
  # not agent final response
268
273
  if ctx.extra is None or (isinstance(ctx.extra, dict) and "agent_finish" not in ctx.extra):
269
274
  self.window.update_status(trans("status.agent.reasoning"))
270
275
  controller.chat.common.lock_input() # lock input, re-enable stop button
271
276
 
272
- if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_eval_finish" in ctx.extra):
273
- controller.agent.llama.on_end(ctx)
274
- return
275
-
276
277
  # agent final response
277
278
  if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_finish" in ctx.extra):
278
279
  controller.agent.llama.on_finish(ctx) # evaluate response and continue if needed
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 23:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Dict, Any, List
@@ -337,6 +337,8 @@ class Placeholder:
337
337
  items[mid] = model.name
338
338
 
339
339
  data: List[Dict[str, str]] = []
340
+ if "allow_empty" in params and params["allow_empty"] is True:
341
+ data.append({'_': '---'})
340
342
  providers = self.window.core.llm.get_choices()
341
343
  if not providers:
342
344
  for mid, name in sorted(items.items(), key=lambda kv: kv[1].lower()):
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 23:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -391,7 +391,7 @@ class Importer:
391
391
  'type': 'str'
392
392
  }
393
393
  ]
394
- elif self.provider == "openai":
394
+ elif self.provider in ["openai", "azure_openai"]:
395
395
  m.tool_calls = True
396
396
  m.llama_index['env'] = [
397
397
  {
@@ -429,12 +429,35 @@ class Importer:
429
429
  'type': 'str'
430
430
  }
431
431
  ]
432
+ elif self.provider == "mistral_ai":
433
+ m.tool_calls = True
434
+ m.llama_index['args'] = [
435
+ {
436
+ 'name': 'api_key',
437
+ 'value': '{api_key_mistral}',
438
+ 'type': 'str'
439
+ }
440
+ ]
441
+ elif self.provider == "local_ai":
442
+ m.tool_calls = True
443
+ m.llama_index['env'] = [
444
+ {
445
+ 'name': 'OPENAI_API_KEY',
446
+ 'value': '{api_key}',
447
+ 'type': 'str'
448
+ },
449
+ {
450
+ 'name': 'OPENAI_API_BASE',
451
+ 'value': '{api_endpoint}',
452
+ 'type': 'str'
453
+ }
454
+ ]
432
455
  elif self.provider == "x_ai":
433
456
  m.tool_calls = True
434
457
  m.llama_index['env'] = [
435
458
  {
436
459
  'name': 'OPENAI_API_KEY',
437
- 'value': '{api_key_xai',
460
+ 'value': '{api_key_xai}',
438
461
  'type': 'str'
439
462
  },
440
463
  {
@@ -587,9 +610,9 @@ class Importer:
587
610
  :return: Dict with keys and values
588
611
  """
589
612
  excluded = [
590
- "azure_openai",
613
+ #"azure_openai",
591
614
  "huggingface_api",
592
- "mistral_ai",
615
+ #"mistral_ai",
593
616
  "local_ai",
594
617
  "perplexity",
595
618
  ]
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.24 02:00:00 #
9
+ # Updated Date: 2025.08.26 19:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List
@@ -92,9 +92,11 @@ class Loop(BaseRunner):
92
92
 
93
93
  ctx = context.ctx
94
94
  self.send_response(ctx, signals, KernelEvent.APPEND_BEGIN) # lock input, show stop btn
95
+
95
96
  history = context.history
96
97
  tools = self.window.core.agents.observer.evaluation.get_tools()
97
98
  mode = self.window.core.config.get('agent.llama.loop.mode', "score")
99
+
98
100
  prompt = ""
99
101
  if mode == "score":
100
102
  prompt = self.window.core.agents.observer.evaluation.get_prompt_score(history)
@@ -106,8 +108,18 @@ class Loop(BaseRunner):
106
108
  self.next_instruction = "" # reset
107
109
  self.prev_score = -1 # reset
108
110
 
111
+ # select evaluation model
112
+ eval_model = ctx.model
113
+ custom_model = self.window.core.config.get('agent.llama.eval_model', None)
114
+ if custom_model and custom_model != "_":
115
+ eval_model = custom_model
116
+
117
+ if self.is_verbose():
118
+ print("[Evaluation] Prompt:", prompt)
119
+ print("[Evaluation] Running with model:", eval_model)
120
+
109
121
  # run agent once
110
- self.run_once(prompt, tools, ctx.model) # tool will update evaluation
122
+ self.run_once(prompt, tools, eval_model) # tool will update evaluation
111
123
  return self.handle_evaluation(ctx, self.next_instruction, self.prev_score, signals)
112
124
 
113
125
  def handle_evaluation(
@@ -134,11 +146,17 @@ class Loop(BaseRunner):
134
146
  score=str(score)
135
147
  )
136
148
  self.set_status(signals, msg)
149
+
150
+ if self.is_verbose():
151
+ print("[Evaluation] Score:", score)
152
+
137
153
  if score < 0:
138
154
  self.send_response(ctx, signals, KernelEvent.APPEND_END)
139
155
  self.set_idle(signals)
140
156
  return True
141
157
  good_score = self.window.core.config.get("agent.llama.loop.score", 75)
158
+ if self.is_verbose():
159
+ print("[Evaluation] Score needed:", good_score)
142
160
  if score >= good_score != 0:
143
161
  msg = "{status_finished} {score_label}: {score}%".format(
144
162
  status_finished=trans('status.finished'),
@@ -146,6 +164,8 @@ class Loop(BaseRunner):
146
164
  score=str(score)
147
165
  )
148
166
  ctx.extra["agent_eval_finish"] = True
167
+ if self.is_verbose():
168
+ print("[Evaluation] Stopping. Finish with score:", score)
149
169
  self.send_response(ctx, signals, KernelEvent.APPEND_END, msg=msg)
150
170
  self.set_idle(signals)
151
171
  return True
@@ -182,5 +202,18 @@ class Loop(BaseRunner):
182
202
  "agent_idx": preset.idx,
183
203
  "agent_provider": preset.agent_provider,
184
204
  }
205
+ if preset.agent_openai:
206
+ extra["agent_provider"] = preset.agent_provider_openai
207
+ if self.is_verbose():
208
+ print("[Evaluation] Instruction:", instruction)
209
+ print("[Evaluation] Running next step...")
185
210
  context.model = self.window.core.models.get(self.window.core.config.get('model'))
186
- return self.window.core.agents.runner.call(context, extra, signals)
211
+ return self.window.core.agents.runner.call(context, extra, signals)
212
+
213
+ def is_verbose(self) -> bool:
214
+ """
215
+ Check if verbose mode is enabled
216
+
217
+ :return: True if verbose mode is enabled
218
+ """
219
+ return self.window.core.config.get("agent.llama.verbose", False)
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.07.13 01:00:00 #
9
+ # Updated Date: 2025.08.26 01:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import copy
@@ -496,12 +496,12 @@ class Context:
496
496
  :param documents: list of documents (optional)
497
497
  :return: list of doc IDs
498
498
  """
499
- model = None
499
+ model, model_item = self.get_selected_model("query")
500
500
  doc_ids = []
501
501
  if type == AttachmentItem.TYPE_FILE:
502
- doc_ids = self.window.core.idx.indexing.index_attachment(source, idx_path, model, documents)
502
+ doc_ids = self.window.core.idx.indexing.index_attachment(source, idx_path, model_item, documents)
503
503
  elif type == AttachmentItem.TYPE_URL:
504
- doc_ids = self.window.core.idx.indexing.index_attachment_web(source, idx_path, model, documents)
504
+ doc_ids = self.window.core.idx.indexing.index_attachment_web(source, idx_path, model_item, documents)
505
505
  if self.is_verbose():
506
506
  print("Attachments: indexed. Doc IDs: {}".format(doc_ids))
507
507
  return doc_ids
@@ -656,7 +656,7 @@ class Chat:
656
656
  """
657
657
  if model is None:
658
658
  model = self.window.core.models.from_defaults()
659
- llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
659
+ llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
660
660
  index = self.storage.get_ctx_idx(path, llm, embed_model)
661
661
 
662
662
  # 1. try to retrieve directly from index
@@ -1070,7 +1070,7 @@ class Indexing:
1070
1070
  if model is None:
1071
1071
  model = self.window.core.models.from_defaults()
1072
1072
 
1073
- llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
1073
+ llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
1074
1074
  index = self.window.core.idx.storage.get_ctx_idx(
1075
1075
  index_path,
1076
1076
  llm=llm,
@@ -1078,7 +1078,7 @@ class Indexing:
1078
1078
  ) # get or create ctx index
1079
1079
 
1080
1080
  idx = f"tmp:{index_path}" # tmp index id
1081
- self.window.core.idx.log(f"Indexing to context attachment index: {idx}...")
1081
+ self.window.core.idx.log(f"Indexing to context attachment index: {idx}... using model: {model.id}")
1082
1082
 
1083
1083
  doc_ids = []
1084
1084
  if documents is None:
@@ -1112,7 +1112,7 @@ class Indexing:
1112
1112
  if model is None:
1113
1113
  model = self.window.core.models.from_defaults()
1114
1114
 
1115
- llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
1115
+ llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False, auto_embed=True)
1116
1116
  index = self.window.core.idx.storage.get_ctx_idx(index_path, llm, embed_model) # get or create ctx index
1117
1117
 
1118
1118
  idx = f"tmp:{index_path}" # tmp index id
pygpt_net/core/idx/llm.py CHANGED
@@ -19,7 +19,7 @@ from llama_index.llms.openai import OpenAI
19
19
 
20
20
  from pygpt_net.core.types import (
21
21
  MODE_LLAMA_INDEX,
22
- MODEL_DEFAULT_MINI,
22
+ MODEL_DEFAULT_MINI, MODE_CHAT,
23
23
  )
24
24
  from pygpt_net.item.model import ModelItem
25
25
 
@@ -128,14 +128,73 @@ class Llm:
128
128
  self,
129
129
  model: Optional[ModelItem] = None,
130
130
  stream: bool = False,
131
+ auto_embed: bool = False,
131
132
  ):
132
133
  """
133
134
  Get service context + embeddings provider
134
135
 
135
136
  :param model: Model item (for query)
136
137
  :param stream: Stream mode (True to enable streaming)
138
+ :param auto_embed: Auto-detect embeddings provider based on model capabilities
137
139
  :return: Service context instance
138
140
  """
139
141
  llm = self.get(model=model, stream=stream)
140
- embed_model = self.get_embeddings_provider()
142
+ if not auto_embed:
143
+ embed_model = self.get_embeddings_provider()
144
+ else:
145
+ embed_model = self.get_custom_embed_provider(model=model)
141
146
  return llm, embed_model
147
+
148
+
149
+ def get_custom_embed_provider(self, model: Optional[ModelItem] = None) -> Optional[BaseEmbedding]:
150
+ """
151
+ Get custom embeddings provider based on model
152
+
153
+ :param model: Model item
154
+ :return: Embeddings provider instance or None
155
+ """
156
+ # base_embedding_provider = self.window.core.config.get("llama.idx.embeddings.provider", self.default_embed)
157
+ # if base_embedding_provider == model.provider:
158
+ # return self.get_embeddings_provider()
159
+
160
+ embed_model = None
161
+ args = []
162
+
163
+ # try to get custom args from config for the model provider
164
+ is_custom_provider = False
165
+ default = self.window.core.config.get("llama.idx.embeddings.default", [])
166
+ for item in default:
167
+ provider = item.get("provider", "")
168
+ if provider and provider == model.provider:
169
+ is_custom_provider = True
170
+ m = ModelItem()
171
+ m.provider = model.provider
172
+ client_args = self.window.core.models.prepare_client_args(MODE_CHAT, m)
173
+ model_name = item.get("model", "")
174
+ if not model_name:
175
+ model_name = model.id # fallback to model id if not set in config (Ollama, etc)
176
+ args = [
177
+ {
178
+ "name": "model_name",
179
+ "type": "str",
180
+ "value": model_name,
181
+ }
182
+ ]
183
+ if model.provider != "ollama":
184
+ args.append(
185
+ {
186
+ "name": "api_key",
187
+ "type": "str",
188
+ "value": client_args.get("api_key", ""),
189
+ }
190
+ )
191
+ break
192
+
193
+ if is_custom_provider:
194
+ embed_model = self.window.core.llm.llms[model.provider].get_embeddings_model(
195
+ window=self.window,
196
+ config=args,
197
+ )
198
+ if not embed_model:
199
+ embed_model = self.get_embeddings_provider()
200
+ return embed_model
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.23",
4
- "app.version": "2.6.23",
5
- "updated_at": "2025-08-25T00:00:00"
3
+ "version": "2.6.25",
4
+ "app.version": "2.6.25",
5
+ "updated_at": "2025-08-26T00:00:00"
6
6
  },
7
7
  "access.audio.event.speech": false,
8
8
  "access.audio.event.speech.disabled": [],
@@ -52,6 +52,8 @@
52
52
  "agent.idx": "base",
53
53
  "agent.idx.auto_retrieve": false,
54
54
  "agent.iterations": 3,
55
+ "agent.llama.append_eval": false,
56
+ "agent.llama.eval_model": "_",
55
57
  "agent.llama.idx": "base",
56
58
  "agent.llama.loop.enabled": false,
57
59
  "agent.llama.loop.score": 75,
@@ -83,6 +85,7 @@
83
85
  "api_key_perplexity": "",
84
86
  "api_key_xai": "",
85
87
  "api_key_mistral": "",
88
+ "api_key_voyage": "",
86
89
  "api_proxy": "",
87
90
  "api_use_responses": true,
88
91
  "api_use_responses_llama": false,
@@ -238,7 +241,7 @@
238
241
  "llama.idx.embeddings.provider": "openai",
239
242
  "llama.idx.embeddings.args": [
240
243
  {
241
- "name": "model",
244
+ "name": "model_name",
242
245
  "value": "text-embedding-3-small",
243
246
  "type": "str"
244
247
  }
@@ -253,6 +256,40 @@
253
256
  "value": "{api_endpoint}"
254
257
  }
255
258
  ],
259
+ "llama.idx.embeddings.default": [
260
+ {
261
+ "provider": "anthropic",
262
+ "model": "voyage-3.5"
263
+ },
264
+ {
265
+ "provider": "deepseek_api",
266
+ "model": "voyage-3.5"
267
+ },
268
+ {
269
+ "provider": "google",
270
+ "model": "gemini-embedding-001"
271
+ },
272
+ {
273
+ "provider": "openai",
274
+ "model": "text-embedding-3-small"
275
+ },
276
+ {
277
+ "provider": "azure_openai",
278
+ "model": "text-embedding-3-small"
279
+ },
280
+ {
281
+ "provider": "mistral_ai",
282
+ "model": "mistral-embed"
283
+ },
284
+ {
285
+ "provider": "ollama",
286
+ "model": ""
287
+ },
288
+ {
289
+ "provider": "x_ai",
290
+ "model": ""
291
+ }
292
+ ],
256
293
  "llama.idx.embeddings.limit.rpm": 100,
257
294
  "llama.idx.excluded.ext": "3g2,3gp,7z,a,aac,aiff,alac,apk,apk,apng,app,ar,avif,bin,cab,class,deb,deb,dll,dmg,dmg,drv,dsd,dylib,dylib,ear,egg,elf,esd,exe,flac,flv,heic,heif,ico,img,iso,jar,ko,lib,lz,lz4,m2v,mpc,msi,nrg,o,ogg,ogv,pcm,pkg,pkg,psd,pyc,rar,rpm,rpm,so,so,svg,swm,sys,vdi,vhd,vhdx,vmdk,vob,war,whl,wim,wma,wmv,xz,zst",
258
295
  "llama.idx.excluded.force": false,
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "__meta__": {
3
- "version": "2.6.23",
4
- "app.version": "2.6.23",
5
- "updated_at": "2025-08-25T23:07:35"
3
+ "version": "2.6.25",
4
+ "app.version": "2.6.25",
5
+ "updated_at": "2025-08-26T23:07:35"
6
6
  },
7
7
  "items": {
8
8
  "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -369,6 +369,25 @@
369
369
  "advanced": false,
370
370
  "tab": "Mistral AI"
371
371
  },
372
+ "api_key_voyage": {
373
+ "section": "api_keys",
374
+ "type": "text",
375
+ "slider": false,
376
+ "label": "settings.api_key.voyage",
377
+ "description": "settings.api_key.voyage.desc",
378
+ "value": "",
379
+ "min": null,
380
+ "max": null,
381
+ "multiplier": null,
382
+ "step": null,
383
+ "extra": {
384
+ "bold": true
385
+ },
386
+ "secret": true,
387
+ "persist": true,
388
+ "advanced": false,
389
+ "tab": "Voyage"
390
+ },
372
391
  "app.env": {
373
392
  "section": "general",
374
393
  "type": "dict",
@@ -1697,6 +1716,24 @@
1697
1716
  "advanced": false,
1698
1717
  "tab": "embeddings"
1699
1718
  },
1719
+ "llama.idx.embeddings.default": {
1720
+ "section": "llama-index",
1721
+ "type": "dict",
1722
+ "keys": {
1723
+ "provider": {
1724
+ "type": "combo",
1725
+ "use": "embeddings_providers"
1726
+ },
1727
+ "model": {
1728
+ "type": "str"
1729
+ }
1730
+ },
1731
+ "label": "settings.llama.idx.embeddings.default",
1732
+ "description": "settings.llama.idx.embeddings.default.desc",
1733
+ "value": [],
1734
+ "advanced": false,
1735
+ "tab": "embeddings"
1736
+ },
1700
1737
  "llama.idx.recursive": {
1701
1738
  "section": "llama-index",
1702
1739
  "type": "bool",
@@ -1917,6 +1954,24 @@
1917
1954
  "advanced": false,
1918
1955
  "tab": "llama"
1919
1956
  },
1957
+ "agent.llama.eval_model": {
1958
+ "section": "agent",
1959
+ "description": "settings.agent.llama.eval_model.desc",
1960
+ "type": "combo",
1961
+ "use": "models",
1962
+ "use_params": {
1963
+ "allow_empty": true
1964
+ },
1965
+ "slider": true,
1966
+ "label": "settings.agent.llama.eval_model",
1967
+ "value": null,
1968
+ "min": 0,
1969
+ "max": 100,
1970
+ "multiplier": 1,
1971
+ "step": 1,
1972
+ "advanced": false,
1973
+ "tab": "llama"
1974
+ },
1920
1975
  "agent.llama.append_eval": {
1921
1976
  "section": "agent",
1922
1977
  "description": "settings.agent.llama.append_eval.desc",
@@ -1930,7 +1985,7 @@
1930
1985
  "step": 1,
1931
1986
  "advanced": false,
1932
1987
  "tab": "llama"
1933
- },
1988
+ },
1934
1989
  "agent.openai.response.split": {
1935
1990
  "section": "agent",
1936
1991
  "description": "settings.agent.openai.response.split.desc",
@@ -72,7 +72,53 @@ action.use.read_cmd = Bitten, diese Datei zu lesen...
72
72
  action.video.open = Video oder Audio öffnen...
73
73
  action.video.play = Video oder Audio abspielen...
74
74
  action.video.transcribe = Ton transkribieren...
75
+ agent.coder.additional.label = Zusätzlicher Prompt
76
+ agent.coder.additional.prompt.desc = Zusätzlicher Prompt für Agent (wird zum Basis-Prompt hinzugefügt)
77
+ agent.coder.base.label = Basis-Prompt
78
+ agent.coder.base.prompt.desc = Code ausführen prompt (initial)
79
+ agent.eval.feedback = Feedback
80
+ agent.eval.next = Neu ausführen mit Feedback
81
+ agent.eval.score = Bewertungsnote
82
+ agent.eval.score.good = Antwort ist gut genug, beenden.
83
+ agent.evolve.generation = Generation
84
+ agent.evolve.maxgen_limit = Maximale Generationen erreicht, beenden.
85
+ agent.evolve.option.max_generations = Maximale Generationen
86
+ agent.evolve.option.num_parents = Anzahl der Eltern
87
+ agent.evolve.running = Laufender Agent
88
+ agent.evolve.winner = Gewinner: Agent
75
89
  agent.infinity.confirm.content = WARNUNG: Sie versuchen, eine unendliche Schleife zu starten! Dies kann zu einem hohen Tokenverbrauch führen. Sind Sie sicher, dass Sie fortfahren möchten?
90
+ agent.name.supervisor = Supervisor
91
+ agent.name.worker = Arbeiter
92
+ agent.option.model = Modell
93
+ agent.option.name = Name
94
+ agent.option.prompt = Prompt
95
+ agent.option.prompt.b1.desc = Prompt für Bot 1
96
+ agent.option.prompt.b2.desc = Prompt für Bot 2
97
+ agent.option.prompt.base.desc = Prompt für Basis-Agent
98
+ agent.option.prompt.chooser.desc = Prompt für Wähler-Agent
99
+ agent.option.prompt.feedback.desc = Prompt zur Feedback-Evaluierung
100
+ agent.option.prompt.planner.desc = Prompt für Planer-Agent
101
+ agent.option.prompt.search.desc = Prompt für Such-Agent
102
+ agent.option.prompt.supervisor.desc = Prompt für Supervisor
103
+ agent.option.prompt.worker.desc = Prompt für Arbeiter
104
+ agent.option.section.base = Basisagent
105
+ agent.option.section.chooser = Wähler
106
+ agent.option.section.feedback = Feedback
107
+ agent.option.section.planner = Planer
108
+ agent.option.section.search = Suche
109
+ agent.option.section.supervisor = Supervisor
110
+ agent.option.section.worker = Arbeiter
111
+ agent.option.section.writer = Schreiber
112
+ agent.option.tools.local = Lokale Werkzeuge zulassen
113
+ agent.option.tools.local.desc = Nutzung lokaler Werkzeuge für diesen Agenten zulassen
114
+ agent.option.tools.remote = Entfernte Werkzeuge zulassen
115
+ agent.option.tools.remote.desc = Nutzung entfernter Werkzeuge für diesen Agenten zulassen
116
+ agent.planner.plan.label = Planer (initial)
117
+ agent.planner.plan.prompt.desc = Initialer Plan-Prompt
118
+ agent.planner.refine.label = Planer (verfeinern)
119
+ agent.planner.refine.prompt.desc = Plan verfeinern prompt
120
+ agent.planner.step.label = Prompt ausführen
121
+ agent.planner.step.prompt.desc = Schritte ausführen prompt
76
122
  alert.preset.empty_id = Name ist erforderlich.
77
123
  alert.preset.no_chat_completion = Mindestens eine der Optionen: Chat, Vervollständigung, Bild oder Vision wird benötigt!
78
124
  alert.snap.file_manager = Snap erkannt. Bitte das Verzeichnis manuell in Ihrem Dateimanager öffnen: