pygpt-net 2.6.28__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff compares the contents of two publicly released package versions as published to their public registry, and is provided for informational purposes only.
Files changed (115)
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/common.py +2 -0
  20. pygpt_net/controller/theme/theme.py +6 -2
  21. pygpt_net/controller/ui/vision.py +4 -4
  22. pygpt_net/core/agents/legacy.py +2 -2
  23. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  24. pygpt_net/core/assistants/files.py +5 -5
  25. pygpt_net/core/assistants/store.py +4 -4
  26. pygpt_net/core/bridge/bridge.py +3 -3
  27. pygpt_net/core/bridge/worker.py +28 -9
  28. pygpt_net/core/debug/console/console.py +2 -2
  29. pygpt_net/core/debug/presets.py +2 -2
  30. pygpt_net/core/experts/experts.py +2 -2
  31. pygpt_net/core/idx/llm.py +21 -3
  32. pygpt_net/core/modes/modes.py +2 -2
  33. pygpt_net/core/presets/presets.py +3 -3
  34. pygpt_net/core/tokens/tokens.py +4 -4
  35. pygpt_net/core/types/mode.py +5 -2
  36. pygpt_net/core/vision/analyzer.py +1 -1
  37. pygpt_net/data/config/config.json +6 -3
  38. pygpt_net/data/config/models.json +75 -3
  39. pygpt_net/data/config/modes.json +3 -9
  40. pygpt_net/data/config/settings.json +112 -55
  41. pygpt_net/data/config/settings_section.json +2 -2
  42. pygpt_net/data/locale/locale.de.ini +2 -2
  43. pygpt_net/data/locale/locale.en.ini +9 -2
  44. pygpt_net/data/locale/locale.es.ini +2 -2
  45. pygpt_net/data/locale/locale.fr.ini +2 -2
  46. pygpt_net/data/locale/locale.it.ini +2 -2
  47. pygpt_net/data/locale/locale.pl.ini +3 -3
  48. pygpt_net/data/locale/locale.uk.ini +2 -2
  49. pygpt_net/data/locale/locale.zh.ini +2 -2
  50. pygpt_net/item/model.py +23 -3
  51. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  52. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  53. pygpt_net/provider/agents/openai/agent.py +5 -5
  54. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  56. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  57. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  58. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  59. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  62. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  63. pygpt_net/provider/agents/openai/evolve.py +5 -5
  64. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  65. pygpt_net/provider/api/__init__.py +27 -0
  66. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  67. pygpt_net/provider/api/google/__init__.py +262 -0
  68. pygpt_net/provider/api/google/audio.py +114 -0
  69. pygpt_net/provider/api/google/chat.py +552 -0
  70. pygpt_net/provider/api/google/image.py +287 -0
  71. pygpt_net/provider/api/google/tools.py +222 -0
  72. pygpt_net/provider/api/google/vision.py +129 -0
  73. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  74. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  77. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  79. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  81. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  82. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  83. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  84. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  85. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  86. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  87. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  88. pygpt_net/provider/audio_output/google_tts.py +12 -0
  89. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  90. pygpt_net/provider/core/config/patch.py +11 -0
  91. pygpt_net/provider/core/model/patch.py +9 -0
  92. pygpt_net/provider/core/preset/json_file.py +2 -4
  93. pygpt_net/provider/llms/anthropic.py +2 -5
  94. pygpt_net/provider/llms/base.py +4 -3
  95. pygpt_net/provider/llms/openai.py +1 -1
  96. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  97. pygpt_net/ui/dialog/preset.py +71 -55
  98. pygpt_net/ui/main.py +6 -4
  99. pygpt_net/utils.py +9 -0
  100. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +42 -48
  101. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +115 -107
  102. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  111. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  112. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  113. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  114. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  115. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0
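
The central change in this release is a provider reorganization: the OpenAI-specific tree moves from pygpt_net/provider/gpt to pygpt_net/provider/api/openai, new Google and Anthropic wrappers join it under pygpt_net/provider/api, and every call site migrates from window.core.gpt.* to window.core.api.openai.*, as the hunks below show. A minimal sketch of the facade this implies; everything beyond the names visible in the diff is an assumption, not the actual pygpt_net/provider/api/__init__.py:

    class OpenAIApi:
        """Stand-in for the relocated OpenAI wrapper (was window.core.gpt)."""
        def __init__(self, window=None):
            self.window = window

        def get_client(self, mode=None, model=None):
            """Return a configured OpenAI client (implementation elided)."""
            raise NotImplementedError

    class Api:
        """Hypothetical container exposed as window.core.api."""
        def __init__(self, window=None):
            self.window = window
            self.openai = OpenAIApi(window)  # moved from provider/gpt
            # 2.6.30 also mounts google and anthropic wrappers here

    # Call sites migrate mechanically, e.g.:
    #   self.window.core.gpt.get_client() -> self.window.core.api.openai.get_client()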

pygpt_net/provider/{gpt → api/openai}/chat.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.02 20:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #
 
 import json
@@ -16,7 +16,11 @@ from typing import Optional, Dict, Any, List
 from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_VISION,
-    MODE_AUDIO, MULTIMODAL_IMAGE, MODE_RESEARCH, MODE_COMPLETION, OPENAI_DISABLE_TOOLS,
+    MODE_AUDIO,
+    MULTIMODAL_IMAGE,
+    MODE_RESEARCH,
+    MODE_COMPLETION,
+    OPENAI_DISABLE_TOOLS,
 )
 from pygpt_net.core.bridge.context import BridgeContext, MultimodalContext
 from pygpt_net.item.ctx import CtxItem
@@ -66,7 +70,7 @@ class Chat:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx
 
-        client = self.window.core.gpt.get_client(mode, context.model)
+        client = self.window.core.api.openai.get_client(mode, context.model)
 
         # build chat messages
         messages = self.build(
@@ -94,7 +98,7 @@ class Chat:
         response_kwargs = {}
 
         # tools / functions
-        tools = self.window.core.gpt.tools.prepare(model, functions)
+        tools = self.window.core.api.openai.tools.prepare(model, functions)
 
         # fix: o1 compatibility
         if (model.id is not None
@@ -138,6 +142,9 @@ class Chat:
                 "format": "wav"
             }
 
+        if stream:
+            response_kwargs['stream_options'] = {"include_usage": True}
+
         response = client.chat.completions.create(
             messages=messages,
             model=model.id,
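
With streaming enabled, the new stream_options flag asks the Chat Completions API to emit one extra final chunk carrying token usage; that chunk's choices list is empty, so consumers must not index into it blindly. A minimal consumer sketch (model name illustrative; assumes an initialized openai.OpenAI() client):

    from openai import OpenAI

    client = OpenAI()
    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    usage = None
    for chunk in stream:
        if chunk.choices:  # regular delta chunks
            print(chunk.choices[0].delta.content or "", end="")
        if chunk.usage:    # final chunk: choices == [], usage is populated
            usage = chunk.usage
    if usage:
        print(usage.prompt_tokens, usage.completion_tokens, usage.total_tokens)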
@@ -188,7 +195,7 @@ class Chat:
         max_ctx_tokens = self.window.core.config.get('max_total_tokens')  # max context window
 
         # fit to max model tokens
-        if max_ctx_tokens > model.ctx and model.ctx > 0:
+        if max_ctx_tokens > model.ctx > 0:
             max_ctx_tokens = model.ctx
 
         # input tokens: reset
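
The rewritten guard uses Python's comparison chaining: max_ctx_tokens > model.ctx > 0 evaluates as max_ctx_tokens > model.ctx and model.ctx > 0, with model.ctx evaluated once. A quick check:

    # chained comparison is equivalent to the explicit conjunction
    assert (8192 > 4096 > 0) == (8192 > 4096 and 4096 > 0)
    assert not (8192 > 0 > 4096)  # operand order still matters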
@@ -319,12 +326,12 @@ class Chat:
         if not is_tool_output:  # append current prompt only if not tool output
             content = str(prompt)
             if model.is_image_input():
-                content = self.window.core.gpt.vision.build_content(
+                content = self.window.core.api.openai.vision.build_content(
                     content=content,
                     attachments=attachments,
                 )
             if model.is_audio_input():
-                content = self.window.core.gpt.audio.build_content(
+                content = self.window.core.api.openai.audio.build_content(
                     content=content,
                     multimodal_ctx=multimodal_ctx,
                 )
@@ -367,7 +374,7 @@ class Chat:
             output = response.choices[0].text.strip()
         elif mode in [
             MODE_CHAT,
-            MODE_VISION,
+            # MODE_VISION,
             MODE_RESEARCH,
         ]:
             if response.choices[0]:

pygpt_net/provider/{gpt → api/openai}/completion.py
@@ -76,7 +76,7 @@ class Completion:
         if user_name is not None and user_name != '':
             stop = [user_name + ':']
 
-        client = self.window.core.gpt.get_client()
+        client = self.window.core.api.openai.get_client()
 
         # fix for deprecated OpenAI davinci models
         if model_id.startswith('text-davinci'):

pygpt_net/provider/{gpt → api/openai}/image.py
@@ -74,7 +74,7 @@ class Image:
         # worker
         self.worker = ImageWorker()
         self.worker.window = self.window
-        self.worker.client = self.window.core.gpt.get_client()
+        self.worker.client = self.window.core.api.openai.get_client()
         self.worker.ctx = ctx
         self.worker.mode = sub_mode  # mode can be "generate" or "edit"
         self.worker.attachments = attachments  # attachments for edit mode

pygpt_net/provider/{gpt → api/openai}/remote_tools.py
@@ -113,7 +113,7 @@ class RemoteTools:
         # extend local tools with remote tools
         if enabled["computer_use"]:
             if not model.id in OPENAI_REMOTE_TOOL_DISABLE_COMPUTER_USE:
-                tools.append(self.window.core.gpt.computer.get_tool())
+                tools.append(self.window.core.api.openai.computer.get_tool())
         else:
             if not model.id in OPENAI_REMOTE_TOOL_DISABLE_WEB_SEARCH:
                 if enabled["web_search"]:

pygpt_net/provider/{gpt → api/openai}/responses.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.05 00:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #
 
 import base64
@@ -92,7 +92,7 @@ class Responses:
         user_name = ctx.input_name  # from ctx
         ai_name = ctx.output_name  # from ctx
 
-        client = self.window.core.gpt.get_client(mode, model)
+        client = self.window.core.api.openai.get_client(mode, model)
 
         # build chat messages
         messages = self.build(
@@ -122,7 +122,7 @@ class Responses:
         response_kwargs = {}
 
         # tools / functions
-        tools = self.window.core.gpt.tools.prepare_responses_api(model, functions)
+        tools = self.window.core.api.openai.tools.prepare_responses_api(model, functions)
 
         # extra arguments, o3 only
         if model.extra and "reasoning_effort" in model.extra:
@@ -130,7 +130,7 @@ class Responses:
             response_kwargs['reasoning']['effort'] = model.extra["reasoning_effort"]
 
         # append remote tools
-        tools = self.window.core.gpt.remote_tools.append_to_tools(
+        tools = self.window.core.api.openai.remote_tools.append_to_tools(
             mode=mode,
             model=model,
             stream=stream,
@@ -250,20 +250,31 @@
             used_tokens,
             max_ctx_tokens,
         )
+
+        has_response_id_in_last_item = False
+        if items and len(items) > 0:
+            last_item = items[-1]
+            if last_item and last_item.msg_id:
+                has_response_id_in_last_item = True
+
         for item in items:
             # input
             if item.final_input is not None and item.final_input != "":
-                messages.append({
-                    "role": "user",
-                    "content": item.final_input,
-                })
+                if not has_response_id_in_last_item:
+                    messages.append({
+                        "role": "user",
+                        "content": item.final_input,
+                    })
 
             # output
             if item.final_output is not None and item.final_output != "":
-                msg = {
-                    "role": "assistant",
-                    "content": item.final_output,
-                }
+                if not has_response_id_in_last_item:
+                    msg = {
+                        "role": "assistant",
+                        "content": item.final_output,
+                    }
+                else:
+                    msg = {}
                 # append previous audio ID
                 if MODE_AUDIO in model.mode:
                     if item.audio_id:
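
The new has_response_id_in_last_item guard skips replaying past turns once the last context item carries a stored response ID (msg_id), presumably because the Responses API can thread conversation state server-side, so resending the transcript would duplicate it. A sketch of that server-side threading (model name illustrative; assumes an initialized openai.OpenAI() client):

    from openai import OpenAI

    client = OpenAI()
    first = client.responses.create(model="gpt-4o-mini", input="My name is Ada.")
    followup = client.responses.create(
        model="gpt-4o-mini",
        previous_response_id=first.id,  # server restores the earlier turns
        input="What is my name?",       # caller resends no transcript
    )
    print(followup.output_text)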
@@ -281,7 +292,9 @@
                     msg["audio"] = {
                         "id": self.audio_prev_id
                     }
-                messages.append(msg)
+
+                if msg:
+                    messages.append(msg)
 
         # ---- tool output ----
         is_tool_output = False  # reset tool output flag
@@ -340,7 +353,7 @@
 
             # computer call output
             elif output_type == "computer_call":
-                base64img = self.window.core.gpt.vision.get_attachment(attachments)
+                base64img = self.window.core.api.openai.vision.get_attachment(attachments)
                 if base64img and "call_id" in tool_call:
                     if tool_call["call_id"]:
                         # tool output
@@ -382,13 +395,13 @@
         if (model.is_image_input()
                 and mode != MODE_COMPUTER
                 and not model.id.startswith("computer-use")):
-            content = self.window.core.gpt.vision.build_content(
+            content = self.window.core.api.openai.vision.build_content(
                 content=content,
                 attachments=attachments,
                 responses_api=True,
             )
         if model.is_audio_input():
-            content = self.window.core.gpt.audio.build_content(
+            content = self.window.core.api.openai.audio.build_content(
                 content=content,
                 multimodal_ctx=multimodal_ctx,
             )
@@ -404,6 +417,7 @@
             messages,
             model.id,
         )
+
         return messages
 
     def reset_tokens(self):
@@ -431,7 +445,7 @@
 
         if mode in [
             MODE_CHAT,
-            MODE_VISION,
+            # MODE_VISION,
             MODE_RESEARCH,
             MODE_COMPUTER,
         ]:
@@ -499,7 +513,7 @@
         id = output.id
         call_id = output.call_id
         action = output.action
-        tool_calls, is_call = self.window.core.gpt.computer.handle_action(
+        tool_calls, is_call = self.window.core.api.openai.computer.handle_action(
             id=id,
             call_id=call_id,
             action=action,
@@ -567,7 +581,7 @@
         if files:
             self.window.core.debug.info("[chat] Container files found, downloading...")
             try:
-                self.window.core.gpt.container.download_files(ctx, files)
+                self.window.core.api.openai.container.download_files(ctx, files)
             except Exception as e:
                 self.window.core.debug.error(f"[chat] Error downloading container files: {e}")
 
@@ -631,7 +645,7 @@
         if files:
             self.window.core.debug.info("[chat] Container files found, downloading...")
             try:
-                self.window.core.gpt.container.download_files(ctx, files)
+                self.window.core.api.openai.container.download_files(ctx, files)
             except Exception as e:
                 self.window.core.debug.error(f"[chat] Error downloading container files: {e}")
 

pygpt_net/provider/{gpt → api/openai}/store.py
@@ -30,7 +30,7 @@ class Store:
 
         :return: OpenAI client
         """
-        return self.window.core.gpt.get_client()
+        return self.window.core.api.openai.get_client()
 
     def log(
             self,
@@ -92,7 +92,7 @@ class Store:
         :param file_id: file ID
         :param path: path to save file
         """
-        client = self.window.core.gpt.get_client()
+        client = self.window.core.api.openai.get_client()
         content = client.files.content(file_id)
         data = content.read()
         with open(path, 'wb', ) as f:

pygpt_net/provider/{gpt → api/openai}/vision.py
@@ -54,7 +54,7 @@ class Vision:
         attachments = context.attachments
         model = context.model
         model_id = model.id
-        client = self.window.core.gpt.get_client()
+        client = self.window.core.api.openai.get_client()
 
         # extra API kwargs
         response_kwargs = {}

pygpt_net/provider/{gpt → api/openai}/worker/assistants.py
@@ -565,7 +565,7 @@ class Worker(QRunnable):
         """
         try:
             if self.stream:  # stream mode
-                run = self.window.core.gpt.assistants.run_create_stream(
+                run = self.window.core.api.openai.assistants.run_create_stream(
                     self.signals,
                     self.ctx,
                     self.thread_id,
@@ -575,7 +575,7 @@ class Worker(QRunnable):
                 )
             else:
                 # not stream mode
-                run = self.window.core.gpt.assistants.run_create(
+                run = self.window.core.api.openai.assistants.run_create(
                     self.thread_id,
                     self.assistant_id,
                     self.model,
@@ -596,7 +596,7 @@ class Worker(QRunnable):
         :return: result
         """
         try:
-            response = self.window.core.gpt.assistants.msg_send(
+            response = self.window.core.api.openai.assistants.msg_send(
                 self.thread_id,
                 self.prompt,
                 self.file_ids,
@@ -615,7 +615,7 @@ class Worker(QRunnable):
         :return: result
         """
         try:
-            run = self.window.core.gpt.assistants.run_submit_tool(self.ctx, self.tools_outputs)
+            run = self.window.core.api.openai.assistants.run_submit_tool(self.ctx, self.tools_outputs)
             if run is not None:
                 self.ctx.run_id = run.id  # update run id
                 self.signals.finished.emit(self.ctx, run, False)  # continue status check

pygpt_net/provider/{gpt → api/openai}/worker/importer.py
@@ -237,7 +237,7 @@ class ImportWorker(QRunnable):
             self.log("Importing assistants...")
             self.window.core.assistants.clear()
             items = self.window.core.assistants.get_all()
-            self.window.core.gpt.assistants.import_all(items, callback=self.callback)
+            self.window.core.api.openai.assistants.import_all(items, callback=self.callback)
             self.window.core.assistants.items = items
             self.window.core.assistants.save()
 
@@ -266,7 +266,7 @@ class ImportWorker(QRunnable):
             self.log("Importing vector stores...")
             self.window.core.assistants.store.clear()
             items = {}
-            self.window.core.gpt.store.import_stores(items, callback=self.callback)
+            self.window.core.api.openai.store.import_stores(items, callback=self.callback)
             self.window.core.assistants.store.import_items(items)
             if not silent:
                 self.signals.finished.emit("vector_stores", self.store_id, len(items))
@@ -285,7 +285,7 @@ class ImportWorker(QRunnable):
         """
         try:
             self.log("Truncating stores...")
-            num = self.window.core.gpt.store.remove_all(callback=self.callback)
+            num = self.window.core.api.openai.store.remove_all(callback=self.callback)
             self.window.core.assistants.store.items = {}
             self.window.core.assistants.store.save()
             if not silent:
@@ -336,12 +336,12 @@ class ImportWorker(QRunnable):
                 self.log("Truncating all files...")
                 self.window.core.assistants.files.truncate()  # clear all files
                 # remove all files in API
-                num = self.window.core.gpt.store.remove_files(callback=self.callback)
+                num = self.window.core.api.openai.store.remove_files(callback=self.callback)
             else:
                 self.log("Truncating files for store: {}".format(self.store_id))
                 self.window.core.assistants.files.truncate(self.store_id)  # clear store files, remove from stores / DB
                 # remove store files in API
-                num = self.window.core.gpt.store.remove_store_files(
+                num = self.window.core.api.openai.store.remove_store_files(
                     self.store_id,
                     callback=self.callback,
                 )
@@ -365,14 +365,14 @@ class ImportWorker(QRunnable):
             self.log("Uploading files...")
             for file in self.files:
                 try:
-                    file_id = self.window.core.gpt.store.upload(file)
+                    file_id = self.window.core.api.openai.store.upload(file)
                     if file_id is not None:
-                        stored_file = self.window.core.gpt.store.add_file(
+                        stored_file = self.window.core.api.openai.store.add_file(
                             self.store_id,
                             file_id,
                         )
                         if stored_file is not None:
-                            data = self.window.core.gpt.store.get_file(file_id)
+                            data = self.window.core.api.openai.store.get_file(file_id)
                             self.window.core.assistants.files.insert(self.store_id, data)  # insert to DB
                             msg = "Uploaded file: {}/{}".format((num + 1), len(self.files))
                             self.signals.status.emit("upload_files", msg)
@@ -403,11 +403,11 @@ class ImportWorker(QRunnable):
             if self.store_id is None:
                 self.log("Importing all files...")
                 self.window.core.assistants.files.truncate_local()  # clear local DB (all)
-                num = self.window.core.gpt.store.import_stores_files(self.callback)  # import all files
+                num = self.window.core.api.openai.store.import_stores_files(self.callback)  # import all files
             else:
                 self.log("Importing files for store: {}".format(self.store_id))
                 self.window.core.assistants.files.truncate_local(self.store_id)  # clear local DB (all)
-                items = self.window.core.gpt.store.import_store_files(
+                items = self.window.core.api.openai.store.import_store_files(
                     self.store_id,
                     [],
                     callback=self.callback,

pygpt_net/provider/audio_input/openai_whisper.py
@@ -43,7 +43,7 @@ class OpenAIWhisper(BaseProvider):
         :param path: path to audio file to transcribe
         :return: transcribed text
         """
-        client = self.plugin.window.core.gpt.get_client()
+        client = self.plugin.window.core.api.openai.get_client()
         with open(path, "rb") as audio_file:
             return client.audio.transcriptions.create(
                 model=self.plugin.get_option_value('whisper_model'),

pygpt_net/provider/audio_output/google_tts.py
@@ -58,6 +58,18 @@ class GoogleTextToSpeech(BaseProvider):
                 "Voices": "https://cloud.google.com/text-to-speech/docs/voices"
             },
         )
+        self.plugin.add_option(
+            "google_voice_native",
+            type="text",
+            value="Kore",
+            label="Voice (Gemini API)",
+            tab="google",
+            description="Specify voice for Gemini API (supported voices may differ)",
+            tooltip="Voice name",
+            urls={
+                "Voices": "https://ai.google.dev/gemini-api/docs/speech-generation"
+            },
+        )
         self.plugin.add_option(
             "google_lang",
             type="text",

pygpt_net/provider/audio_output/openai_tts.py
@@ -60,7 +60,7 @@ class OpenAITextToSpeech(BaseProvider):
         :param text: text to speech
         :return: path to generated audio file or None if audio playback is handled here
         """
-        client = self.plugin.window.core.gpt.get_client()
+        client = self.plugin.window.core.api.openai.get_client()
         output_file = self.plugin.output_file
         voice = self.plugin.get_option_value('openai_voice')
         model = self.plugin.get_option_value('openai_model')

pygpt_net/provider/core/config/patch.py
@@ -2355,6 +2355,17 @@ class Patch:
             self.window.core.updater.patch_css('web-chatgpt_wide.light.css', True)
             updated = True
 
+        # < 2.6.30
+        if old < parse_version("2.6.30"):
+            print("Migrating config from < 2.6.30...")
+            if "api_native_google" not in data:
+                data["api_native_google"] = True
+            if "remote_tools.google.web_search" not in data:
+                data["remote_tools.google.web_search"] = True
+            if "remote_tools.google.code_interpreter" not in data:
+                data["remote_tools.google.code_interpreter"] = False
+            updated = True
+
         # update file
         migrated = False
         if updated:
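
Both patchers in this release follow the same idempotent migration pattern: gate on the stored version with parse_version, add only keys that are missing, and flag updated so the file is rewritten once. A reduced sketch of the pattern (standalone names, not the actual Patch class):

    from packaging.version import parse as parse_version

    def migrate_2_6_30(data: dict, old: str) -> bool:
        """Add settings introduced in 2.6.30 if absent; report whether data changed."""
        if parse_version(old) >= parse_version("2.6.30"):
            return False
        defaults = {
            "api_native_google": True,
            "remote_tools.google.web_search": True,
            "remote_tools.google.code_interpreter": False,
        }
        for key, value in defaults.items():
            data.setdefault(key, value)  # never overwrite an existing user value
        return True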

pygpt_net/provider/core/model/patch.py
@@ -763,6 +763,15 @@ class Patch:
             model.mode.append(MODE_AGENT_OPENAI)
             updated = True
 
+        # < 2.6.30  <--- add Google Imagen models
+        if old < parse_version("2.6.30"):
+            print("Migrating models from < 2.6.30...")
+            if "imagen-3.0-generate-002" not in data:
+                data["imagen-3.0-generate-002"] = base_data["imagen-3.0-generate-002"]
+            if "imagen-4.0-generate-001" not in data:
+                data["imagen-4.0-generate-001"] = base_data["imagen-4.0-generate-001"]
+            updated = True
+
         # update file
         if updated:
             data = dict(sorted(data.items()))

pygpt_net/provider/core/preset/json_file.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.23 15:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #
 
 import json
@@ -192,7 +192,7 @@ class JsonFileProvider(BaseProvider):
             MODE_CHAT: item.chat,
             MODE_COMPLETION: item.completion,
             MODE_IMAGE: item.img,
-            MODE_VISION: item.vision,
+            # MODE_VISION: item.vision,
             # MODE_LANGCHAIN: item.langchain,
             MODE_ASSISTANT: item.assistant,
             MODE_LLAMA_INDEX: item.llama_index,
@@ -250,8 +250,6 @@ class JsonFileProvider(BaseProvider):
             item.llama_index = data[MODE_LLAMA_INDEX]
         if MODE_RESEARCH in data:
             item.research = data[MODE_RESEARCH]
-        if MODE_VISION in data:
-            item.vision = data[MODE_VISION]
 
         if 'agent_provider' in data:
             item.agent_provider = data['agent_provider']

pygpt_net/provider/llms/anthropic.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.26 19:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #
 
 from typing import List, Dict, Optional
@@ -93,10 +93,7 @@ class AnthropicLLM(BaseLLM):
         :param window: window instance
         :return: list of models
         """
-        import anthropic
-        client = anthropic.Anthropic(
-            api_key=window.core.config.get('api_key_anthropic', "")
-        )
+        client = window.core.api.anthropic.get_client()
         models_list = client.models.list()
         items = []
         if models_list.data:
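
The inline client construction moves behind the new window.core.api.anthropic wrapper (pygpt_net/provider/api/anthropic/__init__.py in the file list). A plausible minimal shape, inferred from the removed lines rather than from the actual file:

    import anthropic

    class AnthropicApi:
        """Hypothetical wrapper centralizing Anthropic client creation."""
        def __init__(self, window=None):
            self.window = window

        def get_client(self) -> anthropic.Anthropic:
            # same construction the call site used inline before 2.6.30
            return anthropic.Anthropic(
                api_key=self.window.core.config.get('api_key_anthropic', "")
            )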

pygpt_net/provider/llms/base.py
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.02 20:00:00 #
+# Updated Date: 2025.08.28 09:00:00 #
 # ================================================== #
 
 import os
@@ -18,7 +18,8 @@ from llama_index.core.multi_modal_llms import MultiModalLLM as LlamaMultiModalLL
 
 from pygpt_net.core.types import (
     MODE_LANGCHAIN,
-    MODE_LLAMA_INDEX, MODE_CHAT,
+    MODE_LLAMA_INDEX,
+    MODE_CHAT,
 )
 from pygpt_net.item.model import ModelItem
 from pygpt_net.utils import parse_args
@@ -221,7 +222,7 @@ class BaseLLM:
         """
         model = ModelItem()
         model.provider = self.id
-        return window.core.gpt.get_client(
+        return window.core.api.openai.get_client(
             mode=MODE_CHAT,
             model=model,
         )

pygpt_net/provider/llms/openai.py
@@ -99,7 +99,7 @@ class OpenAILLM(BaseLLM):
 
         if window.core.config.get('api_use_responses_llama', False):
             tools = []
-            tools = window.core.gpt.remote_tools.append_to_tools(
+            tools = window.core.api.openai.remote_tools.append_to_tools(
                 mode=MODE_LLAMA_INDEX,
                 model=model,
                 stream=stream,

pygpt_net/provider/loaders/hub/image_vision/base.py
@@ -145,7 +145,7 @@ class ImageVisionLLMReader(BaseReader):
         image = image.convert("RGB")
         image_str = img_2_b64(image)
 
-        client = self._window.core.gpt.get_client()
+        client = self._window.core.api.openai.get_client()
         encoded = self._encode_image(str(file))
         content = [
             {