pygpt-net 2.6.28__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. pygpt_net/CHANGELOG.txt +13 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/common.py +2 -0
  20. pygpt_net/controller/theme/theme.py +6 -2
  21. pygpt_net/controller/ui/vision.py +4 -4
  22. pygpt_net/core/agents/legacy.py +2 -2
  23. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  24. pygpt_net/core/assistants/files.py +5 -5
  25. pygpt_net/core/assistants/store.py +4 -4
  26. pygpt_net/core/bridge/bridge.py +3 -3
  27. pygpt_net/core/bridge/worker.py +28 -9
  28. pygpt_net/core/debug/console/console.py +2 -2
  29. pygpt_net/core/debug/presets.py +2 -2
  30. pygpt_net/core/experts/experts.py +2 -2
  31. pygpt_net/core/idx/llm.py +21 -3
  32. pygpt_net/core/modes/modes.py +2 -2
  33. pygpt_net/core/presets/presets.py +3 -3
  34. pygpt_net/core/tokens/tokens.py +4 -4
  35. pygpt_net/core/types/mode.py +5 -2
  36. pygpt_net/core/vision/analyzer.py +1 -1
  37. pygpt_net/data/config/config.json +6 -3
  38. pygpt_net/data/config/models.json +75 -3
  39. pygpt_net/data/config/modes.json +3 -9
  40. pygpt_net/data/config/settings.json +112 -55
  41. pygpt_net/data/config/settings_section.json +2 -2
  42. pygpt_net/data/locale/locale.de.ini +2 -2
  43. pygpt_net/data/locale/locale.en.ini +9 -2
  44. pygpt_net/data/locale/locale.es.ini +2 -2
  45. pygpt_net/data/locale/locale.fr.ini +2 -2
  46. pygpt_net/data/locale/locale.it.ini +2 -2
  47. pygpt_net/data/locale/locale.pl.ini +3 -3
  48. pygpt_net/data/locale/locale.uk.ini +2 -2
  49. pygpt_net/data/locale/locale.zh.ini +2 -2
  50. pygpt_net/item/model.py +23 -3
  51. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  52. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  53. pygpt_net/provider/agents/openai/agent.py +5 -5
  54. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  56. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  57. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  58. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  59. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  62. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  63. pygpt_net/provider/agents/openai/evolve.py +5 -5
  64. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  65. pygpt_net/provider/api/__init__.py +27 -0
  66. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  67. pygpt_net/provider/api/google/__init__.py +262 -0
  68. pygpt_net/provider/api/google/audio.py +114 -0
  69. pygpt_net/provider/api/google/chat.py +552 -0
  70. pygpt_net/provider/api/google/image.py +287 -0
  71. pygpt_net/provider/api/google/tools.py +222 -0
  72. pygpt_net/provider/api/google/vision.py +129 -0
  73. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  74. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  77. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  79. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  81. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  82. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  83. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  84. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  85. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  86. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  87. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  88. pygpt_net/provider/audio_output/google_tts.py +12 -0
  89. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  90. pygpt_net/provider/core/config/patch.py +11 -0
  91. pygpt_net/provider/core/model/patch.py +9 -0
  92. pygpt_net/provider/core/preset/json_file.py +2 -4
  93. pygpt_net/provider/llms/anthropic.py +2 -5
  94. pygpt_net/provider/llms/base.py +4 -3
  95. pygpt_net/provider/llms/openai.py +1 -1
  96. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  97. pygpt_net/ui/dialog/preset.py +71 -55
  98. pygpt_net/ui/main.py +6 -4
  99. pygpt_net/utils.py +9 -0
  100. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +42 -48
  101. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +115 -107
  102. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  111. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  112. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  113. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  114. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  115. {pygpt_net-2.6.28.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,287 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.28 20:00:00 #
+ # ================================================== #
+
+ from typing import Optional, Dict, Any, List
+ from google import genai
+ from google.genai import types as gtypes
+ from PySide6.QtCore import QObject, Signal, QRunnable, Slot
+ import base64, datetime, os, requests
+
+ from pygpt_net.core.events import KernelEvent
+ from pygpt_net.core.bridge.context import BridgeContext
+ from pygpt_net.item.ctx import CtxItem
+ from pygpt_net.utils import trans
+
+
+ class Image:
+
+     MODE_GENERATE = "generate"
+     MODE_EDIT = "edit"
+
+     def __init__(self, window=None):
+         self.window = window
+         self.worker = None
+
+     def generate(
+             self,
+             context: BridgeContext,
+             extra: Optional[Dict[str, Any]] = None,
+             sync: bool = True
+     ) -> bool:
+         """
+         Generate image(s) using Google GenAI API
+
+         :param context: BridgeContext
+         :param extra: Extra parameters (num, inline)
+         :param sync: Run synchronously if True
+         :return: bool
+         """
+         extra = extra or {}
+         ctx = context.ctx or CtxItem()
+         model = context.model
+         prompt = context.prompt
+         num = int(extra.get("num", 1))
+         inline = bool(extra.get("inline", False))
+
+         prompt_model = self.window.core.models.from_defaults()
+         tmp = self.window.core.config.get('img_prompt_model')
+         if self.window.core.models.has(tmp):
+             prompt_model = self.window.core.models.get(tmp)
+
+         worker = ImageWorker()
+         worker.window = self.window
+         worker.client = self.window.core.api.google.get_client()
+         worker.ctx = ctx
+         worker.model = model.id
+         worker.input_prompt = prompt
+         worker.model_prompt = prompt_model
+         worker.system_prompt = self.window.core.prompt.get('img')
+         worker.raw = self.window.core.config.get('img_raw')
+         worker.num = num
+         worker.inline = inline
+
+         if self.window.core.config.has('img_resolution'):
+             worker.resolution = self.window.core.config.get('img_resolution') or "1024x1024"
+
+         self.worker = worker
+         self.worker.signals.finished.connect(self.window.core.image.handle_finished)
+         self.worker.signals.finished_inline.connect(self.window.core.image.handle_finished_inline)
+         self.worker.signals.status.connect(self.window.core.image.handle_status)
+         self.worker.signals.error.connect(self.window.core.image.handle_error)
+
+         if sync or not self.window.controller.kernel.async_allowed(ctx):
+             self.worker.run()
+             return True
+
+         self.window.dispatch(KernelEvent(KernelEvent.STATE_BUSY, {"id": "img"}))
+         self.window.threadpool.start(self.worker)
+         return True
+
+
+ class ImageSignals(QObject):
+     finished = Signal(object, list, str)  # ctx, paths, prompt
+     finished_inline = Signal(object, list, str)  # ctx, paths, prompt
+     status = Signal(object)  # message
+     error = Signal(object)  # exception
+
+
+ class ImageWorker(QRunnable):
+     def __init__(self, *args, **kwargs):
+         super().__init__()
+         self.signals = ImageSignals()
+         self.window = None
+         self.client: Optional[genai.Client] = None
+         self.ctx: Optional[CtxItem] = None
+         self.model = "imagen-4.0-generate-001"
+         self.model_prompt = None
+         self.input_prompt = ""
+         self.system_prompt = ""
+         self.inline = False
+         self.raw = False
+         self.num = 1
+         self.resolution = "1024x1024"  # used to derive aspect ratio for Imagen
+
+     @Slot()
+     def run(self):
+         try:
+             # Optional prompt enhancement
+             if not self.raw and not self.inline:
+                 try:
+                     self.signals.status.emit(trans('img.status.prompt.wait'))
+                     bridge_context = BridgeContext(
+                         prompt=self.input_prompt,
+                         system_prompt=self.system_prompt,
+                         model=self.model_prompt,
+                         max_tokens=200,
+                         temperature=1.0,
+                     )
+                     ev = KernelEvent(KernelEvent.CALL, {'context': bridge_context, 'extra': {}})
+                     self.window.dispatch(ev)
+                     resp = ev.data.get('response')
+                     if resp:
+                         self.input_prompt = resp
+                 except Exception as e:
+                     self.signals.error.emit(e)
+                     self.signals.status.emit(trans('img.status.prompt.error') + ": " + str(e))
+
+             self.signals.status.emit(trans('img.status.generating') + f": {self.input_prompt}...")
+
+             paths: List[str] = []
+             if self._is_imagen(self.model):
+                 # Imagen: generate_images
+                 resp = self._imagen_generate(self.input_prompt, self.num, self.resolution)
+                 imgs = getattr(resp, "generated_images", None) or []
+                 for idx, gi in enumerate(imgs[: self.num]):
+                     data = self._extract_imagen_bytes(gi)
+                     p = self._save(idx, data)
+                     if p:
+                         paths.append(p)
+             else:
+                 # Gemini image preview: generate_content -> parts[].inline_data.data
+                 resp = self.client.models.generate_content(
+                     model=self.model,
+                     contents=[self.input_prompt],
+                 )
+                 from PIL import Image as PILImage
+                 from io import BytesIO
+                 cands = getattr(resp, "candidates", None) or []
+                 saved = 0
+                 for cand in cands:
+                     parts = getattr(getattr(cand, "content", None), "parts", None) or []
+                     for part in parts:
+                         inline = getattr(part, "inline_data", None)
+                         if inline and getattr(inline, "data", None):
+                             data = inline.data
+                             p = self._save(saved, data)
+                             if p:
+                                 paths.append(p)
+                                 saved += 1
+                             if saved >= self.num:
+                                 break
+                     if saved >= self.num:
+                         break
+
+             if self.inline:
+                 self.signals.finished_inline.emit(self.ctx, paths, self.input_prompt)
+             else:
+                 self.signals.finished.emit(self.ctx, paths, self.input_prompt)
+         except Exception as e:
+             self.signals.error.emit(e)
+         finally:
+             self._cleanup()
+
+     def _is_imagen(self, model_id: str) -> bool:
+         """
+         Check if model_id is an Imagen model
+
+         :param model_id: Model ID
+         :return: True if Imagen model
+         """
+         return "imagen" in str(model_id).lower()
+
+     def _imagen_generate(self, prompt: str, num: int, resolution: str):
+         """
+         Call Imagen generate_images with config (number_of_images, optional aspect_ratio).
+
+         :param prompt: Prompt text
+         :param num: Number of images to generate
+         :param resolution: Resolution string, e.g. "1024x1024"
+         :return: GenerateImagesResponse
+         """
+         aspect = self._aspect_from_resolution(resolution)  # "1:1", "3:4", …
+         cfg = gtypes.GenerateImagesConfig(number_of_images=num)
+         if aspect:
+             cfg.aspect_ratio = aspect
+         return self.client.models.generate_images(
+             model=self.model,
+             prompt=prompt,
+             config=cfg,
+         )
+
+     def _aspect_from_resolution(self, resolution: str) -> Optional[str]:
+         """
+         Derive aspect ratio string from resolution.
+
+         :param resolution: Resolution string, e.g. "1024x1024"
+         :return: Aspect ratio string, e.g. "1:1", "3:4", or None if unknown
+         """
+         try:
+             w, h = [int(x) for x in resolution.lower().split("x")]
+             # Reduce to small set supported in docs
+             ratios = {(1, 1): "1:1", (3, 4): "3:4", (4, 3): "4:3", (9, 16): "9:16", (16, 9): "16:9"}
+             # Find nearest
+             from math import gcd
+             g = gcd(w, h)
+             key = (w // g, h // g)
+             return ratios.get(key)
+         except Exception:
+             return None
+
+     def _extract_imagen_bytes(self, generated_image) -> Optional[bytes]:
+         """
+         Extract bytes from Imagen generated image object.
+
+         :param generated_image: GeneratedImage object
+         :return: Image bytes or None
+         """
+         img = getattr(generated_image, "image", None)
+         if not img:
+             return None
+         data = getattr(img, "image_bytes", None)
+         if isinstance(data, (bytes, bytearray)):
+             return bytes(data)
+         if isinstance(data, str):
+             try:
+                 return base64.b64decode(data)
+             except Exception:
+                 return None
+         # fallback: url/uri if present
+         url = getattr(img, "url", None) or getattr(img, "uri", None)
+         if url:
+             try:
+                 r = requests.get(url, timeout=30)
+                 if r.status_code == 200:
+                     return r.content
+             except Exception:
+                 pass
+         return None
+
+     def _save(self, idx: int, data: Optional[bytes]) -> Optional[str]:
+         """
+         Save image bytes to file and return path.
+
+         :param idx: Image index (for filename)
+         :param data: Image bytes
+         :return: Path string or None
+         """
+         if not data:
+             return None
+         name = (
+             datetime.date.today().strftime("%Y-%m-%d") + "_" +
+             datetime.datetime.now().strftime("%H-%M-%S") + "-" +
+             self.window.core.image.make_safe_filename(self.input_prompt) + "-" +
+             str(idx + 1) + ".png"
+         )
+         path = os.path.join(self.window.core.config.get_user_dir("img"), name)
+         self.signals.status.emit(trans('img.status.downloading') + f" ({idx + 1} / {self.num}) -> {path}")
+         if self.window.core.image.save_image(path, data):
+             return path
+         return None
+
+     def _cleanup(self):
+         """Cleanup resources"""
+         sig = self.signals
+         self.signals = None
+         if sig is not None:
+             try:
+                 sig.deleteLater()
+             except RuntimeError:
+                 pass
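
ImageWorker.run() above switches between two google-genai entry points depending on the model ID: models.generate_images() for Imagen models and models.generate_content() for Gemini image-preview models. A minimal standalone sketch of those two calls, assuming GOOGLE_API_KEY is exported and treating both model IDs as examples rather than values taken from this diff:

# Sketch of the two google-genai code paths wrapped by ImageWorker above.
# Assumptions: GOOGLE_API_KEY is set in the environment; model IDs are examples.
from google import genai
from google.genai import types as gtypes

client = genai.Client()  # picks up the API key from the environment

# Imagen path: generate_images() returns generated_images[].image.image_bytes
resp = client.models.generate_images(
    model="imagen-4.0-generate-001",
    prompt="a watercolor fox in a forest",
    config=gtypes.GenerateImagesConfig(number_of_images=1, aspect_ratio="1:1"),
)
with open("imagen.png", "wb") as f:
    f.write(resp.generated_images[0].image.image_bytes)

# Gemini image-preview path: generate_content() returns bytes in inline_data parts
resp = client.models.generate_content(
    model="gemini-2.5-flash-image-preview",  # example ID, not taken from this diff
    contents=["a watercolor fox in a forest"],
)
for part in resp.candidates[0].content.parts:
    if getattr(part, "inline_data", None) and part.inline_data.data:
        with open("gemini.png", "wb") as f:
            f.write(part.inline_data.data)
        break
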
@@ -0,0 +1,222 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.28 20:00:00 #
+ # ================================================== #
+
+ import json
+ from typing import List, Any, Dict, Optional
+
+ from google.genai import types as gtypes
+ from pygpt_net.item.model import ModelItem
+
+
+ class Tools:
+     def __init__(self, window=None):
+         """
+         Tools mapper for Google GenAI
+
+         :param window: Window instance
+         """
+         self.window = window
+
+     # -------- SANITIZER --------
+     def _sanitize_schema(self, schema: Any) -> Any:
+         """
+         Sanitize JSON Schema dict by removing unsupported keywords and normalizing types.
+
+         1. Remove unsupported keywords like additionalProperties, patternProperties,
+            dependencies, oneOf, anyOf, allOf, $ref, $defs, examples, readOnly, writeOnly.
+         2. Normalize 'type' to a single value (e.g., if it's a list, take the first non-null type).
+         3. Ensure 'enum' is only present for string types.
+         4. Recursively sanitize nested schemas in 'properties' and 'items'.
+         5. Handle arrays by ensuring 'items' is a single schema.
+         6. Handle objects by ensuring 'properties' is a dict and 'required' is a list of strings.
+
+         :param schema: Any JSON Schema as dict or list
+         :return: Sanitized schema dict
+         """
+         if isinstance(schema, list):
+             return self._sanitize_schema(schema[0]) if schema else {}
+
+         if not isinstance(schema, dict):
+             return schema
+
+         banned = {
+             "additionalProperties",
+             "additional_properties",
+             "unevaluatedProperties",
+             "patternProperties",
+             "dependencies",
+             "dependentSchemas",
+             "dependentRequired",
+             "oneOf",
+             "anyOf",
+             "allOf",
+             "$defs",
+             "$ref",
+             "$schema",
+             "$id",
+             "examples",
+             "readOnly",
+             "writeOnly",
+             "nullable",
+         }
+         for k in list(schema.keys()):
+             if k in banned:
+                 schema.pop(k, None)
+
+         # Union -> first non-null type
+         t = schema.get("type")
+         if isinstance(t, list):
+             t_no_null = [x for x in t if x != "null"]
+             schema["type"] = t_no_null[0] if t_no_null else "string"
+
+         # enum only for string
+         if "enum" in schema and schema.get("type") not in ("string", "STRING"):
+             schema.pop("enum", None)
+
+         # object
+         if (schema.get("type") or "").lower() == "object":
+             props = schema.get("properties")
+             if not isinstance(props, dict):
+                 props = {}
+             clean_props: Dict[str, Any] = {}
+             for pname, pval in props.items():
+                 clean_props[pname] = self._sanitize_schema(pval)
+             schema["properties"] = clean_props
+
+             req = schema.get("required")
+             if not isinstance(req, list) or not all(isinstance(x, str) for x in req):
+                 schema.pop("required", None)
+             elif len(req) == 0:
+                 schema.pop("required", None)
+
+         # array
+         if (schema.get("type") or "").lower() == "array":
+             items = schema.get("items")
+             if isinstance(items, list) and items:
+                 items = items[0]
+             if not isinstance(items, dict):
+                 items = {"type": "string"}
+             schema["items"] = self._sanitize_schema(items)
+
+         # recursive sanitize
+         for k, v in list(schema.items()):
+             if isinstance(v, dict):
+                 schema[k] = self._sanitize_schema(v)
+             elif isinstance(v, list):
+                 schema[k] = [self._sanitize_schema(x) for x in v]
+
+         return schema
+
+     # -------- CONVERTER to gtypes.Schema (UPPERCASE) --------
+     def _to_gschema(self, schema: Any) -> gtypes.Schema:
+         """
+         Convert sanitized dict -> google.genai.types.Schema.
+         Enforces UPPERCASE type names (OBJECT, ARRAY, STRING, NUMBER, INTEGER, BOOLEAN).
+
+         :param schema: Sanitized JSON Schema as dict
+         :return: gtypes.Schema
+         """
+         TYPE_MAP = {
+             "enum": "STRING",
+             "ENUM": "STRING",
+             "object": "OBJECT",
+             "dict": "OBJECT",
+             "array": "ARRAY",
+             "list": "ARRAY",
+             "string": "STRING",
+             "number": "NUMBER",
+             "float": "NUMBER",
+             "integer": "INTEGER",
+             "boolean": "BOOLEAN",
+             "int": "INTEGER",
+             "bool": "BOOLEAN",
+             "OBJECT": "OBJECT",
+             "DICT": "OBJECT",
+             "ARRAY": "ARRAY",
+             "LIST": "ARRAY",
+             "STRING": "STRING",
+             "NUMBER": "NUMBER",
+             "FLOAT": "NUMBER",
+             "INTEGER": "INTEGER",
+             "BOOLEAN": "BOOLEAN",
+             "INT": "INTEGER",
+             "BOOL": "BOOLEAN",
+         }
+
+         if isinstance(schema, gtypes.Schema):
+             return schema
+
+         if not isinstance(schema, dict):
+             return gtypes.Schema(type="STRING")
+
+         t = TYPE_MAP.get(str(schema.get("type", "OBJECT")).upper(), "OBJECT")
+         desc = schema.get("description")
+         fmt = schema.get("format")
+         enum = schema.get("enum") if isinstance(schema.get("enum"), list) else None
+         req = schema.get("required") if isinstance(schema.get("required"), list) else None
+
+         gs = gtypes.Schema(
+             type=t,
+             description=desc,
+             format=fmt,
+             enum=enum,
+             required=[x for x in (req or []) if isinstance(x, str)] or None,
+         )
+
+         props = schema.get("properties")
+         if isinstance(props, dict):
+             gs.properties = {k: self._to_gschema(v) for k, v in props.items()}
+
+         items = schema.get("items")
+         if isinstance(items, dict):
+             gs.items = self._to_gschema(items)
+
+         return gs
+
+     def prepare(self, model: ModelItem, functions: list) -> List[gtypes.Tool]:
+         """
+         Prepare Google Function Declarations (types.Tool) for google-genai.
+
+         :param model: ModelItem
+         :param functions: List of function definitions as dicts with 'name', 'desc', 'params' (JSON Schema)
+         :return: List of gtypes.Tool
+         """
+         if not functions or not isinstance(functions, list):
+             return []
+
+         fds: List[gtypes.FunctionDeclaration] = []
+         for function in functions:
+             name = str(function.get("name") or "").strip()
+             if not name:
+                 continue
+
+             desc = function.get("desc") or ""
+             params: Optional[dict] = {}
+             if function.get("params"):
+                 try:
+                     params = json.loads(function["params"])
+                 except Exception:
+                     params = {}
+
+             params = self._sanitize_schema(params or {})
+             if not params.get("type"):
+                 params["type"] = "object"
+
+             gschema = self._to_gschema(params or {"type": "object"})
+
+             fd = gtypes.FunctionDeclaration(
+                 name=name,
+                 description=desc,
+                 parameters=gschema,
+             )
+             fds.append(fd)
+
+         return [gtypes.Tool(function_declarations=fds)] if fds else []
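
To see what the sanitizer and converter above produce end to end, here is a hedged usage sketch: a pygpt-style function spec whose params JSON contains keywords Gemini rejects is passed through Tools.prepare(). Constructing Tools without a window and passing model=None are illustration-only shortcuts; neither value is referenced inside prepare() itself.

# Illustrative only: one function spec run through the new Google tools mapper.
from pygpt_net.provider.api.google.tools import Tools

tools = Tools(window=None)  # window is not referenced by prepare()
functions = [{
    "name": "get_weather",
    "desc": "Return current weather for a city",
    # 'params' is a JSON string; additionalProperties is stripped and the
    # union type ["string", "null"] collapses to its first non-null entry
    "params": '{"type": "object", "additionalProperties": false,'
              ' "properties": {"city": {"type": ["string", "null"]}},'
              ' "required": ["city"]}',
}]
(tool,) = tools.prepare(model=None, functions=functions)
decl = tool.function_declarations[0]
print(decl.name)             # expected: get_weather
print(decl.parameters.type)  # expected: OBJECT (uppercase, via _to_gschema)
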
@@ -0,0 +1,129 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package #
+ # Website: https://pygpt.net #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
+ # MIT License #
+ # Created By : Marcin Szczygliński #
+ # Updated Date: 2025.08.28 20:00:00 #
+ # ================================================== #
+
+ import os
+ from typing import Optional, Dict, List, Union
+
+ from google.genai.types import Part
+
+ from pygpt_net.item.attachment import AttachmentItem
+ from pygpt_net.item.ctx import CtxItem
+
+
+ class Vision:
+     def __init__(self, window=None):
+         """
+         Vision helpers for Google GenAI
+
+         :param window: Window instance
+         """
+         self.window = window
+         self.attachments: Dict[str, str] = {}
+         self.urls: List[str] = []
+         self.input_tokens = 0
+
+     def build_parts(
+             self,
+             content: Union[str, list],
+             attachments: Optional[Dict[str, AttachmentItem]] = None,
+     ) -> List[Part]:
+         """
+         Build image parts from local attachments (inline bytes)
+
+         :param content: Message content (str or list)
+         :param attachments: Attachments dict (id -> AttachmentItem)
+         :return: List of Parts
+         """
+         parts: List[Part] = []
+         self.attachments = {}
+         self.urls = []
+
+         if attachments:
+             for id_, attachment in attachments.items():
+                 if attachment.path and os.path.exists(attachment.path):
+                     if self.is_image(attachment.path):
+                         mime = self._guess_mime(attachment.path)
+                         with open(attachment.path, "rb") as f:
+                             data = f.read()
+                         parts.append(Part.from_bytes(data=data, mime_type=mime))
+                         self.attachments[id_] = attachment.path
+                         attachment.consumed = True
+
+         return parts
+
+     def is_image(self, path: str) -> bool:
+         """
+         Check if path looks like an image
+
+         :param path: File path
+         :return: True if image, False otherwise
+         """
+         return path.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif', '.webp'))
+
+     def _guess_mime(self, path: str) -> str:
+         """
+         Guess mime type from file extension
+
+         :param path: File path
+         :return: Mime type string
+         """
+         ext = os.path.splitext(path)[1].lower().lstrip(".")
+         if ext in ("jpg", "jpeg"):
+             return "image/jpeg"
+         if ext == "png":
+             return "image/png"
+         if ext == "gif":
+             return "image/gif"
+         if ext == "bmp":
+             return "image/bmp"
+         if ext == "webp":
+             return "image/webp"
+         if ext == "tiff":
+             return "image/tiff"
+         return "image/jpeg"
+
+     def append_images(self, ctx: CtxItem):
+         """
+         Append sent images paths to context for UI/history
+
+         :param ctx: CtxItem
+         """
+         images = self.get_attachments()
+         if len(images) > 0:
+             ctx.images = self.window.core.filesystem.make_local_list(list(images.values()))
+
+     def get_attachments(self) -> Dict[str, str]:
+         """
+         Return attachments dict (id -> path)
+
+         :return: Dict of attachments
+         """
+         return self.attachments
+
+     def get_urls(self) -> List[str]:
+         """
+         Return image urls (unused here)
+
+         :return: List of URLs
+         """
+         return self.urls
+
+     def reset_tokens(self):
+         """Reset input tokens counter"""
+         self.input_tokens = 0
+
+     def get_used_tokens(self) -> int:
+         """
+         Return input tokens counter
+
+         :return: Number of input tokens
+         """
+         return self.input_tokens
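
A short usage sketch for the vision helper above: only local image files are inlined as Part objects, and each consumed attachment is recorded for the UI. Building AttachmentItem directly and the ./cat.png path are assumptions for illustration, not values taken from this diff.

# Illustrative only: build inline-image Parts from one local attachment.
# Assumptions: AttachmentItem can be constructed bare and ./cat.png exists on disk.
from pygpt_net.item.attachment import AttachmentItem
from pygpt_net.provider.api.google.vision import Vision

att = AttachmentItem()
att.path = "./cat.png"

vision = Vision(window=None)  # window is only needed by append_images()
parts = vision.build_parts(
    content="What is in this picture?",
    attachments={"att-1": att},
)
# parts now holds google.genai.types.Part objects with mime_type image/png;
# vision.get_attachments() maps "att-1" -> "./cat.png" and att.consumed is True
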
@@ -39,7 +39,7 @@ from .tools import Tools
  from .vision import Vision


- class Gpt:
+ class ApiOpenAI:

      def __init__(self, window=None):
          """
@@ -281,7 +281,7 @@ class Gpt:
          # additional_kwargs["max_tokens"] = max_tokens

          # tools / functions
-         tools = self.window.core.gpt.tools.prepare(model, functions)
+         tools = self.window.core.api.openai.tools.prepare(model, functions)
          if len(tools) > 0 and "disable_tools" not in extra:
              additional_kwargs["tools"] = tools

@@ -48,7 +48,7 @@ class LocalComputer(Computer):

          :return: Environment of the computer, such as "mac", "windows", "ubuntu", or "browser".
          """
-         return self.window.core.gpt.computer.get_current_env()
+         return self.window.core.api.openai.computer.get_current_env()

      @property
      def dimensions(self) -> tuple[int, int]:
@@ -13,7 +13,7 @@ from agents import (
  from pygpt_net.item.model import ModelItem
  from pygpt_net.item.preset import PresetItem

- from pygpt_net.provider.gpt.agents.remote_tools import append_tools
+ from pygpt_net.provider.api.openai.agents.remote_tools import append_tools


  def get_experts(
@@ -169,7 +169,7 @@ class StreamHandler:
              self.files_handled = True
              self.window.core.debug.info("[chat] Container files found, downloading...")
              try:
-                 self.window.core.gpt.container.download_files(ctx, self.files)
+                 self.window.core.api.openai.container.download_files(ctx, self.files)
              except Exception as e:
                  self.window.core.debug.error(f"[chat] Error downloading container files: {e}")

@@ -36,7 +36,7 @@ class Assistants:

          :return: OpenAI client
          """
-         return self.window.core.gpt.get_client()
+         return self.window.core.api.openai.get_client()

      def log(
              self,
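
The remaining hunks are mechanical renames: the provider/gpt package moves to provider/api/openai and the runtime accessor window.core.gpt becomes window.core.api.openai. A hedged before/after sketch for external code (for example, plugins) that imported the old paths; this diff does not show a compatibility shim, so the old names are treated as removed:

# Before (2.6.28): the OpenAI provider lived under provider.gpt / core.gpt
# from pygpt_net.provider.gpt.agents.remote_tools import append_tools
# client = window.core.gpt.get_client()

# After (2.6.30): the same objects live under provider.api.openai / core.api.openai
from pygpt_net.provider.api.openai.agents.remote_tools import append_tools

def get_openai_client(window):
    # 'window' is the PyGPT main window object passed to controllers and plugins
    return window.core.api.openai.get_client()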