pygpt-net 2.6.29__py3-none-any.whl → 2.6.30__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (113)
  1. pygpt_net/CHANGELOG.txt +7 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/{container.py → app_core.py} +5 -6
  4. pygpt_net/controller/access/control.py +1 -9
  5. pygpt_net/controller/assistant/assistant.py +4 -4
  6. pygpt_net/controller/assistant/batch.py +7 -7
  7. pygpt_net/controller/assistant/files.py +4 -4
  8. pygpt_net/controller/assistant/threads.py +3 -3
  9. pygpt_net/controller/attachment/attachment.py +4 -7
  10. pygpt_net/controller/chat/common.py +1 -1
  11. pygpt_net/controller/chat/stream.py +961 -294
  12. pygpt_net/controller/chat/vision.py +11 -19
  13. pygpt_net/controller/config/placeholder.py +1 -1
  14. pygpt_net/controller/ctx/ctx.py +1 -1
  15. pygpt_net/controller/ctx/summarizer.py +1 -1
  16. pygpt_net/controller/mode/mode.py +21 -12
  17. pygpt_net/controller/plugins/settings.py +3 -2
  18. pygpt_net/controller/presets/editor.py +112 -99
  19. pygpt_net/controller/theme/theme.py +3 -2
  20. pygpt_net/controller/ui/vision.py +4 -4
  21. pygpt_net/core/agents/legacy.py +2 -2
  22. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  23. pygpt_net/core/assistants/files.py +5 -5
  24. pygpt_net/core/assistants/store.py +4 -4
  25. pygpt_net/core/bridge/bridge.py +3 -3
  26. pygpt_net/core/bridge/worker.py +28 -9
  27. pygpt_net/core/debug/console/console.py +2 -2
  28. pygpt_net/core/debug/presets.py +2 -2
  29. pygpt_net/core/experts/experts.py +2 -2
  30. pygpt_net/core/modes/modes.py +2 -2
  31. pygpt_net/core/presets/presets.py +3 -3
  32. pygpt_net/core/tokens/tokens.py +4 -4
  33. pygpt_net/core/types/mode.py +5 -2
  34. pygpt_net/core/vision/analyzer.py +1 -1
  35. pygpt_net/data/config/config.json +6 -3
  36. pygpt_net/data/config/models.json +75 -3
  37. pygpt_net/data/config/modes.json +3 -9
  38. pygpt_net/data/config/settings.json +89 -31
  39. pygpt_net/data/config/settings_section.json +2 -2
  40. pygpt_net/data/locale/locale.de.ini +2 -2
  41. pygpt_net/data/locale/locale.en.ini +9 -2
  42. pygpt_net/data/locale/locale.es.ini +2 -2
  43. pygpt_net/data/locale/locale.fr.ini +2 -2
  44. pygpt_net/data/locale/locale.it.ini +2 -2
  45. pygpt_net/data/locale/locale.pl.ini +3 -3
  46. pygpt_net/data/locale/locale.uk.ini +2 -2
  47. pygpt_net/data/locale/locale.zh.ini +2 -2
  48. pygpt_net/item/model.py +23 -3
  49. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  50. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  51. pygpt_net/provider/agents/openai/agent.py +5 -5
  52. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  53. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  54. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  55. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  56. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  57. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  58. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  59. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  60. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  61. pygpt_net/provider/agents/openai/evolve.py +5 -5
  62. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  63. pygpt_net/provider/api/__init__.py +27 -0
  64. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  65. pygpt_net/provider/api/google/__init__.py +262 -0
  66. pygpt_net/provider/api/google/audio.py +114 -0
  67. pygpt_net/provider/api/google/chat.py +552 -0
  68. pygpt_net/provider/api/google/image.py +287 -0
  69. pygpt_net/provider/api/google/tools.py +222 -0
  70. pygpt_net/provider/api/google/vision.py +129 -0
  71. pygpt_net/provider/{gpt → api/openai}/__init__.py +2 -2
  72. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  73. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  74. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  75. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  76. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  77. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  78. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  79. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  80. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  81. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  82. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  83. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  84. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  85. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  86. pygpt_net/provider/audio_output/google_tts.py +12 -0
  87. pygpt_net/provider/audio_output/openai_tts.py +1 -1
  88. pygpt_net/provider/core/config/patch.py +11 -0
  89. pygpt_net/provider/core/model/patch.py +9 -0
  90. pygpt_net/provider/core/preset/json_file.py +2 -4
  91. pygpt_net/provider/llms/anthropic.py +2 -5
  92. pygpt_net/provider/llms/base.py +4 -3
  93. pygpt_net/provider/llms/openai.py +1 -1
  94. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  95. pygpt_net/ui/dialog/preset.py +71 -55
  96. pygpt_net/ui/main.py +6 -4
  97. pygpt_net/utils.py +9 -0
  98. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/METADATA +32 -44
  99. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/RECORD +113 -105
  100. /pygpt_net/provider/{gpt → api/openai}/agents/__init__.py +0 -0
  101. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  102. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  103. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  104. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  105. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  106. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  107. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  108. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  109. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  110. /pygpt_net/provider/{gpt → api/openai}/worker/__init__.py +0 -0
  111. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/LICENSE +0 -0
  112. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/WHEEL +0 -0
  113. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.30.dist-info}/entry_points.txt +0 -0
pygpt_net/provider/api/__init__.py (new file)
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 20:00:00                  #
+# ================================================== #
+
+from .anthropic import ApiAnthropic
+from .google import ApiGoogle
+from .openai import ApiOpenAI
+
+class Api:
+
+    def __init__(self, window=None):
+        """
+        API wrappers core
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.anthropic = ApiAnthropic(window)
+        self.google = ApiGoogle(window)
+        self.openai = ApiOpenAI(window)
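For orientation, a minimal usage sketch of the new facade (not part of the diff): the import path follows the new provider/api package layout above, while the window object and the way the application actually exposes the facade are assumptions.

# Hypothetical sketch: one Api object groups the per-provider wrappers.
from pygpt_net.provider.api import Api

api = Api(window=window)                       # window: the app's Window instance (assumed to be available)
anthropic_client = api.anthropic.get_client()  # Anthropic wrapper (new in this release)
google_client = api.google.get_client()        # Google GenAI wrapper (new in this release)
openai_api = api.openai                        # former provider/gpt package, now provider/api/openai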
pygpt_net/provider/api/anthropic/__init__.py (new file)
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 09:00:00                  #
+# ================================================== #
+
+from anthropic import Anthropic
+
+from pygpt_net.core.types import (
+    MODE_CHAT,
+)
+from pygpt_net.item.model import ModelItem
+
+class ApiAnthropic:
+
+    def __init__(self, window=None):
+        """
+        Anthropic API wrapper core
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.client = None
+        self.locked = False
+
+    def get_client(
+            self,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> Anthropic:
+        """
+        Return Anthropic client
+
+        :param mode: Mode
+        :param model: Model
+        :return: Anthropic client
+        """
+        if self.client is not None:
+            try:
+                self.client.close()  # close previous client if exists
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing previous Anthropic client:", e)
+        self.client = Anthropic(
+            api_key=self.window.core.config.get('api_key_anthropic', "")
+        )
+        return self.client
+
+    def stop(self):
+        """On global event stop"""
+        pass
+
+    def close(self):
+        """Close Anthropic client"""
+        if self.locked:
+            return
+        if self.client is not None:
+            try:
+                pass
+                # self.client.close()
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing Anthropic client:", e)
pygpt_net/provider/api/google/__init__.py (new file)
@@ -0,0 +1,262 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 20:00:00                  #
+# ================================================== #
+
+from typing import Optional, Dict, Any
+
+from google.genai import types as gtypes
+from google import genai
+from pygpt_net.core.types import (
+    MODE_ASSISTANT,
+    MODE_AUDIO,
+    MODE_CHAT,
+    MODE_COMPLETION,
+    MODE_IMAGE,
+    MODE_RESEARCH,
+)
+from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.item.model import ModelItem
+
+from .chat import Chat
+from .vision import Vision
+from .tools import Tools
+from .audio import Audio
+from .image import Image
+
+
+class ApiGoogle:
+    def __init__(self, window=None):
+        """
+        Google GenAI API SDK wrapper
+
+        :param window: Window instance
+        """
+        self.window = window
+        self.chat = Chat(window)
+        self.vision = Vision(window)
+        self.tools = Tools(window)
+        self.audio = Audio(window)
+        self.image = Image(window)
+        self.client: Optional[genai.Client] = None
+        self.locked = False
+        self.last_client_args: Optional[Dict[str, Any]] = None
+
+    def get_client(
+            self,
+            mode: str = MODE_CHAT,
+            model: ModelItem = None
+    ) -> genai.Client:
+        """
+        Get or create Google GenAI client
+
+        :param mode: Mode (chat, completion, image, etc.)
+        :param model: ModelItem
+        :return: genai.Client instance
+        """
+        if not model:
+            model = ModelItem()
+            model.provider = "google"
+        args = self.window.core.models.prepare_client_args(mode, model)
+        filtered = {}
+        if args.get("api_key"):
+            filtered["api_key"] = args["api_key"]
+        if self.client is None or self.last_client_args != filtered:
+            self.client = genai.Client(**filtered)
+            self.last_client_args = filtered
+        return self.client
+
+    def call(self, context: BridgeContext, extra: dict = None) -> bool:
+        """
+        Make an API call to Google GenAI
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: True if successful, False otherwise
+        """
+        mode = context.mode
+        model = context.model
+        stream = context.stream
+        ctx = context.ctx
+        ai_name = ctx.output_name if ctx else "assistant"
+
+        # No Responses API in google-genai
+        if ctx:
+            ctx.use_responses_api = False
+
+        used_tokens = 0
+        response = None
+
+        if mode in [MODE_COMPLETION, MODE_CHAT, MODE_AUDIO, MODE_RESEARCH]:
+            response = self.chat.send(context=context, extra=extra)
+            used_tokens = self.chat.get_used_tokens()
+            if ctx:
+                self.vision.append_images(ctx)
+
+        elif mode == MODE_IMAGE:
+            return self.image.generate(context=context, extra=extra)
+
+        elif mode == MODE_ASSISTANT:
+            return False  # not implemented for Google
+
+        if stream:
+            if ctx:
+                ctx.stream = response
+                ctx.set_output("", ai_name)
+                ctx.input_tokens = used_tokens
+            return True
+
+        if response is None:
+            return False
+
+        if isinstance(response, dict) and "error" in response:
+            return False
+
+        if ctx:
+            ctx.ai_name = ai_name
+            self.chat.unpack_response(mode, response, ctx)
+            try:
+                import json
+                for tc in getattr(ctx, "tool_calls", []) or []:
+                    fn = tc.get("function") or {}
+                    args = fn.get("arguments")
+                    if isinstance(args, str):
+                        try:
+                            fn["arguments"] = json.loads(args)
+                        except Exception:
+                            fn["arguments"] = {}
+            except Exception:
+                pass
+        return True
+
+    def quick_call(self, context: BridgeContext, extra: dict = None) -> str:
+        """
+        Make a quick API call to Google GenAI and return the output text
+
+        :param context: BridgeContext
+        :param extra: Extra parameters
+        :return: Output text
+        """
+        if context.request:
+            context.stream = False
+            context.mode = MODE_CHAT
+            self.locked = True
+            self.call(context, extra)
+            self.locked = False
+            return context.ctx.output
+
+        self.locked = True
+        try:
+            ctx = context.ctx
+            prompt = context.prompt
+            system_prompt = context.system_prompt
+            temperature = context.temperature
+            history = context.history
+            functions = context.external_functions
+            model = context.model or self.window.core.models.from_defaults()
+
+            client = self.get_client(MODE_CHAT, model)
+            tools = self.tools.prepare(model, functions)
+
+            """
+            # with remote tools
+            base_tools = self.tools.prepare(model, functions)
+            remote_tools = self.build_remote_tools(model)
+            tools = (base_tools or []) + (remote_tools or [])
+            """
+
+            inputs = self.chat.build_input(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                model=model,
+                history=history,
+                attachments=context.attachments,
+                multimodal_ctx=context.multimodal_ctx,
+            )
+            cfg = genai.types.GenerateContentConfig(
+                temperature=temperature if temperature is not None else self.window.core.config.get('temperature'),
+                top_p=self.window.core.config.get('top_p'),
+                max_output_tokens=context.max_tokens if context.max_tokens else None,
+                system_instruction=system_prompt if system_prompt else None,
+                tools=tools if tools else None,
+            )
+            resp = client.models.generate_content(
+                model=model.id,
+                contents=inputs,
+                config=cfg,
+            )
+
+            if ctx:
+                calls = self.chat.extract_tool_calls(resp)
+                if calls:
+                    ctx.tool_calls = calls
+            return self.chat.extract_text(resp)
+        except Exception as e:
+            self.window.core.debug.log(e)
+            return ""
+        finally:
+            self.locked = False
+
+    def build_remote_tools(self, model: ModelItem = None) -> list:
+        """
+        Build Google GenAI remote tools based on config flags.
+        - google_tool_search: enables grounding via Google Search (Gemini 2.x)
+          or GoogleSearchRetrieval (Gemini 1.5 fallback).
+        - google_tool_code_execution: enables code execution tool.
+
+        Returns a list of gtypes.Tool objects (can be empty).
+
+        :param model: ModelItem
+        :return: list of gtypes.Tool
+        """
+        tools: list = []
+        cfg = self.window.core.config
+        model_id = (model.id if model and getattr(model, "id", None) else "").lower()
+
+        # Google Search tool
+        if cfg.get("remote_tools.google.web_search") and "image" not in model.id:
+            try:
+                if not model_id.startswith("gemini-1.5") and not model_id.startswith("models/gemini-1.5"):
+                    # Gemini 2.x uses GoogleSearch
+                    tools.append(gtypes.Tool(google_search=gtypes.GoogleSearch()))
+                else:
+                    # Gemini 1.5 fallback uses GoogleSearchRetrieval
+                    # Note: Supported only for 1.5 models.
+                    tools.append(gtypes.Tool(
+                        google_search_retrieval=gtypes.GoogleSearchRetrieval()
+                    ))
+            except Exception as e:
+                # Do not break the request if tool construction fails
+                self.window.core.debug.log(e)
+
+        # Code Execution tool
+        if cfg.get("remote_tools.google.code_interpreter") and "image" not in model.id:
+            try:
+                tools.append(gtypes.Tool(code_execution=gtypes.ToolCodeExecution))
+            except Exception as e:
+                self.window.core.debug.log(e)
+
+        return tools
+
+
+    def stop(self):
+        """On global event stop"""
+        pass
+
+    def close(self):
+        """Close Google client"""
+        if self.locked:
+            return
+        if self.client is not None:
+            try:
+                pass
+                # self.client.close()
+            except Exception as e:
+                self.window.core.debug.log(e)
+                print("Error closing Google client:", e)
pygpt_net/provider/api/google/audio.py (new file)
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.28 20:00:00                  #
+# ================================================== #
+
+import base64
+import io
+import wave
+from typing import Optional, Tuple
+
+from google.genai.types import Part
+from pygpt_net.core.bridge.context import MultimodalContext
+
+
+class Audio:
+    def __init__(self, window=None):
+        """
+        Audio helpers for Google GenAI.
+        - Build audio input parts for requests
+        - Convert Google PCM output to WAV (base64) for UI compatibility
+        """
+        self.window = window
+
+    # ---------- INPUT (user -> model) ----------
+
+    def build_part(
+            self,
+            multimodal_ctx: Optional[MultimodalContext]
+    ) -> Optional[Part]:
+        """
+        Build audio Part from multimodal context (inline bytes).
+
+        :param multimodal_ctx: MultimodalContext
+        :return: Part or None
+        """
+        if not multimodal_ctx or not multimodal_ctx.is_audio_input or not multimodal_ctx.audio_data:
+            return None
+        audio_format = (multimodal_ctx.audio_format or "wav").lower()
+        mime = f"audio/{audio_format}"
+        return Part.from_bytes(data=multimodal_ctx.audio_data, mime_type=mime)
+
+    # ---------- OUTPUT (model -> UI) ----------
+
+    def extract_first_audio_part(
+            self,
+            response
+    ) -> Tuple[Optional[bytes], Optional[str]]:
+        """
+        Extract first audio inline_data from a non-streaming response.
+
+        :param response: Google response object
+        :return: (audio_bytes, mime_type) or (None, None)
+        """
+        try:
+            candidates = getattr(response, "candidates", None) or []
+            for cand in candidates:
+                content = getattr(cand, "content", None)
+                parts = getattr(content, "parts", None) or []
+                for p in parts:
+                    inline = getattr(p, "inline_data", None)
+                    if not inline:
+                        continue
+                    mime = (getattr(inline, "mime_type", "") or "").lower()
+                    if not mime.startswith("audio/"):
+                        continue
+                    data = getattr(inline, "data", None)
+                    audio_bytes = self._ensure_bytes(data)
+                    if audio_bytes:
+                        return audio_bytes, mime
+        except Exception:
+            pass
+        return None, None
+
+    def pcm16_to_wav_base64(
+            self,
+            pcm_bytes: bytes,
+            rate: int = 24000,
+            channels: int = 1,
+            sample_width: int = 2
+    ) -> str:
+        """
+        Wrap raw PCM16 mono @ 24kHz into WAV and return base64-encoded payload.
+
+        :param pcm_bytes: Raw PCM16 bytes
+        :param rate: Sample rate (Hz), default 24000 for Google TTS
+        :param channels: Channels, default 1
+        :param sample_width: Bytes per sample, default 2 for PCM16
+        :return: Base64-encoded WAV
+        """
+        buf = io.BytesIO()
+        with wave.open(buf, "wb") as wf:
+            wf.setnchannels(channels)
+            wf.setsampwidth(sample_width)
+            wf.setframerate(rate)
+            wf.writeframes(pcm_bytes)
+        return base64.b64encode(buf.getvalue()).decode("utf-8")
+
+    @staticmethod
+    def _ensure_bytes(data) -> Optional[bytes]:
+        """Return raw bytes from inline_data.data (bytes or base64 string)."""
+        try:
+            if isinstance(data, (bytes, bytearray)):
+                return bytes(data)
+            if isinstance(data, str):
+                return base64.b64decode(data)
+        except Exception:
+            return None
+        return None
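Google returns synthesized speech as raw PCM16 (mono, 24 kHz by default) inside inline_data, so the two output helpers are meant to be chained: extract the first audio part, then wrap it into a base64 WAV for the UI. A minimal sketch, assuming a non-streaming google-genai response object:

# Hypothetical sketch: chain the output helpers on a non-streaming response.
import base64

audio = Audio(window=window)                                # window is stored but not used by these helpers
pcm_bytes, mime = audio.extract_first_audio_part(response)  # response: google-genai result with audio inline_data (assumed)
if pcm_bytes:
    wav_b64 = audio.pcm16_to_wav_base64(pcm_bytes)          # defaults: 24 kHz, mono, 16-bit
    with open("reply.wav", "wb") as f:
        f.write(base64.b64decode(wav_b64))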