pygpt-net 2.6.29__py3-none-any.whl → 2.6.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (182)
  1. pygpt_net/CHANGELOG.txt +15 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +4 -0
  4. pygpt_net/{container.py → app_core.py} +5 -6
  5. pygpt_net/controller/__init__.py +5 -2
  6. pygpt_net/controller/access/control.py +1 -9
  7. pygpt_net/controller/assistant/assistant.py +4 -4
  8. pygpt_net/controller/assistant/batch.py +7 -7
  9. pygpt_net/controller/assistant/files.py +4 -4
  10. pygpt_net/controller/assistant/threads.py +3 -3
  11. pygpt_net/controller/attachment/attachment.py +4 -7
  12. pygpt_net/controller/audio/audio.py +25 -1
  13. pygpt_net/controller/audio/ui.py +2 -2
  14. pygpt_net/controller/chat/audio.py +1 -8
  15. pygpt_net/controller/chat/common.py +30 -4
  16. pygpt_net/controller/chat/handler/stream_worker.py +1124 -0
  17. pygpt_net/controller/chat/output.py +8 -3
  18. pygpt_net/controller/chat/stream.py +4 -405
  19. pygpt_net/controller/chat/text.py +3 -2
  20. pygpt_net/controller/chat/vision.py +11 -19
  21. pygpt_net/controller/config/placeholder.py +1 -1
  22. pygpt_net/controller/ctx/ctx.py +1 -1
  23. pygpt_net/controller/ctx/summarizer.py +1 -1
  24. pygpt_net/controller/kernel/kernel.py +11 -3
  25. pygpt_net/controller/kernel/reply.py +5 -1
  26. pygpt_net/controller/mode/mode.py +21 -12
  27. pygpt_net/controller/plugins/settings.py +3 -2
  28. pygpt_net/controller/presets/editor.py +112 -99
  29. pygpt_net/controller/realtime/__init__.py +12 -0
  30. pygpt_net/controller/realtime/manager.py +53 -0
  31. pygpt_net/controller/realtime/realtime.py +268 -0
  32. pygpt_net/controller/theme/theme.py +3 -2
  33. pygpt_net/controller/ui/mode.py +7 -0
  34. pygpt_net/controller/ui/ui.py +19 -1
  35. pygpt_net/controller/ui/vision.py +4 -4
  36. pygpt_net/core/agents/legacy.py +2 -2
  37. pygpt_net/core/agents/runners/openai_workflow.py +2 -2
  38. pygpt_net/core/assistants/files.py +5 -5
  39. pygpt_net/core/assistants/store.py +4 -4
  40. pygpt_net/core/audio/audio.py +6 -1
  41. pygpt_net/core/audio/backend/native/__init__.py +12 -0
  42. pygpt_net/core/audio/backend/{native.py → native/native.py} +426 -127
  43. pygpt_net/core/audio/backend/native/player.py +139 -0
  44. pygpt_net/core/audio/backend/native/realtime.py +250 -0
  45. pygpt_net/core/audio/backend/pyaudio/__init__.py +12 -0
  46. pygpt_net/core/audio/backend/pyaudio/playback.py +194 -0
  47. pygpt_net/core/audio/backend/pyaudio/pyaudio.py +923 -0
  48. pygpt_net/core/audio/backend/pyaudio/realtime.py +275 -0
  49. pygpt_net/core/audio/backend/pygame/__init__.py +12 -0
  50. pygpt_net/core/audio/backend/{pygame.py → pygame/pygame.py} +130 -19
  51. pygpt_net/core/audio/backend/shared/__init__.py +38 -0
  52. pygpt_net/core/audio/backend/shared/conversions.py +211 -0
  53. pygpt_net/core/audio/backend/shared/envelope.py +38 -0
  54. pygpt_net/core/audio/backend/shared/player.py +137 -0
  55. pygpt_net/core/audio/backend/shared/rt.py +52 -0
  56. pygpt_net/core/audio/capture.py +5 -0
  57. pygpt_net/core/audio/output.py +13 -2
  58. pygpt_net/core/audio/whisper.py +6 -2
  59. pygpt_net/core/bridge/bridge.py +4 -3
  60. pygpt_net/core/bridge/worker.py +31 -9
  61. pygpt_net/core/debug/console/console.py +2 -2
  62. pygpt_net/core/debug/presets.py +2 -2
  63. pygpt_net/core/dispatcher/dispatcher.py +37 -1
  64. pygpt_net/core/events/__init__.py +2 -1
  65. pygpt_net/core/events/realtime.py +55 -0
  66. pygpt_net/core/experts/experts.py +2 -2
  67. pygpt_net/core/image/image.py +51 -1
  68. pygpt_net/core/modes/modes.py +2 -2
  69. pygpt_net/core/presets/presets.py +3 -3
  70. pygpt_net/core/realtime/options.py +87 -0
  71. pygpt_net/core/realtime/shared/__init__.py +0 -0
  72. pygpt_net/core/realtime/shared/audio.py +213 -0
  73. pygpt_net/core/realtime/shared/loop.py +64 -0
  74. pygpt_net/core/realtime/shared/session.py +59 -0
  75. pygpt_net/core/realtime/shared/text.py +37 -0
  76. pygpt_net/core/realtime/shared/tools.py +276 -0
  77. pygpt_net/core/realtime/shared/turn.py +38 -0
  78. pygpt_net/core/realtime/shared/types.py +16 -0
  79. pygpt_net/core/realtime/worker.py +164 -0
  80. pygpt_net/core/tokens/tokens.py +4 -4
  81. pygpt_net/core/types/__init__.py +1 -0
  82. pygpt_net/core/types/image.py +48 -0
  83. pygpt_net/core/types/mode.py +5 -2
  84. pygpt_net/core/vision/analyzer.py +1 -1
  85. pygpt_net/data/config/config.json +13 -4
  86. pygpt_net/data/config/models.json +219 -101
  87. pygpt_net/data/config/modes.json +3 -9
  88. pygpt_net/data/config/settings.json +135 -27
  89. pygpt_net/data/config/settings_section.json +2 -2
  90. pygpt_net/data/locale/locale.de.ini +7 -7
  91. pygpt_net/data/locale/locale.en.ini +25 -12
  92. pygpt_net/data/locale/locale.es.ini +7 -7
  93. pygpt_net/data/locale/locale.fr.ini +7 -7
  94. pygpt_net/data/locale/locale.it.ini +7 -7
  95. pygpt_net/data/locale/locale.pl.ini +8 -8
  96. pygpt_net/data/locale/locale.uk.ini +7 -7
  97. pygpt_net/data/locale/locale.zh.ini +3 -3
  98. pygpt_net/data/locale/plugin.audio_input.en.ini +4 -0
  99. pygpt_net/data/locale/plugin.audio_output.en.ini +4 -0
  100. pygpt_net/item/model.py +23 -3
  101. pygpt_net/plugin/audio_input/plugin.py +37 -4
  102. pygpt_net/plugin/audio_input/simple.py +57 -8
  103. pygpt_net/plugin/cmd_files/worker.py +3 -0
  104. pygpt_net/plugin/openai_dalle/plugin.py +4 -4
  105. pygpt_net/plugin/openai_vision/plugin.py +12 -13
  106. pygpt_net/provider/agents/openai/agent.py +5 -5
  107. pygpt_net/provider/agents/openai/agent_b2b.py +5 -5
  108. pygpt_net/provider/agents/openai/agent_planner.py +5 -6
  109. pygpt_net/provider/agents/openai/agent_with_experts.py +5 -5
  110. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -4
  111. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -4
  112. pygpt_net/provider/agents/openai/bot_researcher.py +2 -2
  113. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  114. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -1
  115. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  116. pygpt_net/provider/agents/openai/evolve.py +5 -5
  117. pygpt_net/provider/agents/openai/supervisor.py +4 -4
  118. pygpt_net/provider/api/__init__.py +27 -0
  119. pygpt_net/provider/api/anthropic/__init__.py +68 -0
  120. pygpt_net/provider/api/google/__init__.py +295 -0
  121. pygpt_net/provider/api/google/audio.py +121 -0
  122. pygpt_net/provider/api/google/chat.py +591 -0
  123. pygpt_net/provider/api/google/image.py +427 -0
  124. pygpt_net/provider/api/google/realtime/__init__.py +12 -0
  125. pygpt_net/provider/api/google/realtime/client.py +1945 -0
  126. pygpt_net/provider/api/google/realtime/realtime.py +186 -0
  127. pygpt_net/provider/api/google/tools.py +222 -0
  128. pygpt_net/provider/api/google/vision.py +129 -0
  129. pygpt_net/provider/{gpt → api/openai}/__init__.py +24 -4
  130. pygpt_net/provider/api/openai/agents/__init__.py +0 -0
  131. pygpt_net/provider/{gpt → api/openai}/agents/computer.py +1 -1
  132. pygpt_net/provider/{gpt → api/openai}/agents/experts.py +1 -1
  133. pygpt_net/provider/{gpt → api/openai}/agents/response.py +1 -1
  134. pygpt_net/provider/{gpt → api/openai}/assistants.py +1 -1
  135. pygpt_net/provider/{gpt → api/openai}/chat.py +15 -8
  136. pygpt_net/provider/{gpt → api/openai}/completion.py +1 -1
  137. pygpt_net/provider/{gpt → api/openai}/image.py +1 -1
  138. pygpt_net/provider/api/openai/realtime/__init__.py +12 -0
  139. pygpt_net/provider/api/openai/realtime/client.py +1828 -0
  140. pygpt_net/provider/api/openai/realtime/realtime.py +194 -0
  141. pygpt_net/provider/{gpt → api/openai}/remote_tools.py +1 -1
  142. pygpt_net/provider/{gpt → api/openai}/responses.py +34 -20
  143. pygpt_net/provider/{gpt → api/openai}/store.py +2 -2
  144. pygpt_net/provider/{gpt → api/openai}/vision.py +1 -1
  145. pygpt_net/provider/api/openai/worker/__init__.py +0 -0
  146. pygpt_net/provider/{gpt → api/openai}/worker/assistants.py +4 -4
  147. pygpt_net/provider/{gpt → api/openai}/worker/importer.py +10 -10
  148. pygpt_net/provider/audio_input/google_genai.py +103 -0
  149. pygpt_net/provider/audio_input/openai_whisper.py +1 -1
  150. pygpt_net/provider/audio_output/google_genai_tts.py +229 -0
  151. pygpt_net/provider/audio_output/openai_tts.py +9 -6
  152. pygpt_net/provider/core/config/patch.py +26 -0
  153. pygpt_net/provider/core/model/patch.py +20 -0
  154. pygpt_net/provider/core/preset/json_file.py +2 -4
  155. pygpt_net/provider/llms/anthropic.py +2 -5
  156. pygpt_net/provider/llms/base.py +4 -3
  157. pygpt_net/provider/llms/google.py +8 -9
  158. pygpt_net/provider/llms/openai.py +1 -1
  159. pygpt_net/provider/loaders/hub/image_vision/base.py +1 -1
  160. pygpt_net/ui/dialog/preset.py +71 -55
  161. pygpt_net/ui/layout/toolbox/footer.py +16 -0
  162. pygpt_net/ui/layout/toolbox/image.py +5 -0
  163. pygpt_net/ui/main.py +6 -4
  164. pygpt_net/ui/widget/option/combo.py +15 -1
  165. pygpt_net/utils.py +9 -0
  166. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.31.dist-info}/METADATA +55 -55
  167. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.31.dist-info}/RECORD +181 -135
  168. pygpt_net/core/audio/backend/pyaudio.py +0 -554
  169. /pygpt_net/{provider/gpt/agents → controller/chat/handler}/__init__.py +0 -0
  170. /pygpt_net/{provider/gpt/worker → core/realtime}/__init__.py +0 -0
  171. /pygpt_net/provider/{gpt → api/openai}/agents/client.py +0 -0
  172. /pygpt_net/provider/{gpt → api/openai}/agents/remote_tools.py +0 -0
  173. /pygpt_net/provider/{gpt → api/openai}/agents/utils.py +0 -0
  174. /pygpt_net/provider/{gpt → api/openai}/audio.py +0 -0
  175. /pygpt_net/provider/{gpt → api/openai}/computer.py +0 -0
  176. /pygpt_net/provider/{gpt → api/openai}/container.py +0 -0
  177. /pygpt_net/provider/{gpt → api/openai}/summarizer.py +0 -0
  178. /pygpt_net/provider/{gpt → api/openai}/tools.py +0 -0
  179. /pygpt_net/provider/{gpt → api/openai}/utils.py +0 -0
  180. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.31.dist-info}/LICENSE +0 -0
  181. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.31.dist-info}/WHEEL +0 -0
  182. {pygpt_net-2.6.29.dist-info → pygpt_net-2.6.31.dist-info}/entry_points.txt +0 -0
pygpt_net/controller/chat/handler/stream_worker.py (new file)
@@ -0,0 +1,1124 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package               #
+ # Website: https://pygpt.net                         #
+ # GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+ # MIT License                                        #
+ # Created By  : Marcin Szczygliński                  #
+ # Updated Date: 2025.08.28 20:00:00                  #
+ # ================================================== #
+ 
+ import base64
+ import io
+ import json
+ from dataclasses import dataclass, field
+ from typing import Optional, Literal, Any
+ 
+ from PySide6.QtCore import QObject, Signal, Slot, QRunnable
+ 
+ from pygpt_net.core.events import RenderEvent
+ from pygpt_net.core.text.utils import has_unclosed_code_tag
+ from pygpt_net.item.ctx import CtxItem
+ 
+ # OpenAI Responses Events
+ EventType = Literal[
+     "response.completed",
+     "response.output_text.delta",
+     "response.output_item.added",
+     "response.function_call_arguments.delta",
+     "response.function_call_arguments.done",
+     "response.output_text.annotation.added",
+     "response.reasoning_summary_text.delta",
+     "response.output_item.done",
+     "response.code_interpreter_call_code.delta",
+     "response.code_interpreter_call_code.done",
+     "response.image_generation_call.partial_image",
+     "response.created",
+     "response.done",
+     "response.failed",
+     "error",
+ ]
+ 
+ # Chunks
+ ChunkType = Literal[
+     "api_chat",
+     "api_chat_responses",
+     "api_completion",
+     "langchain_chat",
+     "llama_chat",
+     "google",
+     "raw",
+ ]
+ 
+ 
+ class WorkerSignals(QObject):
+     """
+     Defines the signals available from a running worker thread.
+     - `end`: CtxItem
+     - `errorOccurred`: Exception
+     - `eventReady`: RenderEvent
+     """
+     end = Signal(object)
+     errorOccurred = Signal(Exception)
+     eventReady = Signal(object)
+ 
+ 
+ @dataclass
+ class WorkerState:
+     """Holds mutable state for the streaming loop."""
+     output_parts: list[str] = field(default_factory=list)
+     output_tokens: int = 0
+     begin: bool = True
+     error: Optional[Exception] = None
+     fn_args_buffers: dict[str, io.StringIO] = field(default_factory=dict)
+     citations: Optional[list] = field(default_factory=list)
+     image_paths: list[str] = field(default_factory=list)
+     files: list[dict] = field(default_factory=list)
+     img_path: Optional[str] = None
+     is_image: bool = False
+     has_google_inline_image: bool = False
+     is_code: bool = False
+     force_func_call: bool = False
+     stopped: bool = False
+     chunk_type: ChunkType = "raw"
+     generator: Any = None
+     usage_vendor: Optional[str] = None
+     usage_payload: dict = field(default_factory=dict)
+     google_stream_ref: Any = None
+     tool_calls: list[dict] = field(default_factory=list)
+ 
+ 
+ class StreamWorker(QRunnable):
+     def __init__(self, ctx: CtxItem, window, parent=None):
+         super().__init__()
+         self.signals = WorkerSignals()
+         self.ctx = ctx
+         self.window = window
+         self.stream = None
+ 
+     @Slot()
+     def run(self):
+         ctx = self.ctx
+         win = self.window
+         core = win.core
+         ctrl = win.controller
+ 
+         emit_event = self.signals.eventReady.emit
+         emit_error = self.signals.errorOccurred.emit
+         emit_end = self.signals.end.emit
+ 
+         state = WorkerState()
+         state.generator = self.stream
+         state.img_path = core.image.gen_unique_path(ctx)
+ 
+         base_data = {"meta": ctx.meta, "ctx": ctx}
+         emit_event(RenderEvent(RenderEvent.STREAM_BEGIN, base_data))
+ 
+         try:
+             if state.generator is not None:
+                 for chunk in state.generator:
+                     # cooperative stop
+                     if self._should_stop(ctrl, state, ctx):
+                         break
+ 
+                     # if error flagged, stop early
+                     if state.error is not None:
+                         ctx.msg_id = None
+                         state.stopped = True
+                         break
+ 
+                     etype: Optional[EventType] = None
+ 
+                     # detect chunk type
+                     if ctx.use_responses_api:
+                         if hasattr(chunk, 'type'):
+                             etype = chunk.type  # type: ignore[assignment]
+                             state.chunk_type = "api_chat_responses"
+                         else:
+                             continue
+                     else:
+                         state.chunk_type = self._detect_chunk_type(chunk)
+ 
+                     # process chunk according to type
+                     response = self._process_chunk(ctx, core, state, chunk, etype)
+ 
+                     # emit response delta if present
+                     if response is not None and response != "" and not state.stopped:
+                         self._append_response(ctx, state, response, emit_event)
+ 
+                     # free per-iteration ref
+                     chunk = None
+ 
+             # after loop: handle tool-calls and images assembly
+             self._handle_after_loop(ctx, core, state)
+ 
+         except Exception as e:
+             state.error = e
+ 
+         finally:
+             self._finalize(ctx, core, state, emit_end, emit_error)
+ 
+     # ------------ Orchestration helpers ------------
+ 
+     def _should_stop(
+         self,
+         ctrl,
+         state: WorkerState,
+         ctx: CtxItem
+     ) -> bool:
+         """
+         Checks external stop signal and attempts to stop the generator gracefully.
+ 
+         :param ctrl: Controller with stop signal
+         :param state: WorkerState
+         :param ctx: CtxItem
+         :return: True if stopped, False otherwise
+         """
+         if not ctrl.kernel.stopped():
+             return False
+ 
+         gen = state.generator
+         if gen is not None:
+             # Try common stop methods without raising
+             for meth in ("close", "cancel", "stop"):
+                 if hasattr(gen, meth):
+                     try:
+                         getattr(gen, meth)()
+                     except Exception:
+                         pass
+ 
+         ctx.msg_id = None
+         state.stopped = True
+         return True
+ 
+     def _detect_chunk_type(self, chunk) -> ChunkType:
+         """
+         Detects chunk type for various providers/SDKs.
+ 
+         :param chunk: The chunk object from the stream
+         :return: Detected ChunkType
+         """
+         if (hasattr(chunk, 'choices')
+                 and chunk.choices
+                 and hasattr(chunk.choices[0], 'delta')
+                 and chunk.choices[0].delta is not None):
+             return "api_chat"
+         if (hasattr(chunk, 'choices')
+                 and chunk.choices
+                 and hasattr(chunk.choices[0], 'text')
+                 and chunk.choices[0].text is not None):
+             return "api_completion"
+         if hasattr(chunk, 'content') and chunk.content is not None:
+             return "langchain_chat"
+         if hasattr(chunk, 'delta') and chunk.delta is not None:
+             return "llama_chat"
+         if hasattr(chunk, "candidates"):  # Google python-genai chunk
+             return "google"
+         return "raw"
+ 
+     def _append_response(
+         self,
+         ctx: CtxItem,
+         state: WorkerState,
+         response: str,
+         emit_event
+     ):
+         """
+         Appends response delta and emits STREAM_APPEND event.
+ 
+         Skips empty initial chunks if state.begin is True.
+ 
+         :param ctx: CtxItem
+         :param state: WorkerState
+         :param response: Response delta string
+         :param emit_event: Function to emit RenderEvent
+         """
+         if state.begin and response == "":
+             return
+         state.output_parts.append(response)
+         state.output_tokens += 1
+         emit_event(
+             RenderEvent(
+                 RenderEvent.STREAM_APPEND,
+                 {
+                     "meta": ctx.meta,
+                     "ctx": ctx,
+                     "chunk": response,
+                     "begin": state.begin,
+                 },
+             )
+         )
+         state.begin = False
+ 
+     def _handle_after_loop(
+         self,
+         ctx: CtxItem,
+         core,
+         state: WorkerState
+     ):
+         """
+         Post-loop handling for tool calls and images assembly.
+ 
+         :param ctx: CtxItem
+         :param core: Core instance
+         :param state: WorkerState
+         """
+         if state.tool_calls:
+             ctx.force_call = state.force_func_call
+             core.debug.info("[chat] Tool calls found, unpacking...")
+             # Ensure function.arguments is JSON string
+             for tc in state.tool_calls:
+                 fn = tc.get("function") or {}
+                 if isinstance(fn.get("arguments"), dict):
+                     fn["arguments"] = json.dumps(fn["arguments"], ensure_ascii=False)
+             core.command.unpack_tool_calls_chunks(ctx, state.tool_calls)
+ 
+         # OpenAI partial image assembly
+         if state.is_image and state.img_path:
+             core.debug.info("[chat] OpenAI partial image assembled")
+             ctx.images = [state.img_path]
+ 
+         # Google inline images
+         if state.image_paths:
+             core.debug.info("[chat] Google inline images found")
+             if not isinstance(ctx.images, list) or not ctx.images:
+                 ctx.images = list(state.image_paths)
+             else:
+                 seen = set(ctx.images)
+                 for p in state.image_paths:
+                     if p not in seen:
+                         ctx.images.append(p)
+                         seen.add(p)
+ 
+ 
+     def _finalize(
+         self,
+         ctx: CtxItem,
+         core,
+         state: WorkerState,
+         emit_end,
+         emit_error
+     ):
+         """
+         Finalize stream: build output, usage, tokens, files, errors, cleanup.
+ 
+         :param ctx: CtxItem
+         :param core: Core instance
+         :param state: WorkerState
+         :param emit_end: Function to emit end signal
+         :param emit_error: Function to emit error signal
+         """
+         # Build final output
+         output = "".join(state.output_parts)
+         state.output_parts.clear()
+ 
+         if has_unclosed_code_tag(output):
+             output += "\n```"
+ 
+         # Attempt to resolve Google usage from the stream object if missing
+         if ((state.usage_vendor is None or state.usage_vendor == "google")
+                 and not state.usage_payload and state.generator is not None):
+             try:
+                 if hasattr(state.generator, "resolve"):
+                     state.generator.resolve()
+                 um = getattr(state.generator, "usage_metadata", None)
+                 if um:
+                     self._capture_google_usage(state, um)
+             except Exception:
+                 pass
+ 
+         # Close generator if possible
+         gen = state.generator
+         if gen and hasattr(gen, 'close'):
+             try:
+                 gen.close()
+             except Exception:
+                 pass
+ 
+         self.stream = None
+         ctx.output = output
+ 
+         # Tokens usage
+         if state.usage_payload:
+             in_tok_final = state.usage_payload.get("in")
+             out_tok_final = state.usage_payload.get("out")
+ 
+             if in_tok_final is None:
+                 in_tok_final = ctx.input_tokens if ctx.input_tokens is not None else 0
+             if out_tok_final is None:
+                 out_tok_final = state.output_tokens
+ 
+             ctx.set_tokens(in_tok_final, out_tok_final)
+ 
+             # Attach usage details in ctx.extra for debugging
+             try:
+                 if not isinstance(ctx.extra, dict):
+                     ctx.extra = {}
+                 ctx.extra["usage"] = {
+                     "vendor": state.usage_vendor,
+                     "input_tokens": in_tok_final,
+                     "output_tokens": out_tok_final,
+                     "reasoning_tokens": state.usage_payload.get("reasoning", 0),
+                     "total_reported": state.usage_payload.get("total"),
+                 }
+             except Exception:
+                 pass
+         else:
+             # Fallback when usage is not available
+             ctx.set_tokens(ctx.input_tokens if ctx.input_tokens is not None else 0, state.output_tokens)
+ 
+         core.ctx.update_item(ctx)
+ 
+         # OpenAI only: download container files if present
+         if state.files and not state.stopped:
+             core.debug.info("[chat] Container files found, downloading...")
+             try:
+                 core.api.openai.container.download_files(ctx, state.files)
+             except Exception as e:
+                 core.debug.error(f"[chat] Error downloading container files: {e}")
+ 
+         # Emit error and end
+         if state.error:
+             emit_error(state.error)
+         emit_end(ctx)
+ 
+         # Cleanup local buffers
+         for _buf in state.fn_args_buffers.values():
+             try:
+                 _buf.close()
+             except Exception:
+                 pass
+         state.fn_args_buffers.clear()
+         state.files.clear()
+         state.tool_calls.clear()
+         if state.citations is not None and state.citations is not ctx.urls:
+             state.citations.clear()
+             state.citations = None
+ 
+         # Worker cleanup (signals etc.)
+         self.cleanup()
+ 
+     # ------------ Chunk processors ------------
+ 
+     def _process_chunk(
+         self,
+         ctx: CtxItem,
+         core,
+         state: WorkerState,
+         chunk,
+         etype: Optional[EventType]
+     ) -> Optional[str]:
+         """
+         Dispatches to the concrete provider-specific chunk processor.
+ 
+         :param ctx: CtxItem
+         :param core: Core instance
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         :param etype: Optional event type for Responses API
+         :return: Response delta string or None
+         """
+         t = state.chunk_type
+         if t == "api_chat":
+             return self._process_api_chat(ctx, state, chunk)
+         if t == "api_chat_responses":
+             return self._process_api_chat_responses(ctx, core, state, chunk, etype)
+         if t == "api_completion":
+             return self._process_api_completion(chunk)
+         if t == "langchain_chat":
+             return self._process_langchain_chat(chunk)
+         if t == "llama_chat":
+             return self._process_llama_chat(state, chunk)
+         if t == "google":
+             return self._process_google_chunk(ctx, core, state, chunk)
+         # raw fallback
+         return self._process_raw(chunk)
+ 
+     def _process_api_chat(
+         self,
+         ctx: CtxItem,
+         state: WorkerState,
+         chunk
+     ) -> Optional[str]:
+         """
+         OpenAI Chat Completions stream delta.
+ 
+         Handles text deltas, citations, and streamed tool_calls.
+ 
+         :param ctx: CtxItem
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         :return: Response delta string or None
+         """
+         response = None
+         state.citations = None  # as in original, reset to None for this type
+ 
+         delta = chunk.choices[0].delta if getattr(chunk, "choices", None) else None
+         if delta and getattr(delta, "content", None) is not None:
+             if state.citations is None and hasattr(chunk, 'citations') and chunk.citations is not None:
+                 state.citations = chunk.citations
+                 ctx.urls = state.citations
+             response = delta.content
+ 
+         # Accumulate streamed tool_calls
+         if delta and getattr(delta, "tool_calls", None):
+             for tool_chunk in delta.tool_calls:
+                 if tool_chunk.index is None:
+                     tool_chunk.index = 0
+                 if len(state.tool_calls) <= tool_chunk.index:
+                     state.tool_calls.append(
+                         {
+                             "id": "",
+                             "type": "function",
+                             "function": {"name": "", "arguments": ""}
+                         }
+                     )
+                 tool_call = state.tool_calls[tool_chunk.index]
+                 if getattr(tool_chunk, "id", None):
+                     tool_call["id"] += tool_chunk.id
+                 if getattr(getattr(tool_chunk, "function", None), "name", None):
+                     tool_call["function"]["name"] += tool_chunk.function.name
+                 if getattr(getattr(tool_chunk, "function", None), "arguments", None):
+                     tool_call["function"]["arguments"] += tool_chunk.function.arguments
+ 
+         # Capture usage (if available on final chunk with include_usage=True)
+         try:
+             u = getattr(chunk, "usage", None)
+             if u:
+                 self._capture_openai_usage(state, u)
+         except Exception:
+             pass
+ 
+         return response
+ 
+     def _process_api_chat_responses(
+         self,
+         ctx: CtxItem,
+         core,
+         state: WorkerState,
+         chunk,
+         etype: Optional[EventType]
+     ) -> Optional[str]:
+         """
+         OpenAI Responses API stream events.
+ 
+         Handles various event types, including text deltas, tool calls, citations, images, and usage.
+ 
+         :param ctx: CtxItem
+         :param core: Core instance
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         :param etype: EventType string
+         :return: Response delta string or None
+         """
+         response = None
+ 
+         if etype == "response.completed":
+             # usage on final response
+             try:
+                 u = getattr(chunk.response, "usage", None)
+                 if u:
+                     self._capture_openai_usage(state, u)
+             except Exception:
+                 pass
+ 
+             for item in chunk.response.output:
+                 if item.type == "mcp_list_tools":
+                     core.api.openai.responses.mcp_tools = item.tools
+                 elif item.type == "mcp_call":
+                     call = {
+                         "id": item.id,
+                         "type": "mcp_call",
+                         "approval_request_id": item.approval_request_id,
+                         "arguments": item.arguments,
+                         "error": item.error,
+                         "name": item.name,
+                         "output": item.output,
+                         "server_label": item.server_label,
+                     }
+                     state.tool_calls.append({
+                         "id": item.id,
+                         "call_id": "",
+                         "type": "function",
+                         "function": {"name": item.name, "arguments": item.arguments}
+                     })
+                     ctx.extra["mcp_call"] = call
+                     core.ctx.update_item(ctx)
+                 elif item.type == "mcp_approval_request":
+                     call = {
+                         "id": item.id,
+                         "type": "mcp_call",
+                         "arguments": item.arguments,
+                         "name": item.name,
+                         "server_label": item.server_label,
+                     }
+                     ctx.extra["mcp_approval_request"] = call
+                     core.ctx.update_item(ctx)
+ 
+         elif etype == "response.output_text.delta":
+             response = chunk.delta
+ 
+         elif etype == "response.output_item.added" and chunk.item.type == "function_call":
+             state.tool_calls.append({
+                 "id": chunk.item.id,
+                 "call_id": chunk.item.call_id,
+                 "type": "function",
+                 "function": {"name": chunk.item.name, "arguments": ""}
+             })
+             state.fn_args_buffers[chunk.item.id] = io.StringIO()
+ 
+         elif etype == "response.function_call_arguments.delta":
+             buf = state.fn_args_buffers.get(chunk.item_id)
+             if buf is not None:
+                 buf.write(chunk.delta)
+ 
+         elif etype == "response.function_call_arguments.done":
+             buf = state.fn_args_buffers.pop(chunk.item_id, None)
+             if buf is not None:
+                 try:
+                     args_val = buf.getvalue()
+                 finally:
+                     buf.close()
+                 for tc in state.tool_calls:
+                     if tc["id"] == chunk.item_id:
+                         tc["function"]["arguments"] = args_val
+                         break
+ 
+         elif etype == "response.output_text.annotation.added":
+             ann = chunk.annotation
+             if ann['type'] == "url_citation":
+                 if state.citations is None:
+                     state.citations = []
+                 url_citation = ann['url']
+                 state.citations.append(url_citation)
+                 ctx.urls = state.citations
+             elif ann['type'] == "container_file_citation":
+                 state.files.append({
+                     "container_id": ann['container_id'],
+                     "file_id": ann['file_id'],
+                 })
+ 
+         elif etype == "response.reasoning_summary_text.delta":
+             response = chunk.delta
+ 
+         elif etype == "response.output_item.done":
+             # Delegate to computer handler which may add tool calls
+             tool_calls, has_calls = core.api.openai.computer.handle_stream_chunk(ctx, chunk, state.tool_calls)
+             state.tool_calls = tool_calls
+             if has_calls:
+                 state.force_func_call = True
+ 
+         elif etype == "response.code_interpreter_call_code.delta":
+             if not state.is_code:
+                 response = "\n\n**Code interpreter**\n```python\n" + chunk.delta
+                 state.is_code = True
+             else:
+                 response = chunk.delta
+ 
+         elif etype == "response.code_interpreter_call_code.done":
+             response = "\n\n```\n-----------\n"
+ 
+         elif etype == "response.image_generation_call.partial_image":
+             image_base64 = chunk.partial_image_b64
+             image_bytes = base64.b64decode(image_base64)
+             if state.img_path:
+                 with open(state.img_path, "wb") as f:
+                     f.write(image_bytes)
+             del image_bytes
+             state.is_image = True
+ 
+         elif etype == "response.created":
+             ctx.msg_id = str(chunk.response.id)
+             core.ctx.update_item(ctx)
+ 
+         elif etype in {"response.done", "response.failed", "error"}:
+             pass
+ 
+         return response
+ 
+     def _process_api_completion(self, chunk) -> Optional[str]:
+         """
+         OpenAI Completions stream text delta.
+ 
+         :param chunk: The chunk object from the stream
+         :return: Response delta string or None
+         """
+         if getattr(chunk, "choices", None):
+             choice0 = chunk.choices[0]
+             if getattr(choice0, "text", None) is not None:
+                 return choice0.text
+         return None
+ 
+     def _process_langchain_chat(self, chunk) -> Optional[str]:
+         """
+         LangChain chat streaming delta.
+ 
+         :param chunk: The chunk object from the stream
+         :return: Response delta string or None
+         """
+         if getattr(chunk, "content", None) is not None:
+             return str(chunk.content)
+         return None
+ 
+     def _process_llama_chat(
+         self,
+         state: WorkerState,
+         chunk
+     ) -> Optional[str]:
+         """
+         Llama chat streaming delta with optional tool call extraction.
+ 
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         :return: Response delta string or None
+         """
+         response = None
+         if getattr(chunk, "delta", None) is not None:
+             response = str(chunk.delta)
+ 
+         tool_chunks = getattr(getattr(chunk, "message", None), "additional_kwargs", {}).get("tool_calls", [])
+         if tool_chunks:
+             for tool_chunk in tool_chunks:
+                 id_val = getattr(tool_chunk, "call_id", None) or getattr(tool_chunk, "id", None)
+                 name = getattr(tool_chunk, "name", None) or getattr(getattr(tool_chunk, "function", None), "name", None)
+                 args = getattr(tool_chunk, "arguments", None)
+                 if args is None:
+                     f = getattr(tool_chunk, "function", None)
+                     args = getattr(f, "arguments", None) if f else None
+                 if id_val:
+                     if not args:
+                         args = "{}"
+                     tool_call = {
+                         "id": id_val,
+                         "type": "function",
+                         "function": {"name": name, "arguments": args}
+                     }
+                     state.tool_calls.clear()
+                     state.tool_calls.append(tool_call)
+ 
+         return response
+ 
+     def _process_google_chunk(
+         self,
+         ctx: CtxItem,
+         core,
+         state: WorkerState,
+         chunk
+     ) -> Optional[str]:
+         """
+         Google python-genai streaming chunk.
+ 
+         Handles text, tool calls, inline images, code execution parts, citations, and usage.
+ 
+         :param ctx: CtxItem
+         :param core: Core instance
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         :return: Response delta string or None
+         """
+         response_parts: list[str] = []
+ 
+         # Keep a reference to stream object for resolve() later if needed
+         if state.google_stream_ref is None:
+             state.google_stream_ref = state.generator
+ 
+         # Try to capture usage from this chunk (usage_metadata)
+         try:
+             um = getattr(chunk, "usage_metadata", None)
+             if um:
+                 self._capture_google_usage(state, um)
+         except Exception:
+             pass
+ 
+         # 1) Plain text delta (if present)
+         t = None
+         try:
+             t = getattr(chunk, "text", None)
+             if t:
+                 response_parts.append(t)
+         except Exception:
+             pass
+ 
+         # 2) Tool calls (function_calls property preferred)
+         fc_list = []
+         try:
+             fc_list = getattr(chunk, "function_calls", None) or []
+         except Exception:
+             fc_list = []
+ 
+         new_calls = []
+ 
+         def _to_plain_dict(obj):
+             """
+             Best-effort conversion of SDK objects to plain dict/list.
+             """
+             try:
+                 if hasattr(obj, "to_json_dict"):
+                     return obj.to_json_dict()
+                 if hasattr(obj, "model_dump"):
+                     return obj.model_dump()
+                 if hasattr(obj, "to_dict"):
+                     return obj.to_dict()
+             except Exception:
+                 pass
+             if isinstance(obj, dict):
+                 return {k: _to_plain_dict(v) for k, v in obj.items()}
+             if isinstance(obj, (list, tuple)):
+                 return [_to_plain_dict(x) for x in obj]
+             return obj
+ 
+         if fc_list:
+             for fc in fc_list:
+                 name = getattr(fc, "name", "") or ""
+                 args_obj = getattr(fc, "args", {}) or {}
+                 args_dict = _to_plain_dict(args_obj) or {}
+                 new_calls.append({
+                     "id": getattr(fc, "id", "") or "",
+                     "type": "function",
+                     "function": {
+                         "name": name,
+                         "arguments": json.dumps(args_dict, ensure_ascii=False),
+                     }
+                 })
+         else:
+             # Fallback: read from candidates -> parts[].function_call
+             try:
+                 cands = getattr(chunk, "candidates", None) or []
+                 for cand in cands:
+                     content = getattr(cand, "content", None)
+                     parts = getattr(content, "parts", None) or []
+                     for p in parts:
+                         fn = getattr(p, "function_call", None)
+                         if not fn:
+                             continue
+                         name = getattr(fn, "name", "") or ""
+                         args_obj = getattr(fn, "args", {}) or {}
+                         args_dict = _to_plain_dict(args_obj) or {}
+                         new_calls.append({
+                             "id": getattr(fn, "id", "") or "",
+                             "type": "function",
+                             "function": {
+                                 "name": name,
+                                 "arguments": json.dumps(args_dict, ensure_ascii=False),
+                             }
+                         })
+             except Exception:
+                 pass
+ 
+         # De-duplicate tool calls and mark force flag if any found
+         if new_calls:
+             seen = {(tc["function"]["name"], tc["function"]["arguments"]) for tc in state.tool_calls}
+             for tc in new_calls:
+                 key = (tc["function"]["name"], tc["function"]["arguments"])
+                 if key not in seen:
+                     state.tool_calls.append(tc)
+                     seen.add(key)
+ 
+         # 3) Inspect candidates for code execution parts, inline images, and citations
+         try:
+             cands = getattr(chunk, "candidates", None) or []
+             for cand in cands:
+                 content = getattr(cand, "content", None)
+                 parts = getattr(content, "parts", None) or []
+ 
+                 for p in parts:
+                     # Code execution: executable code part -> open or append within fenced block
+                     ex = getattr(p, "executable_code", None)
+                     if ex:
+                         lang = (getattr(ex, "language", None) or "python").strip() or "python"
+                         code_txt = (
+                             getattr(ex, "code", None) or
+                             getattr(ex, "program", None) or
+                             getattr(ex, "source", None) or
+                             ""
+                         )
+                         if code_txt is None:
+                             code_txt = ""
+                         if not state.is_code:
+                             response_parts.append(f"\n\n**Code interpreter**\n```{lang.lower()}\n{code_txt}")
+                             state.is_code = True
+                         else:
+                             response_parts.append(str(code_txt))
+ 
+                     # Code execution result -> close fenced block (output will be streamed as normal text if provided)
+                     cer = getattr(p, "code_execution_result", None)
+                     if cer:
+                         if state.is_code:
+                             response_parts.append("\n\n```\n-----------\n")
+                             state.is_code = False
+                         # Note: We do not append execution outputs here to avoid duplicating chunk.text.
+ 
+                     # Inline image blobs
+                     blob = getattr(p, "inline_data", None)
+                     if blob:
+                         mime = (getattr(blob, "mime_type", "") or "").lower()
+                         if mime.startswith("image/"):
+                             data = getattr(blob, "data", None)
+                             if data:
+                                 # inline_data.data may be bytes or base64-encoded string
+                                 if isinstance(data, (bytes, bytearray)):
+                                     img_bytes = bytes(data)
+                                 else:
+                                     img_bytes = base64.b64decode(data)
+                                 save_path = core.image.gen_unique_path(ctx)
+                                 with open(save_path, "wb") as f:
+                                     f.write(img_bytes)
+                                 if not isinstance(ctx.images, list):
+                                     ctx.images = []
+                                 ctx.images.append(save_path)
+                                 state.image_paths.append(save_path)
+                                 state.has_google_inline_image = True
+ 
+                     # File data that points to externally hosted image (http/https)
+                     fdata = getattr(p, "file_data", None)
+                     if fdata:
+                         uri = getattr(fdata, "file_uri", None) or getattr(fdata, "uri", None)
+                         mime = (getattr(fdata, "mime_type", "") or "").lower()
+                         if uri and mime.startswith("image/") and (uri.startswith("http://") or uri.startswith("https://")):
+                             if ctx.urls is None:
+                                 ctx.urls = []
+                             ctx.urls.append(uri)
+ 
+                 # Collect citations (web search URLs) if present in candidates metadata
+                 self._collect_google_citations(ctx, state, chunk)
+ 
+         except Exception:
+             # Never break stream on extraction failures
+             pass
+ 
+         # Combine all response parts
+         return "".join(response_parts) if response_parts else None
+ 
+     def _process_raw(self, chunk) -> Optional[str]:
+         """
+         Raw chunk fallback.
+ 
+         :param chunk: The chunk object from the stream
+         :return: String representation of chunk or None
+         """
+         if chunk is not None:
+             return chunk if isinstance(chunk, str) else str(chunk)
+         return None
+ 
+     # ------------ Usage helpers ------------
+ 
+     def _safe_get(self, obj, path: str) -> Any:
+         """
+         Dot-path getter for dicts and objects.
+ 
+         :param obj: dict or object
+         :param path: Dot-separated path string
+         :return: Value or None
+         """
+         cur = obj
+         for seg in path.split("."):
+             if cur is None:
+                 return None
+             if isinstance(cur, dict):
+                 cur = cur.get(seg)
+             else:
+                 # Support numeric indices for lists like candidates.0...
+                 if seg.isdigit() and isinstance(cur, (list, tuple)):
+                     idx = int(seg)
+                     if 0 <= idx < len(cur):
+                         cur = cur[idx]
+                     else:
+                         return None
+                 else:
+                     cur = getattr(cur, seg, None)
+         return cur
+ 
+     def _as_int(self, val) -> Optional[int]:
+         """
+         Coerce to int if possible, else None.
+ 
+         :param val: Any value
+         :return: int or None
+         """
+         if val is None:
+             return None
+         try:
+             return int(val)
+         except Exception:
+             try:
+                 return int(float(val))
+             except Exception:
+                 return None
+ 
+     def _capture_openai_usage(self, state: WorkerState, u_obj):
+         """
+         Extract usage for OpenAI; include reasoning tokens in output if available.
+ 
+         :param state: WorkerState
+         :param u_obj: Usage object from OpenAI response
+         """
+         if not u_obj:
+             return
+         state.usage_vendor = "openai"
+         in_tok = self._as_int(self._safe_get(u_obj, "input_tokens")) or self._as_int(self._safe_get(u_obj, "prompt_tokens"))
+         out_tok = self._as_int(self._safe_get(u_obj, "output_tokens")) or self._as_int(self._safe_get(u_obj, "completion_tokens"))
+         total = self._as_int(self._safe_get(u_obj, "total_tokens"))
+         reasoning = (
+             self._as_int(self._safe_get(u_obj, "output_tokens_details.reasoning_tokens")) or
+             self._as_int(self._safe_get(u_obj, "completion_tokens_details.reasoning_tokens")) or
+             self._as_int(self._safe_get(u_obj, "reasoning_tokens")) or
+             0
+         )
+         out_with_reason = (out_tok or 0) + (reasoning or 0)
+         state.usage_payload = {"in": in_tok, "out": out_with_reason, "reasoning": reasoning or 0, "total": total}
+ 
+     def _capture_google_usage(self, state: WorkerState, um_obj):
+         """
+         Extract usage for Google python-genai; prefer total - prompt to include reasoning.
+ 
+         :param state: WorkerState
+         :param um_obj: Usage metadata object from Google chunk
+         """
+         if not um_obj:
+             return
+         state.usage_vendor = "google"
+         prompt = (
+             self._as_int(self._safe_get(um_obj, "prompt_token_count")) or
+             self._as_int(self._safe_get(um_obj, "prompt_tokens")) or
+             self._as_int(self._safe_get(um_obj, "input_tokens"))
+         )
+         total = (
+             self._as_int(self._safe_get(um_obj, "total_token_count")) or
+             self._as_int(self._safe_get(um_obj, "total_tokens"))
+         )
+         candidates = (
+             self._as_int(self._safe_get(um_obj, "candidates_token_count")) or
+             self._as_int(self._safe_get(um_obj, "output_tokens"))
+         )
+         reasoning = (
+             self._as_int(self._safe_get(um_obj, "candidates_reasoning_token_count")) or
+             self._as_int(self._safe_get(um_obj, "reasoning_tokens")) or 0
+         )
+         if total is not None and prompt is not None:
+             out_total = max(0, total - prompt)
+         else:
+             out_total = candidates
+         state.usage_payload = {"in": prompt, "out": out_total, "reasoning": reasoning or 0, "total": total}
+ 
+     def _collect_google_citations(
+         self,
+         ctx: CtxItem,
+         state: WorkerState,
+         chunk: Any
+     ):
+         """
+         Collect web citations (URLs) from Google GenAI stream.
+ 
+         Tries multiple known locations (grounding metadata and citation metadata)
+         in a defensive manner to remain compatible with SDK changes.
+ 
+         :param ctx: CtxItem
+         :param state: WorkerState
+         :param chunk: The chunk object from the stream
+         """
+         try:
+             cands = getattr(chunk, "candidates", None) or []
+         except Exception:
+             cands = []
+ 
+         if not isinstance(state.citations, list):
+             state.citations = []
+ 
+         # Helper to add URLs with de-duplication
+         def _add_url(url: Optional[str]):
+             if not url or not isinstance(url, str):
+                 return
+             url = url.strip()
+             if not (url.startswith("http://") or url.startswith("https://")):
+                 return
+             # Initialize ctx.urls if needed
+             if ctx.urls is None:
+                 ctx.urls = []
+             if url not in state.citations:
+                 state.citations.append(url)
+             if url not in ctx.urls:
+                 ctx.urls.append(url)
+ 
+         # Candidate-level metadata extraction
+         for cand in cands:
+             # Grounding metadata (web search attributions)
+             gm = self._safe_get(cand, "grounding_metadata") or self._safe_get(cand, "groundingMetadata")
+             if gm:
+                 atts = self._safe_get(gm, "grounding_attributions") or self._safe_get(gm, "groundingAttributions") or []
+                 try:
+                     for att in atts or []:
+                         # Try several common paths for URI
+                         for path in (
+                             "web.uri",
+                             "web.url",
+                             "source.web.uri",
+                             "source.web.url",
+                             "source.uri",
+                             "source.url",
+                             "uri",
+                             "url",
+                         ):
+                             _add_url(self._safe_get(att, path))
+                 except Exception:
+                     pass
+                 # Also check search entry point
+                 for path in (
+                     "search_entry_point.uri",
+                     "search_entry_point.url",
+                     "searchEntryPoint.uri",
+                     "searchEntryPoint.url",
+                     "search_entry_point.rendered_content_uri",
+                     "searchEntryPoint.rendered_content_uri",
+                 ):
+                     _add_url(self._safe_get(gm, path))
+ 
+             # Citation metadata (legacy and alt paths)
+             cm = self._safe_get(cand, "citation_metadata") or self._safe_get(cand, "citationMetadata")
+             if cm:
+                 cit_arrays = (
+                     self._safe_get(cm, "citation_sources") or
+                     self._safe_get(cm, "citationSources") or
+                     self._safe_get(cm, "citations") or []
+                 )
+                 try:
+                     for cit in cit_arrays or []:
+                         for path in ("uri", "url", "source.uri", "source.url", "web.uri", "web.url"):
+                             _add_url(self._safe_get(cit, path))
+                 except Exception:
+                     pass
+ 
+             # Part-level citation metadata
+             try:
+                 parts = self._safe_get(cand, "content.parts") or []
+                 for p in parts:
+                     # Per-part citation metadata
+                     pcm = self._safe_get(p, "citation_metadata") or self._safe_get(p, "citationMetadata")
+                     if pcm:
+                         arr = (
+                             self._safe_get(pcm, "citation_sources") or
+                             self._safe_get(pcm, "citationSources") or
+                             self._safe_get(pcm, "citations") or []
+                         )
+                         for cit in arr or []:
+                             for path in ("uri", "url", "source.uri", "source.url", "web.uri", "web.url"):
+                                 _add_url(self._safe_get(cit, path))
+                     # Per-part grounding attributions (rare)
+                     gpa = self._safe_get(p, "grounding_attributions") or self._safe_get(p, "groundingAttributions") or []
+                     for att in gpa or []:
+                         for path in ("web.uri", "web.url", "source.web.uri", "source.web.url", "uri", "url"):
+                             _add_url(self._safe_get(att, path))
+             except Exception:
+                 pass
+ 
+         # Bind to ctx on first discovery for compatibility with other parts of the app
+         if state.citations and (ctx.urls is None or not ctx.urls):
+             ctx.urls = list(state.citations)
+ 
+     def cleanup(self):
+         """Cleanup resources after worker execution."""
+         sig = self.signals
+         self.signals = None
+         if sig is not None:
+             try:
+                 sig.deleteLater()
+             except RuntimeError:
+                 pass
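
For orientation, below is a minimal sketch of how a QRunnable such as the StreamWorker above is typically wired up and started on the Qt side. Everything in it is illustrative and assumed rather than taken from this diff: the `ctx`, `window`, and `chat_stream` objects come from the host application, the slot names are hypothetical, and it assumes RenderEvent exposes its payload dict as `event.data`.

from PySide6.QtCore import QThreadPool

def on_render(event):
    # RenderEvent carrying STREAM_BEGIN / STREAM_APPEND and a payload dict
    print(event.data.get("chunk", ""), end="")

def on_end(ctx):
    print("\n[done]", ctx.output)

worker = StreamWorker(ctx, window)            # ctx / window from the host app
worker.stream = chat_stream                   # any iterable of provider chunks
worker.signals.eventReady.connect(on_render)
worker.signals.errorOccurred.connect(print)
worker.signals.end.connect(on_end)
QThreadPool.globalInstance().start(worker)    # run() executes on a pool thread

Because the worker communicates only through signals, chunk parsing and file I/O stay off the GUI thread, while connected slots run on the receiver's thread via Qt's queued connections.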