pygpt-net 2.6.36__py3-none-any.whl → 2.6.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. pygpt_net/CHANGELOG.txt +12 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +164 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +570 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/debug/debug.py +6 -6
  14. pygpt_net/controller/model/editor.py +3 -0
  15. pygpt_net/controller/model/importer.py +9 -2
  16. pygpt_net/controller/plugins/plugins.py +11 -3
  17. pygpt_net/controller/presets/presets.py +2 -2
  18. pygpt_net/core/bridge/context.py +35 -35
  19. pygpt_net/core/bridge/worker.py +40 -16
  20. pygpt_net/core/ctx/bag.py +7 -2
  21. pygpt_net/core/ctx/reply.py +17 -2
  22. pygpt_net/core/db/viewer.py +19 -34
  23. pygpt_net/core/render/plain/pid.py +12 -1
  24. pygpt_net/core/render/web/body.py +30 -39
  25. pygpt_net/core/tabs/tab.py +24 -1
  26. pygpt_net/data/config/config.json +10 -3
  27. pygpt_net/data/config/models.json +3 -3
  28. pygpt_net/data/config/settings.json +105 -0
  29. pygpt_net/data/css/style.dark.css +2 -3
  30. pygpt_net/data/css/style.light.css +2 -3
  31. pygpt_net/data/locale/locale.de.ini +3 -1
  32. pygpt_net/data/locale/locale.en.ini +19 -1
  33. pygpt_net/data/locale/locale.es.ini +3 -1
  34. pygpt_net/data/locale/locale.fr.ini +3 -1
  35. pygpt_net/data/locale/locale.it.ini +3 -1
  36. pygpt_net/data/locale/locale.pl.ini +4 -2
  37. pygpt_net/data/locale/locale.uk.ini +3 -1
  38. pygpt_net/data/locale/locale.zh.ini +3 -1
  39. pygpt_net/item/assistant.py +51 -2
  40. pygpt_net/item/attachment.py +21 -20
  41. pygpt_net/item/calendar_note.py +19 -2
  42. pygpt_net/item/ctx.py +115 -2
  43. pygpt_net/item/index.py +9 -2
  44. pygpt_net/item/mode.py +9 -6
  45. pygpt_net/item/model.py +20 -3
  46. pygpt_net/item/notepad.py +14 -2
  47. pygpt_net/item/preset.py +42 -2
  48. pygpt_net/item/prompt.py +8 -2
  49. pygpt_net/plugin/cmd_files/plugin.py +2 -2
  50. pygpt_net/provider/api/__init__.py +5 -3
  51. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  52. pygpt_net/provider/api/anthropic/audio.py +30 -0
  53. pygpt_net/provider/api/anthropic/chat.py +341 -0
  54. pygpt_net/provider/api/anthropic/image.py +25 -0
  55. pygpt_net/provider/api/anthropic/tools.py +266 -0
  56. pygpt_net/provider/api/anthropic/vision.py +142 -0
  57. pygpt_net/provider/api/google/chat.py +2 -2
  58. pygpt_net/provider/api/google/realtime/client.py +2 -2
  59. pygpt_net/provider/api/google/tools.py +58 -48
  60. pygpt_net/provider/api/google/vision.py +7 -1
  61. pygpt_net/provider/api/openai/chat.py +1 -0
  62. pygpt_net/provider/api/openai/vision.py +6 -0
  63. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  64. pygpt_net/provider/api/x_ai/audio.py +32 -0
  65. pygpt_net/provider/api/x_ai/chat.py +968 -0
  66. pygpt_net/provider/api/x_ai/image.py +208 -0
  67. pygpt_net/provider/api/x_ai/remote.py +262 -0
  68. pygpt_net/provider/api/x_ai/tools.py +120 -0
  69. pygpt_net/provider/api/x_ai/vision.py +119 -0
  70. pygpt_net/provider/core/attachment/json_file.py +2 -2
  71. pygpt_net/provider/core/config/patch.py +28 -0
  72. pygpt_net/provider/llms/anthropic.py +4 -2
  73. pygpt_net/tools/text_editor/tool.py +4 -1
  74. pygpt_net/tools/text_editor/ui/dialogs.py +1 -1
  75. pygpt_net/ui/base/config_dialog.py +5 -11
  76. pygpt_net/ui/dialog/db.py +177 -59
  77. pygpt_net/ui/dialog/dictionary.py +57 -59
  78. pygpt_net/ui/dialog/editor.py +3 -2
  79. pygpt_net/ui/dialog/image.py +1 -1
  80. pygpt_net/ui/dialog/logger.py +3 -2
  81. pygpt_net/ui/dialog/models.py +16 -16
  82. pygpt_net/ui/dialog/plugins.py +63 -60
  83. pygpt_net/ui/layout/ctx/ctx_list.py +3 -4
  84. pygpt_net/ui/layout/toolbox/__init__.py +2 -2
  85. pygpt_net/ui/layout/toolbox/assistants.py +8 -9
  86. pygpt_net/ui/layout/toolbox/presets.py +2 -2
  87. pygpt_net/ui/main.py +9 -4
  88. pygpt_net/ui/widget/element/labels.py +20 -4
  89. pygpt_net/ui/widget/textarea/editor.py +0 -4
  90. pygpt_net/ui/widget/textarea/web.py +1 -1
  91. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/METADATA +18 -6
  92. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/RECORD +95 -76
  93. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  94. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/LICENSE +0 -0
  95. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/WHEEL +0 -0
  96. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.38.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,570 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.05 18:00:00 #
10
+ # ================================================== #
11
+
12
+ import io
13
+ import json
14
+ from dataclasses import dataclass, field
15
+ from typing import Optional, Literal, Any
16
+ from enum import Enum
17
+
18
+ from PySide6.QtCore import QObject, Signal, Slot, QRunnable
19
+ from openai.types.chat import ChatCompletionChunk
20
+
21
+ from pygpt_net.core.events import RenderEvent
22
+ from pygpt_net.core.text.utils import has_unclosed_code_tag
23
+ from pygpt_net.item.ctx import CtxItem
24
+
25
+ from . import (
26
+ openai_stream,
27
+ google_stream,
28
+ anthropic_stream,
29
+ xai_stream,
30
+ llamaindex_stream,
31
+ langchain_stream,
32
+ utils as stream_utils,
33
+ )
34
+
35
# OpenAI Responses Events
# Closed set of SSE event-type strings emitted by the OpenAI Responses API
# that the stream worker dispatches on (see StreamWorker._process_chunk).
EventType = Literal[
    "response.completed",
    "response.output_text.delta",
    "response.output_item.added",
    "response.function_call_arguments.delta",
    "response.function_call_arguments.done",
    "response.output_text.annotation.added",
    "response.reasoning_summary_text.delta",
    "response.output_item.done",
    "response.code_interpreter_call_code.delta",
    "response.code_interpreter_call_code.done",
    "response.image_generation_call.partial_image",
    "response.created",
    "response.done",
    "response.failed",
    "error",
]
53
+
54
+
55
class ChunkType(str, Enum):
    """
    Enum for chunk type classification.

    Each member identifies which provider/SDK produced a streaming chunk,
    so the worker can route it to the matching handler module.
    """
    API_CHAT = "api_chat"  # OpenAI Chat Completions / or compatible
    API_CHAT_RESPONSES = "api_chat_responses"  # OpenAI Responses
    API_COMPLETION = "api_completion"  # OpenAI Completions
    LANGCHAIN_CHAT = "langchain_chat"  # LangChain chat (deprecated)
    LLAMA_CHAT = "llama_chat"  # LlamaIndex chat
    GOOGLE = "google"  # Google SDK
    ANTHROPIC = "anthropic"  # Anthropic SDK
    XAI_SDK = "xai_sdk"  # xAI SDK
    RAW = "raw"  # Raw string fallback
68
+
69
+
70
class WorkerSignals(QObject):
    """
    Defines the signals available from a running worker thread.

    - `end`: emitted with the final CtxItem when streaming finishes
    - `errorOccurred`: emitted with the Exception when streaming fails
    - `eventReady`: emitted with each RenderEvent for the UI to render
    """
    end = Signal(object)
    errorOccurred = Signal(Exception)
    eventReady = Signal(object)
80
+
81
+
82
@dataclass(slots=True)
class WorkerState:
    """Holds mutable state for the streaming loop."""
    out: Optional[io.StringIO] = None  # accumulated output text buffer (lazily created)
    output_tokens: int = 0  # rough output token count: one per emitted delta
    begin: bool = True  # True until the first non-empty delta is emitted
    error: Optional[Exception] = None  # first error raised during streaming
    fn_args_buffers: dict[str, io.StringIO] = field(default_factory=dict)  # per tool-call id: argument JSON buffer
    citations: Optional[list] = field(default_factory=list)  # collected citation URLs (may alias ctx.urls)
    image_paths: list[str] = field(default_factory=list)  # Google inline image file paths
    files: list[dict] = field(default_factory=list)  # container files to download after the stream ends
    img_path: Optional[str] = None  # target path for OpenAI partial-image assembly
    is_image: bool = False  # True if a partial image was assembled during streaming
    has_google_inline_image: bool = False  # True if Google inline image data was seen
    is_code: bool = False  # True while inside a code-interpreter code block
    force_func_call: bool = False  # forced tool-call flag, propagated to ctx.force_call
    stopped: bool = False  # set when the stream was stopped (user request or error)
    chunk_type: ChunkType = ChunkType.RAW  # last detected chunk type
    generator: Any = None  # active provider stream generator
    usage_vendor: Optional[str] = None  # vendor that reported token usage ("google", "xai", ...)
    usage_payload: dict = field(default_factory=dict)  # normalized usage: {'in','out','reasoning','total'}
    google_stream_ref: Any = None  # Google stream object kept alive for usage resolution
    tool_calls: list[dict] = field(default_factory=list)  # normalized tool calls collected from the stream

    # --- XAI SDK only ---
    xai_last_response: Any = None  # holds final response from xai_sdk.chat.stream()
108
+
109
+
110
class StreamWorker(QRunnable):
    """
    QRunnable that drains a provider streaming generator off the GUI thread.

    Consumes chunks from ``self.stream``, classifies each chunk by provider
    (OpenAI, Google, Anthropic, xAI, LlamaIndex, LangChain, raw fallback),
    delegates parsing to the per-provider handler modules, and forwards
    RenderEvents back to the UI through ``WorkerSignals``.
    """
    __slots__ = ("signals", "ctx", "window", "stream")

    def __init__(self, ctx: CtxItem, window, parent=None):
        """
        :param ctx: Context item the streamed output belongs to
        :param window: Main window (provides ``core`` and ``controller``)
        :param parent: Unused; kept for QRunnable-style construction
        """
        super().__init__()
        self.signals = WorkerSignals()
        self.ctx = ctx
        self.window = window
        self.stream = None  # provider generator; assigned by the caller before start

    @Slot()
    def run(self):
        """Worker entry point: stream loop, post-loop handling, finalization."""
        ctx = self.ctx
        win = self.window
        core = win.core
        ctrl = win.controller

        # bind emitters to locals: avoids attribute lookups in the hot loop
        emit_event = self.signals.eventReady.emit
        emit_error = self.signals.errorOccurred.emit
        emit_end = self.signals.end.emit

        state = WorkerState()
        state.generator = self.stream
        state.img_path = core.image.gen_unique_path(ctx)

        base_data = {"meta": ctx.meta, "ctx": ctx}
        emit_event(RenderEvent(RenderEvent.STREAM_BEGIN, base_data))

        try:
            if state.generator is not None:
                # print(state.generator) # TODO: detect by obj type?
                for chunk in state.generator:
                    # cooperative stop
                    if self._should_stop(ctrl, state, ctx):
                        break

                    # if error flagged, stop early
                    if state.error is not None:
                        ctx.msg_id = None
                        state.stopped = True
                        break

                    etype: Optional[EventType] = None

                    # detect chunk type
                    if ctx.use_responses_api:
                        # Responses API chunks are dispatched on their event type
                        if hasattr(chunk, 'type'):
                            etype = chunk.type  # type: ignore[assignment]
                            state.chunk_type = ChunkType.API_CHAT_RESPONSES
                        else:
                            continue
                    else:
                        state.chunk_type = self._detect_chunk_type(chunk)

                    # process chunk according to type
                    response = self._process_chunk(ctx, core, state, chunk, etype)

                    # emit response delta if present
                    if response is not None and response != "" and not state.stopped:
                        self._append_response(ctx, state, response, emit_event)

                    # free per-iteration ref
                    chunk = None

                # after loop: handle tool-calls and images assembly
                self._handle_after_loop(ctx, core, state)

        except Exception as e:
            state.error = e

        finally:
            # always finalize: builds ctx.output, usage, emits end/error, cleanup
            self._finalize(ctx, core, state, emit_end, emit_error)

    # ------------ Orchestration helpers ------------

    def _should_stop(
            self,
            ctrl,
            state: WorkerState,
            ctx: CtxItem
    ) -> bool:
        """
        Checks external stop signal and attempts to stop the generator gracefully.

        :param ctrl: Controller instance for stop checking
        :param state: Current worker state
        :param ctx: Current context item
        :return: True if should stop
        """
        if not ctrl.kernel.stopped():
            return False

        gen = state.generator
        if gen is not None:
            # Try common stop methods without raising
            for meth in ("close", "cancel", "stop"):
                if hasattr(gen, meth):
                    try:
                        getattr(gen, meth)()
                    except Exception:
                        pass

        ctx.msg_id = None
        state.stopped = True
        return True

    def _detect_chunk_type(self, chunk) -> ChunkType:
        """
        Detects chunk type for various providers/SDKs.
        Order matters: detect vendor-specific types before generic fallbacks.

        :param chunk: The chunk object to classify
        :return: Detected ChunkType
        """
        # OpenAI SDK / OpenAI-compatible SSE
        choices = getattr(chunk, 'choices', None)
        if choices:
            choice0 = choices[0] if len(choices) > 0 else None
            if choice0 is not None and hasattr(choice0, 'delta') and choice0.delta is not None:
                return ChunkType.API_CHAT
            if choice0 is not None and hasattr(choice0, 'text') and choice0.text is not None:
                return ChunkType.API_COMPLETION

        # xAI SDK: chat.stream() yields (response, chunk) tuples
        if isinstance(chunk, (tuple, list)) and len(chunk) == 2:
            _resp, _ch = chunk[0], chunk[1]
            if hasattr(_ch, "content") or isinstance(_ch, str):
                return ChunkType.XAI_SDK

        # Anthropic: detect both SSE events and raw delta objects early
        t = getattr(chunk, "type", None)
        if isinstance(t, str):
            anthropic_events = {
                "message_start", "content_block_start", "content_block_delta",
                "content_block_stop", "message_delta", "message_stop",
                "ping", "error",  # control / error
                "text_delta", "input_json_delta",  # content deltas
                "thinking_delta", "signature_delta",  # thinking deltas
            }
            if t in anthropic_events or t.startswith("message_") or t.startswith("content_block_"):
                return ChunkType.ANTHROPIC

        # Google python-genai
        if hasattr(chunk, "candidates"):
            return ChunkType.GOOGLE

        # LangChain chat-like objects
        if hasattr(chunk, 'content') and getattr(chunk, 'content') is not None:
            return ChunkType.LANGCHAIN_CHAT

        # LlamaIndex (generic delta fallback) - exclude Anthropic/Google shapes
        if hasattr(chunk, 'delta') and getattr(chunk, 'delta') is not None:
            # guard: do not misclassify Anthropic or Google objects
            if not hasattr(chunk, "type") and not hasattr(chunk, "candidates"):
                return ChunkType.LLAMA_CHAT

        # fallback: OpenAI ChatCompletionChunk not caught above
        if isinstance(chunk, ChatCompletionChunk):
            return ChunkType.API_CHAT

        return ChunkType.RAW

    def _append_response(
            self,
            ctx: CtxItem,
            state: WorkerState,
            response: str,
            emit_event
    ):
        """
        Appends response delta and emits STREAM_APPEND event.

        :param ctx: Current context item
        :param state: Current worker state
        :param response: Response delta to append
        :param emit_event: Function to emit events
        """
        if state.begin and response == "":
            return
        if state.out is None:
            # lazy buffer creation: only allocated once there is output
            state.out = io.StringIO()
        state.out.write(response)
        state.output_tokens += 1
        emit_event(
            RenderEvent(
                RenderEvent.STREAM_APPEND,
                {
                    "meta": ctx.meta,
                    "ctx": ctx,
                    "chunk": response,
                    "begin": state.begin,
                },
            )
        )
        state.begin = False

    def _handle_after_loop(
            self,
            ctx: CtxItem,
            core,
            state: WorkerState
    ):
        """
        Post-loop handling for tool calls and images assembly.

        :param ctx: Current context item
        :param core: Core instance
        :param state: Current worker state
        """
        if state.tool_calls:
            ctx.force_call = state.force_func_call
            core.debug.info("[chat] Tool calls found, unpacking...")
            # Ensure function.arguments is JSON string
            for tc in state.tool_calls:
                fn = tc.get("function") or {}
                if isinstance(fn.get("arguments"), dict):
                    fn["arguments"] = json.dumps(fn["arguments"], ensure_ascii=False)
            core.command.unpack_tool_calls_chunks(ctx, state.tool_calls)

        # OpenAI: partial image assembly
        if state.is_image and state.img_path:
            core.debug.info("[chat] OpenAI partial image assembled")
            ctx.images = [state.img_path]

        # Google: inline images (merged into ctx.images without duplicates)
        if state.image_paths:
            core.debug.info("[chat] Google inline images found")
            if not isinstance(ctx.images, list) or not ctx.images:
                ctx.images = list(state.image_paths)
            else:
                seen = set(ctx.images)
                for p in state.image_paths:
                    if p not in seen:
                        ctx.images.append(p)
                        seen.add(p)

        # xAI: extract tool calls from final response if not already present
        if (not state.tool_calls) and (state.xai_last_response is not None):
            try:
                calls = xai_stream.xai_extract_tool_calls(state.xai_last_response)
                if calls:
                    state.tool_calls = calls
            except Exception:
                pass

        # xAI: collect citations (final response) -> ctx.urls
        if state.xai_last_response is not None:
            try:
                cites = xai_stream.xai_extract_citations(state.xai_last_response) or []
                if cites:
                    if ctx.urls is None:
                        ctx.urls = []
                    for u in cites:
                        if u not in ctx.urls:
                            ctx.urls.append(u)
            except Exception:
                pass

    def _finalize(
            self,
            ctx: CtxItem,
            core,
            state: WorkerState,
            emit_end,
            emit_error
    ):
        """
        Finalize stream: build output, usage, tokens, files, errors, cleanup.

        Always called from the ``finally`` block of :meth:`run`, so it must
        tolerate partially-initialized state.

        :param ctx: Current context item
        :param core: Core instance
        :param state: Current worker state
        :param emit_end: Function to emit end event
        :param emit_error: Function to emit error event
        """
        output = state.out.getvalue() if state.out is not None else ""
        if state.out is not None:
            try:
                state.out.close()
            except Exception:
                pass
            state.out = None

        # close a dangling markdown code fence so rendering stays valid
        if has_unclosed_code_tag(output):
            output += "\n```"

        # Google: resolve usage if present
        if ((state.usage_vendor is None or state.usage_vendor == "google")
                and not state.usage_payload and state.generator is not None):
            try:
                if hasattr(state.generator, "resolve"):
                    state.generator.resolve()
                um = getattr(state.generator, "usage_metadata", None)
                if um:
                    stream_utils.capture_google_usage(state, um)
            except Exception:
                pass

        # xAI: usage from final response if still missing
        if (not state.usage_payload) and (state.xai_last_response is not None):
            try:
                up = xai_stream.xai_extract_usage(state.xai_last_response)
                if up:
                    state.usage_payload = up
                    state.usage_vendor = "xai"
            except Exception:
                pass

        # Close generator if possible
        gen = state.generator
        if gen and hasattr(gen, 'close'):
            try:
                gen.close()
            except Exception:
                pass

        self.stream = None
        ctx.output = output
        output = None  # free ref

        # Tokens usage: prefer vendor-reported counts, fall back to local counters
        if state.usage_payload:
            in_tok_final = state.usage_payload.get("in")
            out_tok_final = state.usage_payload.get("out")

            if in_tok_final is None:
                in_tok_final = ctx.input_tokens if ctx.input_tokens is not None else 0
            if out_tok_final is None:
                out_tok_final = state.output_tokens

            ctx.set_tokens(in_tok_final, out_tok_final)

            # Attach usage details in ctx.extra for debugging
            try:
                if not isinstance(ctx.extra, dict):
                    ctx.extra = {}
                ctx.extra["usage"] = {
                    "vendor": state.usage_vendor,
                    "input_tokens": in_tok_final,
                    "output_tokens": out_tok_final,
                    "reasoning_tokens": state.usage_payload.get("reasoning", 0),
                    "total_reported": state.usage_payload.get("total"),
                }
            except Exception:
                pass
        else:
            ctx.set_tokens(ctx.input_tokens if ctx.input_tokens is not None else 0, state.output_tokens)

        core.ctx.update_item(ctx)

        # OpenAI: download container files if present
        if state.files and not state.stopped:
            core.debug.info("[chat] Container files found, downloading...")
            try:
                core.api.openai.container.download_files(ctx, state.files)
            except Exception as e:
                core.debug.error(f"[chat] Error downloading container files: {e}")

        # Emit error and end
        if state.error:
            emit_error(state.error)
        emit_end(ctx)

        # Cleanup local buffers
        for _buf in state.fn_args_buffers.values():
            try:
                _buf.close()
            except Exception:
                pass
        state.fn_args_buffers.clear()
        state.files.clear()
        state.tool_calls.clear()
        # citations may alias ctx.urls; only clear a private list
        if state.citations is not None and state.citations is not ctx.urls:
            state.citations.clear()
            state.citations = None

        self.cleanup()

    # ------------ Chunk processors ------------

    def _process_chunk(
            self,
            ctx: CtxItem,
            core,
            state: WorkerState,
            chunk,
            etype: Optional[EventType]
    ) -> Optional[str]:
        """
        Dispatches processing to concrete provider-specific processing.

        :param ctx: Current context item
        :param core: Core instance
        :param state: Current worker state
        :param chunk: The chunk to process
        :param etype: Optional event type for Responses API
        :return: Processed string delta or None
        """
        t = state.chunk_type
        if t == ChunkType.API_CHAT:
            return self._process_api_chat(ctx, state, chunk)
        if t == ChunkType.API_CHAT_RESPONSES:
            return self._process_api_chat_responses(ctx, core, state, chunk, etype)
        if t == ChunkType.API_COMPLETION:
            return self._process_api_completion(chunk)
        if t == ChunkType.LANGCHAIN_CHAT:
            return self._process_langchain_chat(chunk)
        if t == ChunkType.LLAMA_CHAT:
            return self._process_llama_chat(state, chunk)
        if t == ChunkType.GOOGLE:
            return self._process_google_chunk(ctx, core, state, chunk)
        if t == ChunkType.ANTHROPIC:
            return self._process_anthropic_chunk(ctx, core, state, chunk)
        if t == ChunkType.XAI_SDK:
            return self._process_xai_sdk_chunk(ctx, core, state, chunk)
        return self._process_raw(chunk)

    # Thin wrappers: delegate to the per-provider handler modules so that the
    # dispatch table above stays readable and providers stay independently testable.

    def _process_api_chat(self, ctx, state, chunk):
        return openai_stream.process_api_chat(ctx, state, chunk)

    def _process_api_chat_responses(self, ctx, core, state, chunk, etype):
        return openai_stream.process_api_chat_responses(ctx, core, state, chunk, etype)

    def _process_api_completion(self, chunk):
        return openai_stream.process_api_completion(chunk)

    def _process_langchain_chat(self, chunk):
        return langchain_stream.process_langchain_chat(chunk)

    def _process_llama_chat(self, state, chunk):
        return llamaindex_stream.process_llama_chat(state, chunk)

    def _process_google_chunk(self, ctx, core, state, chunk):
        return google_stream.process_google_chunk(ctx, core, state, chunk)

    def _process_anthropic_chunk(self, ctx, core, state, chunk):
        return anthropic_stream.process_anthropic_chunk(ctx, core, state, chunk)

    def _process_xai_sdk_chunk(self, ctx, core, state, item):
        return xai_stream.process_xai_sdk_chunk(ctx, core, state, item)

    def _process_raw(self, chunk) -> Optional[str]:
        """
        Raw chunk fallback.

        :param chunk: The chunk to process
        :return: String representation or None
        """
        if chunk is not None:
            return chunk if isinstance(chunk, str) else str(chunk)
        return None

    def cleanup(self):
        """Cleanup resources after worker execution."""
        sig = self.signals
        self.signals = None
        if sig is not None:
            try:
                # deleteLater may raise RuntimeError if the C++ object is gone
                sig.deleteLater()
            except RuntimeError:
                pass
@@ -0,0 +1,135 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.05 00:00:00 #
10
+ # ================================================== #
11
+
12
+ from typing import Optional
13
+
14
+
15
def process_xai_sdk_chunk(ctx, core, state, item) -> Optional[str]:
    """
    Handle a single native xAI SDK streaming item.

    The xAI SDK yields ``(response, chunk)`` pairs; the running response is
    remembered on the state for post-loop extraction (tool calls, citations,
    usage), and the chunk's text delta is returned.

    :param ctx: Chat context
    :param core: Core controller
    :param state: Chat state
    :param item: Incoming streaming chunk (tuple of (response, chunk))
    :return: Extracted text delta or None
    """
    try:
        response, delta = item
    except Exception:
        # not a 2-element pair: nothing we can do with it
        return None

    state.xai_last_response = response

    try:
        text = getattr(delta, "content", None)
        if text is not None:
            return str(text)
        if isinstance(delta, str):
            return delta
    except Exception:
        pass
    return None
40
+
41
+
42
def xai_extract_tool_calls(response) -> list[dict]:
    """
    Extract tool calls from xAI SDK final response (proto).

    Walks ``response.proto.choices[0].message.tool_calls`` defensively and
    normalizes each entry into the common dict shape used by the worker.
    Missing arguments default to an empty JSON object string.

    :param response: xAI final response object
    :return: List of tool calls in normalized dict format
    """
    calls: list[dict] = []
    try:
        proto = getattr(response, "proto", None)
        if not proto:
            return calls
        choices = getattr(proto, "choices", None) or []
        if not choices:
            return calls
        message = getattr(choices[0], "message", None)
        if not message:
            return calls
        for tc in getattr(message, "tool_calls", None) or []:
            try:
                fn = getattr(tc, "function", None)
                calls.append({
                    "id": getattr(tc, "id", "") or "",
                    "type": "function",
                    "function": {
                        "name": getattr(fn, "name", "") or "",
                        "arguments": getattr(fn, "arguments", "") or "{}",
                    },
                })
            except Exception:
                continue
    except Exception:
        pass
    return calls
75
+
76
+
77
def xai_extract_citations(response) -> list[str]:
    """
    Extract citations (URLs) from xAI final response if present.

    Looks at both ``response.citations`` and ``response.proto.citations``,
    keeping only http(s) URL strings and preserving first-seen order without
    duplicates. Both sources are scanned best-effort: a malformed container
    is silently skipped.

    :param response: xAI final response object
    :return: List of citation URLs
    """
    urls: list[str] = []
    seen: set[str] = set()  # O(1) dedup instead of scanning the list per URL

    def _collect(candidates) -> None:
        # Shared collector for both citation sources; best-effort on failure.
        try:
            for u in candidates or []:
                if (isinstance(u, str)
                        and u.startswith(("http://", "https://"))
                        and u not in seen):
                    seen.add(u)
                    urls.append(u)
        except Exception:
            pass

    cites = getattr(response, "citations", None)
    if isinstance(cites, (list, tuple)):
        _collect(cites)

    proto = getattr(response, "proto", None)
    if proto:
        _collect(getattr(proto, "citations", None))
    return urls
105
+
106
+
107
def xai_extract_usage(response) -> dict:
    """
    Extract usage from xAI final response via proto. Return {'in','out','reasoning','total'}.

    Token counts are coerced to int defensively; when a total is reported,
    output tokens are derived as ``total - prompt`` (clamped at zero),
    otherwise the reported completion count is used.

    :param response: xAI final response object
    :return: Usage dict, or {} when no usage is available
    """
    try:
        proto = getattr(response, "proto", None)
        usage = getattr(proto, "usage", None) if proto else None
        if not usage:
            return {}

        def _to_int(value):
            # tolerate ints, numeric strings and floats; anything else -> 0
            try:
                return int(value)
            except Exception:
                try:
                    return int(float(value))
                except Exception:
                    return 0

        prompt = _to_int(getattr(usage, "prompt_tokens", 0) or 0)
        completion = _to_int(getattr(usage, "completion_tokens", 0) or 0)
        total = _to_int(getattr(usage, "total_tokens", (prompt + completion)) or (prompt + completion))
        out_tokens = max(0, total - prompt) if total else completion
        return {"in": prompt, "out": out_tokens, "reasoning": 0, "total": total}
    except Exception:
        return {}