pygpt-net 2.6.36__py3-none-any.whl → 2.6.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. pygpt_net/CHANGELOG.txt +5 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/handler/anthropic_stream.py +166 -0
  4. pygpt_net/controller/chat/handler/google_stream.py +181 -0
  5. pygpt_net/controller/chat/handler/langchain_stream.py +24 -0
  6. pygpt_net/controller/chat/handler/llamaindex_stream.py +47 -0
  7. pygpt_net/controller/chat/handler/openai_stream.py +260 -0
  8. pygpt_net/controller/chat/handler/utils.py +210 -0
  9. pygpt_net/controller/chat/handler/worker.py +566 -0
  10. pygpt_net/controller/chat/handler/xai_stream.py +135 -0
  11. pygpt_net/controller/chat/stream.py +1 -1
  12. pygpt_net/controller/ctx/ctx.py +1 -1
  13. pygpt_net/controller/model/editor.py +3 -0
  14. pygpt_net/core/bridge/context.py +35 -35
  15. pygpt_net/core/bridge/worker.py +40 -16
  16. pygpt_net/core/render/web/body.py +29 -34
  17. pygpt_net/data/config/config.json +10 -3
  18. pygpt_net/data/config/models.json +3 -3
  19. pygpt_net/data/config/settings.json +105 -0
  20. pygpt_net/data/css/style.dark.css +2 -3
  21. pygpt_net/data/css/style.light.css +2 -3
  22. pygpt_net/data/locale/locale.de.ini +3 -1
  23. pygpt_net/data/locale/locale.en.ini +19 -1
  24. pygpt_net/data/locale/locale.es.ini +3 -1
  25. pygpt_net/data/locale/locale.fr.ini +3 -1
  26. pygpt_net/data/locale/locale.it.ini +3 -1
  27. pygpt_net/data/locale/locale.pl.ini +4 -2
  28. pygpt_net/data/locale/locale.uk.ini +3 -1
  29. pygpt_net/data/locale/locale.zh.ini +3 -1
  30. pygpt_net/provider/api/__init__.py +5 -3
  31. pygpt_net/provider/api/anthropic/__init__.py +190 -29
  32. pygpt_net/provider/api/anthropic/audio.py +30 -0
  33. pygpt_net/provider/api/anthropic/chat.py +341 -0
  34. pygpt_net/provider/api/anthropic/image.py +25 -0
  35. pygpt_net/provider/api/anthropic/tools.py +266 -0
  36. pygpt_net/provider/api/anthropic/vision.py +142 -0
  37. pygpt_net/provider/api/google/chat.py +2 -2
  38. pygpt_net/provider/api/google/tools.py +58 -48
  39. pygpt_net/provider/api/google/vision.py +7 -1
  40. pygpt_net/provider/api/openai/chat.py +1 -0
  41. pygpt_net/provider/api/openai/vision.py +6 -0
  42. pygpt_net/provider/api/x_ai/__init__.py +247 -0
  43. pygpt_net/provider/api/x_ai/audio.py +32 -0
  44. pygpt_net/provider/api/x_ai/chat.py +968 -0
  45. pygpt_net/provider/api/x_ai/image.py +208 -0
  46. pygpt_net/provider/api/x_ai/remote.py +262 -0
  47. pygpt_net/provider/api/x_ai/tools.py +120 -0
  48. pygpt_net/provider/api/x_ai/vision.py +119 -0
  49. pygpt_net/provider/core/config/patch.py +28 -0
  50. pygpt_net/provider/llms/anthropic.py +4 -2
  51. pygpt_net/ui/base/config_dialog.py +5 -11
  52. pygpt_net/ui/dialog/models.py +2 -4
  53. pygpt_net/ui/dialog/plugins.py +40 -43
  54. pygpt_net/ui/widget/element/labels.py +19 -3
  55. pygpt_net/ui/widget/textarea/web.py +1 -1
  56. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/METADATA +11 -6
  57. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/RECORD +60 -41
  58. pygpt_net/controller/chat/handler/stream_worker.py +0 -1136
  59. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/LICENSE +0 -0
  60. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/WHEEL +0 -0
  61. {pygpt_net-2.6.36.dist-info → pygpt_net-2.6.37.dist-info}/entry_points.txt +0 -0
--- /dev/null
+++ pygpt_net/controller/chat/handler/worker.py
@@ -0,0 +1,566 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package               #
+ # Website: https://pygpt.net                         #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt    #
+ # MIT License                                        #
+ # Created By  : Marcin Szczygliński                  #
+ # Updated Date: 2025.09.04 00:00:00                  #
+ # ================================================== #
+
+ import io
+ import json
+ from dataclasses import dataclass, field
+ from typing import Optional, Literal, Any
+ from enum import Enum
+
+ from PySide6.QtCore import QObject, Signal, Slot, QRunnable
+
+ from pygpt_net.core.events import RenderEvent
+ from pygpt_net.core.text.utils import has_unclosed_code_tag
+ from pygpt_net.item.ctx import CtxItem
+
+ from . import (
+     openai_stream,
+     google_stream,
+     anthropic_stream,
+     xai_stream,
+     llamaindex_stream,
+     langchain_stream,
+     utils as stream_utils,
+ )
+
+ # OpenAI Responses API events
+ EventType = Literal[
+     "response.completed",
+     "response.output_text.delta",
+     "response.output_item.added",
+     "response.function_call_arguments.delta",
+     "response.function_call_arguments.done",
+     "response.output_text.annotation.added",
+     "response.reasoning_summary_text.delta",
+     "response.output_item.done",
+     "response.code_interpreter_call_code.delta",
+     "response.code_interpreter_call_code.done",
+     "response.image_generation_call.partial_image",
+     "response.created",
+     "response.done",
+     "response.failed",
+     "error",
+ ]
+
+
+ class ChunkType(str, Enum):
+     """Enum for chunk type classification."""
+     API_CHAT = "api_chat"                      # OpenAI Chat Completions (or compatible)
+     API_CHAT_RESPONSES = "api_chat_responses"  # OpenAI Responses
+     API_COMPLETION = "api_completion"          # OpenAI Completions
+     LANGCHAIN_CHAT = "langchain_chat"          # LangChain chat (deprecated)
+     LLAMA_CHAT = "llama_chat"                  # LlamaIndex chat
+     GOOGLE = "google"                          # Google SDK
+     ANTHROPIC = "anthropic"                    # Anthropic SDK
+     XAI_SDK = "xai_sdk"                        # xAI SDK
+     RAW = "raw"                                # raw string fallback
+
+
+ class WorkerSignals(QObject):
+     """
+     Defines the signals available from a running worker thread.
+
+     - `end`: final CtxItem, emitted when the stream finishes
+     - `errorOccurred`: Exception raised during streaming
+     - `eventReady`: RenderEvent for the renderer
+     """
+     end = Signal(object)
+     errorOccurred = Signal(Exception)
+     eventReady = Signal(object)
+
+
+ @dataclass(slots=True)
+ class WorkerState:
+     """Holds mutable state for the streaming loop."""
+     out: Optional[io.StringIO] = None
+     output_tokens: int = 0
+     begin: bool = True
+     error: Optional[Exception] = None
+     fn_args_buffers: dict[str, io.StringIO] = field(default_factory=dict)
+     citations: Optional[list] = field(default_factory=list)
+     image_paths: list[str] = field(default_factory=list)
+     files: list[dict] = field(default_factory=list)
+     img_path: Optional[str] = None
+     is_image: bool = False
+     has_google_inline_image: bool = False
+     is_code: bool = False
+     force_func_call: bool = False
+     stopped: bool = False
+     chunk_type: ChunkType = ChunkType.RAW
+     generator: Any = None
+     usage_vendor: Optional[str] = None
+     usage_payload: dict = field(default_factory=dict)
+     google_stream_ref: Any = None
+     tool_calls: list[dict] = field(default_factory=list)
+
+     # --- xAI SDK only ---
+     xai_last_response: Any = None  # holds final response from xai_sdk.chat.stream()
+
+
+ class StreamWorker(QRunnable):
+     __slots__ = ("signals", "ctx", "window", "stream")
+
+     def __init__(self, ctx: CtxItem, window, parent=None):
+         super().__init__()
+         self.signals = WorkerSignals()
+         self.ctx = ctx
+         self.window = window
+         self.stream = None
+
+     @Slot()
+     def run(self):
+         ctx = self.ctx
+         win = self.window
+         core = win.core
+         ctrl = win.controller
+
+         emit_event = self.signals.eventReady.emit
+         emit_error = self.signals.errorOccurred.emit
+         emit_end = self.signals.end.emit
+
+         state = WorkerState()
+         state.generator = self.stream
+         state.img_path = core.image.gen_unique_path(ctx)
+
+         base_data = {"meta": ctx.meta, "ctx": ctx}
+         emit_event(RenderEvent(RenderEvent.STREAM_BEGIN, base_data))
+
+         try:
+             if state.generator is not None:
+                 # print(state.generator)  # TODO: detect by obj type?
+                 for chunk in state.generator:
+                     # cooperative stop
+                     if self._should_stop(ctrl, state, ctx):
+                         break
+
+                     # if error flagged, stop early
+                     if state.error is not None:
+                         ctx.msg_id = None
+                         state.stopped = True
+                         break
+
+                     etype: Optional[EventType] = None
+
+                     # detect chunk type
+                     if ctx.use_responses_api:
+                         if hasattr(chunk, 'type'):
+                             etype = chunk.type  # type: ignore[assignment]
+                             state.chunk_type = ChunkType.API_CHAT_RESPONSES
+                         else:
+                             continue
+                     else:
+                         state.chunk_type = self._detect_chunk_type(chunk)
+
+                     # process chunk according to type
+                     response = self._process_chunk(ctx, core, state, chunk, etype)
+
+                     # emit response delta if present
+                     if response is not None and response != "" and not state.stopped:
+                         self._append_response(ctx, state, response, emit_event)
+
+                     # free per-iteration ref
+                     chunk = None
+
+                 # after loop: handle tool-calls and images assembly
+                 self._handle_after_loop(ctx, core, state)
+
+         except Exception as e:
+             state.error = e
+
+         finally:
+             self._finalize(ctx, core, state, emit_end, emit_error)
+
+     # ------------ Orchestration helpers ------------
+
+     def _should_stop(
+             self,
+             ctrl,
+             state: WorkerState,
+             ctx: CtxItem
+     ) -> bool:
+         """
+         Checks external stop signal and attempts to stop the generator gracefully.
+
+         :param ctrl: Controller instance for stop checking
+         :param state: Current worker state
+         :param ctx: Current context item
+         :return: True if should stop
+         """
+         if not ctrl.kernel.stopped():
+             return False
+
+         gen = state.generator
+         if gen is not None:
+             # Try common stop methods without raising
+             for meth in ("close", "cancel", "stop"):
+                 if hasattr(gen, meth):
+                     try:
+                         getattr(gen, meth)()
+                     except Exception:
+                         pass
+
+         ctx.msg_id = None
+         state.stopped = True
+         return True
+
+     def _detect_chunk_type(self, chunk) -> ChunkType:
+         """
+         Detects chunk type for various providers/SDKs.
+         Order matters: detect vendor-specific types before generic fallbacks.
+
+         :param chunk: The chunk object to classify
+         :return: Detected ChunkType
+         """
+         # OpenAI SDK / OpenAI-compatible SSE
+         choices = getattr(chunk, 'choices', None)
+         if choices:
+             choice0 = choices[0] if len(choices) > 0 else None
+             if choice0 is not None and hasattr(choice0, 'delta') and choice0.delta is not None:
+                 return ChunkType.API_CHAT
+             if choice0 is not None and hasattr(choice0, 'text') and choice0.text is not None:
+                 return ChunkType.API_COMPLETION
+
+         # xAI SDK: chat.stream() yields (response, chunk) tuples
+         if isinstance(chunk, (tuple, list)) and len(chunk) == 2:
+             _resp, _ch = chunk[0], chunk[1]
+             if hasattr(_ch, "content") or isinstance(_ch, str):
+                 return ChunkType.XAI_SDK
+
+         # Anthropic: detect both SSE events and raw delta objects early
+         t = getattr(chunk, "type", None)
+         if isinstance(t, str):
+             anthropic_events = {
+                 "message_start", "content_block_start", "content_block_delta",
+                 "content_block_stop", "message_delta", "message_stop",
+                 "ping", "error",  # control / error
+                 "text_delta", "input_json_delta",  # content deltas
+                 "thinking_delta", "signature_delta",  # thinking deltas
+             }
+             if t in anthropic_events or t.startswith("message_") or t.startswith("content_block_"):
+                 return ChunkType.ANTHROPIC
+
+         # Google python-genai
+         if hasattr(chunk, "candidates"):
+             return ChunkType.GOOGLE
+
+         # LangChain chat-like objects
+         if hasattr(chunk, 'content') and getattr(chunk, 'content') is not None:
+             return ChunkType.LANGCHAIN_CHAT
+
+         # LlamaIndex (generic delta fallback) - exclude Anthropic/Google shapes
+         if hasattr(chunk, 'delta') and getattr(chunk, 'delta') is not None:
+             # guard: do not misclassify Anthropic or Google objects
+             if not hasattr(chunk, "type") and not hasattr(chunk, "candidates"):
+                 return ChunkType.LLAMA_CHAT
+
+         return ChunkType.RAW
+
+     def _append_response(
+             self,
+             ctx: CtxItem,
+             state: WorkerState,
+             response: str,
+             emit_event
+     ):
+         """
+         Appends response delta and emits STREAM_APPEND event.
+
+         :param ctx: Current context item
+         :param state: Current worker state
+         :param response: Response delta to append
+         :param emit_event: Function to emit events
+         """
+         if state.begin and response == "":
+             return
+         if state.out is None:
+             state.out = io.StringIO()
+         state.out.write(response)
+         state.output_tokens += 1
+         emit_event(
+             RenderEvent(
+                 RenderEvent.STREAM_APPEND,
+                 {
+                     "meta": ctx.meta,
+                     "ctx": ctx,
+                     "chunk": response,
+                     "begin": state.begin,
+                 },
+             )
+         )
+         state.begin = False
+
+     def _handle_after_loop(
+             self,
+             ctx: CtxItem,
+             core,
+             state: WorkerState
+     ):
+         """
+         Post-loop handling for tool calls and images assembly.
+
+         :param ctx: Current context item
+         :param core: Core instance
+         :param state: Current worker state
+         """
+         if state.tool_calls:
+             ctx.force_call = state.force_func_call
+             core.debug.info("[chat] Tool calls found, unpacking...")
+             # Ensure function.arguments is a JSON string
+             for tc in state.tool_calls:
+                 fn = tc.get("function") or {}
+                 if isinstance(fn.get("arguments"), dict):
+                     fn["arguments"] = json.dumps(fn["arguments"], ensure_ascii=False)
+             core.command.unpack_tool_calls_chunks(ctx, state.tool_calls)
+
+         # OpenAI: partial image assembly
+         if state.is_image and state.img_path:
+             core.debug.info("[chat] OpenAI partial image assembled")
+             ctx.images = [state.img_path]
+
+         # Google: inline images
+         if state.image_paths:
+             core.debug.info("[chat] Google inline images found")
+             if not isinstance(ctx.images, list) or not ctx.images:
+                 ctx.images = list(state.image_paths)
+             else:
+                 seen = set(ctx.images)
+                 for p in state.image_paths:
+                     if p not in seen:
+                         ctx.images.append(p)
+                         seen.add(p)
+
+         # xAI: extract tool calls from final response if not already present
+         if (not state.tool_calls) and (state.xai_last_response is not None):
+             try:
+                 calls = xai_stream.xai_extract_tool_calls(state.xai_last_response)
+                 if calls:
+                     state.tool_calls = calls
+                     state.force_func_call = True
+             except Exception:
+                 pass
+
+         # xAI: collect citations (final response) -> ctx.urls
+         if state.xai_last_response is not None:
+             try:
+                 cites = xai_stream.xai_extract_citations(state.xai_last_response) or []
+                 if cites:
+                     if ctx.urls is None:
+                         ctx.urls = []
+                     for u in cites:
+                         if u not in ctx.urls:
+                             ctx.urls.append(u)
+             except Exception:
+                 pass
+
+     def _finalize(
+             self,
+             ctx: CtxItem,
+             core,
+             state: WorkerState,
+             emit_end,
+             emit_error
+     ):
+         """
+         Finalize stream: build output, usage, tokens, files, errors, cleanup.
+
+         :param ctx: Current context item
+         :param core: Core instance
+         :param state: Current worker state
+         :param emit_end: Function to emit end event
+         :param emit_error: Function to emit error event
+         """
+         output = state.out.getvalue() if state.out is not None else ""
+         if state.out is not None:
+             try:
+                 state.out.close()
+             except Exception:
+                 pass
+             state.out = None
+
+         if has_unclosed_code_tag(output):
+             output += "\n```"
+
+         # Google: resolve usage if present
+         if ((state.usage_vendor is None or state.usage_vendor == "google")
+                 and not state.usage_payload and state.generator is not None):
+             try:
+                 if hasattr(state.generator, "resolve"):
+                     state.generator.resolve()
+                 um = getattr(state.generator, "usage_metadata", None)
+                 if um:
+                     stream_utils.capture_google_usage(state, um)
+             except Exception:
+                 pass
+
+         # xAI: usage from final response if still missing
+         if (not state.usage_payload) and (state.xai_last_response is not None):
+             try:
+                 up = xai_stream.xai_extract_usage(state.xai_last_response)
+                 if up:
+                     state.usage_payload = up
+                     state.usage_vendor = "xai"
+             except Exception:
+                 pass
+
+         # Close generator if possible
+         gen = state.generator
+         if gen and hasattr(gen, 'close'):
+             try:
+                 gen.close()
+             except Exception:
+                 pass
+
+         self.stream = None
+         ctx.output = output
+         output = None  # free ref
+
+         # Tokens usage
+         if state.usage_payload:
+             in_tok_final = state.usage_payload.get("in")
+             out_tok_final = state.usage_payload.get("out")
+
+             if in_tok_final is None:
+                 in_tok_final = ctx.input_tokens if ctx.input_tokens is not None else 0
+             if out_tok_final is None:
+                 out_tok_final = state.output_tokens
+
+             ctx.set_tokens(in_tok_final, out_tok_final)
+
+             # Attach usage details in ctx.extra for debugging
+             try:
+                 if not isinstance(ctx.extra, dict):
+                     ctx.extra = {}
+                 ctx.extra["usage"] = {
+                     "vendor": state.usage_vendor,
+                     "input_tokens": in_tok_final,
+                     "output_tokens": out_tok_final,
+                     "reasoning_tokens": state.usage_payload.get("reasoning", 0),
+                     "total_reported": state.usage_payload.get("total"),
+                 }
+             except Exception:
+                 pass
+         else:
+             ctx.set_tokens(ctx.input_tokens if ctx.input_tokens is not None else 0, state.output_tokens)
+
+         core.ctx.update_item(ctx)
+
+         # OpenAI: download container files if present
+         if state.files and not state.stopped:
+             core.debug.info("[chat] Container files found, downloading...")
+             try:
+                 core.api.openai.container.download_files(ctx, state.files)
+             except Exception as e:
+                 core.debug.error(f"[chat] Error downloading container files: {e}")
+
+         # Emit error and end
+         if state.error:
+             emit_error(state.error)
+         emit_end(ctx)
+
+         # Cleanup local buffers
+         for _buf in state.fn_args_buffers.values():
+             try:
+                 _buf.close()
+             except Exception:
+                 pass
+         state.fn_args_buffers.clear()
+         state.files.clear()
+         state.tool_calls.clear()
+         if state.citations is not None and state.citations is not ctx.urls:
+             state.citations.clear()
+         state.citations = None
+
+         self.cleanup()
+
+     # ------------ Chunk processors ------------
+
+     def _process_chunk(
+             self,
+             ctx: CtxItem,
+             core,
+             state: WorkerState,
+             chunk,
+             etype: Optional[EventType]
+     ) -> Optional[str]:
+         """
+         Dispatches processing to concrete provider-specific processing.
+
+         :param ctx: Current context item
+         :param core: Core instance
+         :param state: Current worker state
+         :param chunk: The chunk to process
+         :param etype: Optional event type for Responses API
+         :return: Processed string delta or None
+         """
+         t = state.chunk_type
+         if t == ChunkType.API_CHAT:
+             return self._process_api_chat(ctx, state, chunk)
+         if t == ChunkType.API_CHAT_RESPONSES:
+             return self._process_api_chat_responses(ctx, core, state, chunk, etype)
+         if t == ChunkType.API_COMPLETION:
+             return self._process_api_completion(chunk)
+         if t == ChunkType.LANGCHAIN_CHAT:
+             return self._process_langchain_chat(chunk)
+         if t == ChunkType.LLAMA_CHAT:
+             return self._process_llama_chat(state, chunk)
+         if t == ChunkType.GOOGLE:
+             return self._process_google_chunk(ctx, core, state, chunk)
+         if t == ChunkType.ANTHROPIC:
+             return self._process_anthropic_chunk(ctx, core, state, chunk)
+         if t == ChunkType.XAI_SDK:
+             return self._process_xai_sdk_chunk(ctx, core, state, chunk)
+         return self._process_raw(chunk)
+
+     def _process_api_chat(self, ctx, state, chunk):
+         return openai_stream.process_api_chat(ctx, state, chunk)
+
+     def _process_api_chat_responses(self, ctx, core, state, chunk, etype):
+         return openai_stream.process_api_chat_responses(ctx, core, state, chunk, etype)
+
+     def _process_api_completion(self, chunk):
+         return openai_stream.process_api_completion(chunk)
+
+     def _process_langchain_chat(self, chunk):
+         return langchain_stream.process_langchain_chat(chunk)
+
+     def _process_llama_chat(self, state, chunk):
+         return llamaindex_stream.process_llama_chat(state, chunk)
+
+     def _process_google_chunk(self, ctx, core, state, chunk):
+         return google_stream.process_google_chunk(ctx, core, state, chunk)
+
+     def _process_anthropic_chunk(self, ctx, core, state, chunk):
+         return anthropic_stream.process_anthropic_chunk(ctx, core, state, chunk)
+
+     def _process_xai_sdk_chunk(self, ctx, core, state, item):
+         return xai_stream.process_xai_sdk_chunk(ctx, core, state, item)
+
+     def _process_raw(self, chunk) -> Optional[str]:
+         """
+         Raw chunk fallback.
+
+         :param chunk: The chunk to process
+         :return: String representation or None
+         """
+         if chunk is not None:
+             return chunk if isinstance(chunk, str) else str(chunk)
+         return None
+
+     def cleanup(self):
+         """Cleanup resources after worker execution."""
+         sig = self.signals
+         self.signals = None
+         if sig is not None:
+             try:
+                 sig.deleteLater()
+             except RuntimeError:
+                 pass
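
The worker above is a standard QRunnable: a caller constructs it with the context and window, attaches the provider generator to `worker.stream`, connects the three signals, and submits it to a thread pool. A minimal driver sketch follows; `render`, `on_error`, `on_end`, and `sdk_stream` are hypothetical stand-ins for the slots and generator the application normally supplies:

from PySide6.QtCore import QThreadPool

def start_stream(ctx, window, sdk_stream):
    # sdk_stream: any provider generator (OpenAI, Google, Anthropic, xAI, ...)
    worker = StreamWorker(ctx, window)
    worker.stream = sdk_stream                       # consumed chunk-by-chunk in run()
    worker.signals.eventReady.connect(render)        # RenderEvent per delta
    worker.signals.errorOccurred.connect(on_error)   # Exception from the loop
    worker.signals.end.connect(on_end)               # final CtxItem after _finalize()
    QThreadPool.globalInstance().start(worker)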
--- /dev/null
+++ pygpt_net/controller/chat/handler/xai_stream.py
@@ -0,0 +1,135 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ================================================== #
+ # This file is a part of PYGPT package               #
+ # Website: https://pygpt.net                         #
+ # GitHub: https://github.com/szczyglis-dev/py-gpt    #
+ # MIT License                                        #
+ # Created By  : Marcin Szczygliński                  #
+ # Updated Date: 2025.09.05 00:00:00                  #
+ # ================================================== #
+
+ from typing import Optional
+
+
+ def process_xai_sdk_chunk(ctx, core, state, item) -> Optional[str]:
+     """
+     xAI SDK native streaming chunk.
+
+     :param ctx: Chat context
+     :param core: Core controller
+     :param state: Chat state
+     :param item: Incoming streaming chunk (tuple of (response, chunk))
+     :return: Extracted text delta or None
+     """
+     try:
+         response, chunk = item
+     except Exception:
+         return None
+
+     state.xai_last_response = response
+
+     try:
+         if hasattr(chunk, "content") and chunk.content is not None:
+             return str(chunk.content)
+         if isinstance(chunk, str):
+             return chunk
+     except Exception:
+         pass
+     return None
+
+
+ def xai_extract_tool_calls(response) -> list[dict]:
+     """
+     Extract tool calls from xAI SDK final response (proto).
+
+     :param response: xAI final response object
+     :return: List of tool calls in normalized dict format
+     """
+     out: list[dict] = []
+     try:
+         proto = getattr(response, "proto", None)
+         if not proto:
+             return out
+         choices = getattr(proto, "choices", None) or []
+         if not choices:
+             return out
+         msg = getattr(choices[0], "message", None)
+         if not msg:
+             return out
+         tool_calls = getattr(msg, "tool_calls", None) or []
+         for tc in tool_calls:
+             try:
+                 name = getattr(getattr(tc, "function", None), "name", "") or ""
+                 args = getattr(getattr(tc, "function", None), "arguments", "") or "{}"
+                 out.append({
+                     "id": getattr(tc, "id", "") or "",
+                     "type": "function",
+                     "function": {"name": name, "arguments": args},
+                 })
+             except Exception:
+                 continue
+     except Exception:
+         pass
+     return out
+
+
+ def xai_extract_citations(response) -> list[str]:
+     """
+     Extract citations (URLs) from xAI final response if present.
+
+     :param response: xAI final response object
+     :return: List of citation URLs
+     """
+     urls: list[str] = []
+     try:
+         cites = getattr(response, "citations", None)
+         if isinstance(cites, (list, tuple)):
+             for u in cites:
+                 if isinstance(u, str) and (u.startswith("http://") or u.startswith("https://")):
+                     if u not in urls:
+                         urls.append(u)
+     except Exception:
+         pass
+     try:
+         proto = getattr(response, "proto", None)
+         if proto:
+             proto_cites = getattr(proto, "citations", None) or []
+             for u in proto_cites:
+                 if isinstance(u, str) and (u.startswith("http://") or u.startswith("https://")):
+                     if u not in urls:
+                         urls.append(u)
+     except Exception:
+         pass
+     return urls
+
+
+ def xai_extract_usage(response) -> dict:
+     """
+     Extract usage from xAI final response via proto. Return {'in','out','reasoning','total'}.
+
+     :param response: xAI final response object
+     :return: Usage dict
+     """
+     try:
+         proto = getattr(response, "proto", None)
+         usage = getattr(proto, "usage", None) if proto else None
+         if not usage:
+             return {}
+
+         def _as_int(v):
+             try:
+                 return int(v)
+             except Exception:
+                 try:
+                     return int(float(v))
+                 except Exception:
+                     return 0
+
+         p = _as_int(getattr(usage, "prompt_tokens", 0) or 0)
+         c = _as_int(getattr(usage, "completion_tokens", 0) or 0)
+         t = _as_int(getattr(usage, "total_tokens", (p + c)) or (p + c))
+         out_total = max(0, t - p) if t else c
+         return {"in": p, "out": out_total, "reasoning": 0, "total": t}
+     except Exception:
+         return {}
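
These helpers normalize xAI's proto shapes into plain dicts: tool calls become OpenAI-style `{"id", "type", "function"}` entries, and usage reports `out` as `total - prompt`, which folds any reasoning tokens into the output count. A small self-contained check of that behavior, using SimpleNamespace stand-ins rather than real xai_sdk response objects:

from types import SimpleNamespace

# 30 reasoning tokens: total (185) exceeds prompt (120) + completion (35)
usage = SimpleNamespace(prompt_tokens=120, completion_tokens=35, total_tokens=185)
resp = SimpleNamespace(proto=SimpleNamespace(usage=usage, choices=[]), citations=[])

assert xai_extract_usage(resp) == {"in": 120, "out": 65, "reasoning": 0, "total": 185}
assert xai_extract_tool_calls(resp) == []  # no choices -> no tool calls
assert xai_extract_citations(resp) == []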