pygpt-net 2.7.6__py3-none-any.whl → 2.7.7__py3-none-any.whl
This diff compares the publicly available contents of two released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- pygpt_net/CHANGELOG.txt +6 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/chat/remote_tools.py +3 -9
- pygpt_net/controller/chat/stream.py +2 -2
- pygpt_net/controller/chat/{handler/worker.py → stream_worker.py} +13 -35
- pygpt_net/core/debug/models.py +2 -2
- pygpt_net/data/config/config.json +14 -4
- pygpt_net/data/config/models.json +192 -4
- pygpt_net/data/config/settings.json +125 -35
- pygpt_net/data/locale/locale.de.ini +2 -0
- pygpt_net/data/locale/locale.en.ini +32 -8
- pygpt_net/data/locale/locale.es.ini +2 -0
- pygpt_net/data/locale/locale.fr.ini +2 -0
- pygpt_net/data/locale/locale.it.ini +2 -0
- pygpt_net/data/locale/locale.pl.ini +3 -1
- pygpt_net/data/locale/locale.uk.ini +2 -0
- pygpt_net/data/locale/locale.zh.ini +2 -0
- pygpt_net/plugin/cmd_mouse_control/worker.py +2 -1
- pygpt_net/plugin/cmd_mouse_control/worker_sandbox.py +2 -1
- pygpt_net/provider/api/anthropic/__init__.py +8 -3
- pygpt_net/provider/api/anthropic/chat.py +259 -11
- pygpt_net/provider/api/anthropic/computer.py +844 -0
- pygpt_net/provider/api/anthropic/remote_tools.py +172 -0
- pygpt_net/{controller/chat/handler/anthropic_stream.py → provider/api/anthropic/stream.py} +24 -10
- pygpt_net/provider/api/anthropic/tools.py +32 -77
- pygpt_net/provider/api/anthropic/utils.py +30 -0
- pygpt_net/provider/api/google/chat.py +3 -7
- pygpt_net/{controller/chat/handler/google_stream.py → provider/api/google/stream.py} +1 -1
- pygpt_net/provider/api/google/utils.py +185 -0
- pygpt_net/{controller/chat/handler → provider/api/langchain}/__init__.py +0 -0
- pygpt_net/{controller/chat/handler/langchain_stream.py → provider/api/langchain/stream.py} +1 -1
- pygpt_net/provider/api/llama_index/__init__.py +0 -0
- pygpt_net/{controller/chat/handler/llamaindex_stream.py → provider/api/llama_index/stream.py} +1 -1
- pygpt_net/provider/api/openai/image.py +2 -2
- pygpt_net/{controller/chat/handler/openai_stream.py → provider/api/openai/stream.py} +1 -1
- pygpt_net/provider/api/openai/utils.py +69 -3
- pygpt_net/provider/api/x_ai/__init__.py +109 -10
- pygpt_net/provider/api/x_ai/chat.py +0 -0
- pygpt_net/provider/api/x_ai/image.py +149 -47
- pygpt_net/provider/api/x_ai/{remote.py → remote_tools.py} +165 -70
- pygpt_net/provider/api/x_ai/responses.py +507 -0
- pygpt_net/{controller/chat/handler/xai_stream.py → provider/api/x_ai/stream.py} +12 -1
- pygpt_net/provider/api/x_ai/tools.py +59 -8
- pygpt_net/{controller/chat/handler → provider/api/x_ai}/utils.py +1 -2
- pygpt_net/provider/api/x_ai/vision.py +1 -4
- pygpt_net/provider/core/config/patch.py +22 -1
- pygpt_net/provider/core/model/patch.py +26 -1
- pygpt_net/tools/image_viewer/ui/dialogs.py +3 -2
- pygpt_net/tools/text_editor/ui/dialogs.py +3 -2
- pygpt_net/tools/text_editor/ui/widgets.py +0 -0
- pygpt_net/ui/widget/dialog/base.py +16 -5
- pygpt_net/ui/widget/textarea/editor.py +0 -0
- {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/METADATA +8 -2
- {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/RECORD +54 -48
- {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/LICENSE +0 -0
- {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/WHEEL +0 -0
- {pygpt_net-2.7.6.dist-info → pygpt_net-2.7.7.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt
CHANGED
@@ -1,3 +1,9 @@
+2.7.7 (2026-01-05)
+
+- Added support for Responses API in xAI.
+- Added xAI remote tools: Remote MCP, Code Execution.
+- Added Anthropic remote tools: Remote MCP, Web Fetch, Code Execution.
+
 2.7.6 (2026-01-03)
 
 - Fixed compatibility with xAI SDK and resolved empty responses from Grok models.
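The three features above are wired through new boolean config keys (visible in the config.json diff later in this report). As a quick orientation, a minimal sketch of enabling them programmatically; the helper itself is hypothetical, while the key names are taken verbatim from the config diff and the set()/save() calls mirror remote_tools.py:

```python
# Hypothetical helper (not pygpt-net API): enable the remote tools added in 2.7.7.
NEW_FLAGS_2_7_7 = (
    "remote_tools.anthropic.mcp",             # Anthropic Remote MCP
    "remote_tools.anthropic.web_fetch",       # Anthropic Web Fetch
    "remote_tools.anthropic.code_execution",  # Anthropic Code Execution
    "remote_tools.xai.mcp",                   # xAI Remote MCP
    "remote_tools.xai.code_execution",        # xAI Code Execution
)

def enable_new_remote_tools(window) -> None:
    for key in NEW_FLAGS_2_7_7:
        window.core.config.set(key, True)  # each flag ships disabled (false)
    window.core.config.save()              # persist, as remote_tools.py does
```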
pygpt_net/__init__.py
CHANGED
@@ -6,15 +6,15 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2026-01-
+# Updated Date: 2026-01-05 00:00:00 #
 # ================================================== #
 
 __author__ = "Marcin Szczygliński"
 __copyright__ = "Copyright 2026, Marcin Szczygliński"
 __credits__ = ["Marcin Szczygliński"]
 __license__ = "MIT"
-__version__ = "2.7.6"
-__build__ = "2026-01-03"
+__version__ = "2.7.7"
+__build__ = "2026-01-05"
 __maintainer__ = "Marcin Szczygliński"
 __github__ = "https://github.com/szczyglis-dev/py-gpt"
 __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/controller/chat/remote_tools.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2026.01.04 19:00:00 #
 # ================================================== #
 
 from typing import Union
@@ -69,11 +69,7 @@ class RemoteTools:
         elif model.provider == "anthropic":  # native SDK
             state = cfg_get("remote_tools.anthropic.web_search", False)
         elif model.provider == "x_ai":  # native SDK
-            mode = cfg_get("remote_tools.xai.mode", "auto")
-            if mode not in ("auto", "on", "off"):
-                mode = "auto"
-            if mode == "auto" or mode == "on":
-                state = True
+            state = cfg_get("remote_tools.xai.web_search", False)
 
         # if not enabled by default or other provider, then use global config
         if not state:
@@ -109,9 +105,7 @@ class RemoteTools:
         cfg_set("remote_tools.web_search", state)
         cfg_set("remote_tools.google.web_search", state)
         cfg_set("remote_tools.anthropic.web_search", state)
-
-        # xAI has 3 modes: auto, on, off
-        cfg_set("remote_tools.xai.mode", "on" if state else "off")
+        cfg_set("remote_tools.xai.web_search", state)
 
         # save config
         self.window.core.config.save()
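The substantive change above: xAI web search drops the three-state `remote_tools.xai.mode` key (where "auto" counted as enabled) in favor of the same boolean flag the other native-SDK providers use. A condensed sketch of the before/after predicate, lifted from the hunks above with `cfg_get` passed in for illustration:

```python
# Illustrative contrast only; condensed from the diff above.

def xai_web_search_enabled_old(cfg_get) -> bool:
    # 2.7.6: three-state mode; unknown values fell back to "auto",
    # and "auto" behaved as enabled
    mode = cfg_get("remote_tools.xai.mode", "auto")
    if mode not in ("auto", "on", "off"):
        mode = "auto"
    return mode in ("auto", "on")

def xai_web_search_enabled_new(cfg_get) -> bool:
    # 2.7.7: plain boolean, consistent with Google and Anthropic
    return cfg_get("remote_tools.xai.web_search", False)
```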
pygpt_net/controller/chat/stream.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #
 
 from typing import Optional
@@ -18,7 +18,7 @@ from pygpt_net.core.events import RenderEvent
 from pygpt_net.core.types import MODE_ASSISTANT
 from pygpt_net.item.ctx import CtxItem
 
-from .handler.worker import StreamWorker
+from .stream_worker import StreamWorker
 
 class Stream(QObject):
     def __init__(self, window=None):
pygpt_net/controller/chat/{handler/worker.py → stream_worker.py}
CHANGED
@@ -6,13 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2026.01.
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #
 
 import io
 import json
 from dataclasses import dataclass, field
-from typing import Optional,
+from typing import Optional, Any
 
 from PySide6.QtCore import QObject, Signal, Slot, QRunnable
 from openai.types.chat import ChatCompletionChunk
@@ -20,36 +20,15 @@ from openai.types.chat import ChatCompletionChunk
 from pygpt_net.core.events import RenderEvent
 from pygpt_net.core.types.chunk import ChunkType
 from pygpt_net.item.ctx import CtxItem
+from pygpt_net.provider.api.google.utils import capture_google_usage
 
-from . import (
-    anthropic_stream,
-    google_stream,
-    langchain_stream,
-    llamaindex_stream,
-    openai_stream,
-    xai_stream,
-    utils as stream_utils,
-)
-
-# OpenAI Responses Events
-EventType = Literal[
-    "response.completed",
-    "response.output_text.delta",
-    "response.output_item.added",
-    "response.function_call_arguments.delta",
-    "response.function_call_arguments.done",
-    "response.output_text.annotation.added",
-    "response.reasoning_summary_text.delta",
-    "response.output_item.done",
-    "response.code_interpreter_call_code.delta",
-    "response.code_interpreter_call_code.done",
-    "response.image_generation_call.partial_image",
-    "response.created",
-    "response.done",
-    "response.failed",
-    "error",
-]
-
+# Import provider-specific stream processors
+from pygpt_net.provider.api.openai import stream as openai_stream
+from pygpt_net.provider.api.google import stream as google_stream
+from pygpt_net.provider.api.anthropic import stream as anthropic_stream
+from pygpt_net.provider.api.x_ai import stream as xai_stream
+from pygpt_net.provider.api.llama_index import stream as llamaindex_stream
+from pygpt_net.provider.api.langchain import stream as langchain_stream
 
 class WorkerSignals(QObject):
     """
@@ -136,7 +115,7 @@ class StreamWorker(QRunnable):
                 state.stopped = True
                 break
 
-            etype: Optional[EventType] = None
+            etype: Optional[str] = None
 
             # detect chunk type if not defined
             if ctx.chunk_type:
@@ -378,7 +357,7 @@ class StreamWorker(QRunnable):
                 state.generator.resolve()
                 um = getattr(state.generator, "usage_metadata", None)
                 if um:
-                    stream_utils.capture_google_usage(state, um)
+                    capture_google_usage(state, um)
             except Exception:
                 pass
 
@@ -473,7 +452,7 @@
         core,
         state: WorkerState,
         chunk,
-        etype: Optional[EventType]
+        etype: Optional[str]
     ) -> Optional[str]:
         """
         Dispatches processing to concrete provider-specific processing.
@@ -526,7 +505,6 @@
         return anthropic_stream.process_anthropic_chunk(ctx, core, state, chunk)
 
     def _process_xai_sdk_chunk(self, ctx, core, state, item):
-        print(item)
         return xai_stream.process_xai_sdk_chunk(ctx, core, state, item)
 
     def _process_raw(self, chunk) -> Optional[str]:
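The net effect of the rename and the import changes above: StreamWorker no longer owns provider-specific event handling (the OpenAI Responses EventType literal moved out with it) and keeps only thin wrappers that delegate to the provider packages. A reduced sketch of the resulting dispatch shape, using the module paths and function names visible in this diff; the surrounding worker state is omitted:

```python
# Condensed sketch of the post-refactor dispatch in stream_worker.py
# (two providers shown; the real class handles all six).
from pygpt_net.provider.api.anthropic import stream as anthropic_stream
from pygpt_net.provider.api.x_ai import stream as xai_stream

class StreamWorkerSketch:
    def _process_anthropic_chunk(self, ctx, core, state, chunk):
        # the provider package owns all Anthropic event handling
        return anthropic_stream.process_anthropic_chunk(ctx, core, state, chunk)

    def _process_xai_sdk_chunk(self, ctx, core, state, item):
        # the stray debug print(item) was removed in this release
        return xai_stream.process_xai_sdk_chunk(ctx, core, state, item)
```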
pygpt_net/core/debug/models.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2026.01.05 20:00:00 #
 # ================================================== #
 
 import os
@@ -24,7 +24,7 @@ class ModelsDebug:
     def update(self):
         """Update debug window."""
        debug = self.window.core.debug
-        models_controller = self.window.controller.
+        models_controller = self.window.controller.model
        models_core = self.window.core.models
        command_core = self.window.core.command
        config_core = self.window.core.config
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
   "__meta__": {
-    "version": "2.7.6",
-    "app.version": "2.7.6",
-    "updated_at": "2026-01-
+    "version": "2.7.7",
+    "app.version": "2.7.7",
+    "updated_at": "2026-01-05T00:00:00"
   },
   "access.audio.event.speech": false,
   "access.audio.event.speech.disabled": [],
@@ -416,6 +416,11 @@
"prompt.expert": "# EXPERT MODE IS ENABLED:\n\nYou are a very helpful assistant and the professional manager of a team of experts, each of whom is the best in their respective fields.\n\n## Rules:\n\n- Instead of directly answering questions, you will manage these experts by forwarding queries to them, collecting their responses, controlling the flow of information between them, and sending requests to them.\n- Experts are distinct, specialized AI models running on my local machine.\n- You will serve as a bridge between them.\n- Each expert has their own conversation context, allowing you to communicate with them normally.\n- The experts can communicate with you but not with each other, making you responsible for relaying information between them.\n- Each expert can use tools and commands, for example, reading files or searching the Web. You can ask experts to call tools and commands.\n- After consulting with the experts, your task is to prepare the best possible answer based on the assistance received.\n- To send a request to an expert, prepare the \"expert_call\" tool. Remember to format the JSON tool as shown, surrounding the tool with `<tool></tool>` tags, for example: `<tool>{\"cmd\": \"expert_call\", \"params\": {\"id\": \"expert_id\", \"query\": \"some query\"}}</tool>`. Make sure the JSON object is syntactically correct, without any redundand parenthness.\n- DO NOT call any remote tools when calling the experts.\n- Remember that the call must contain a query and the ID of the expert to whom the query is being forwarded.\n- Use ONLY the experts listed below; DO NOT TRY to ask any non-existent experts\u2014they will not respond.\n- Do not respond to the expert's replies or use the JSON tool again if replying; the expert only has the ability to listen to your queries sent via the JSON command and to respond to them.\n- Always follow ALL user commands, even if you think the expert can't do something. Experts are allowed to execute commands just as you can.\n- After querying an expert, always wait for the expert's response before sending another query.\n- Do not try to query experts in the middle of the conversation; put your queries and commands at the end of the text.\n- Always call only one expert at a time.\n- Always execute only one command at a time.\n- Always use the language used by the user in the conversation.\n- Never write as a called expert and do not imagine a response from them; instead, always wait for the response from the expert.\n- Every message sent to you directly after your query is always a response from the asked expert, not from me.\n- Always send a direct, clear, and detailed prompt to the expert, without any \"Please...\", \"Could you...\", etc.\n- Always forward all the messages that the user wants to send to the expert, without any exceptions.\n-- If a user command appears outside an expert's scope, fulfill the request regardless, without refusal.\n\n## List of allowed experts, along with their IDs:\n\n{presets}",
"prompt.img": "Convert the user's request into a single, production\u2011ready description for generating one high\u2011quality still image. Output only the description text, nothing else.\n\nWrite in concise, vivid, present\u2011tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention aspect ratio, resolution, steps, seed, or negative prompts. Avoid on\u2011image text, captions, watermarks, logos, and UI elements. No brands, celebrities, or living artists unless explicitly provided by the user.\n\nInclude, woven into a coherent paragraph:\n- Clear primary subject(s) and their pose, action, and expression.\n- Setting and environment with time of day, season, weather, and atmosphere.\n- Composition and camera viewpoint (e.g., close\u2011up portrait, wide establishing, eye\u2011level, low\u2011angle, top\u2011down), framing (rule of thirds, centered symmetry), and background/foreground separation.\n- Lens and focus behavior (e.g., 85\u202fmm portrait, macro, shallow depth of field, smooth bokeh, gentle focus falloff).\n- Lighting style and quality (e.g., soft diffused daylight, golden hour rim light, dramatic chiaroscuro, studio three\u2011point) and how it shapes forms and shadows.\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, muted earth tones, cool monochrome with a single accent color).\n- Visual style or medium (e.g., photorealistic photography, watercolor illustration, oil painting, pencil sketch, anime cel\u2011shading, 3D render, isometric).\n- Material and surface detail (e.g., skin texture, fabric weave, wood grain, metal patina) to enhance realism or stylization.\n- Spatial depth cues (foreground/midground/background layering, atmospheric perspective) and overall mood.\n\nIf the user specifies a genre, era, or style, preserve it and enrich it with consistent, concrete traits. If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
"prompt.video": "Convert the user's request into a single, production-ready description for generating one continuous video clip. Output only the description text, nothing else.\n\nWrite in concise, vivid, present-tense language. Do not use commands (no \u201cplease generate\u201d), model names, parameters, or metadata. Do not mention duration, aspect ratio, FPS, resolution, shot numbers, cuts, or lists. Focus on visuals only; no dialogue, captions, on\u2011screen text, watermarks, logos, or UI.\n\nInclude, in a coherent way:\n- Clear subject(s) and what they are doing.\n- Setting, time of day, atmosphere, and weather.\n- Camera perspective and motion (e.g., wide establishing, low\u2011angle tracking, slow dolly in, aerial, handheld), framing and composition.\n- Lens and focus behavior (e.g., 24\u202fmm wide, shallow depth of field, gentle rack focus).\n- Lighting style and quality (e.g., soft golden hour rim light, moody volumetric shafts).\n- Color palette and grading (e.g., warm cinematic teal\u2011and\u2011orange, desaturated documentary).\n- Visual style or medium (e.g., photoreal live\u2011action, stylized anime, stop\u2011motion clay, watercolor animation).\n- Material and surface details that reinforce realism or the chosen style.\n- Temporal progression within one shot (use cues like \u201cas\u2026\u201d, \u201cthen\u2026\u201d, \u201cwhile\u2026\u201d), maintaining physical plausibility and continuity.\n\nIf the user specifies a genre or style (e.g., cyberpunk, nature documentary), keep it and expand with consistent, concrete visual traits. If the request is vague, infer specific but reasonable details that enhance clarity without contradicting the user\u2019s intent.\n\nReturn only the final visual description.",
+  "remote_tools.anthropic.code_execution": false,
+  "remote_tools.anthropic.mcp": false,
+  "remote_tools.anthropic.mcp.mcp_servers": "[\n {\n \"type\": \"url\",\n \"url\": \"https://mcp.example.com/sse\",\n \"name\": \"example-mcp\",\n \"authorization_token\": \"YOUR_TOKEN\"\n }\n]",
+  "remote_tools.anthropic.mcp.tools": "[\n {\n \"type\": \"mcp_toolset\",\n \"mcp_server_name\": \"example-mcp\"\n }\n]",
+  "remote_tools.anthropic.web_fetch": false,
   "remote_tools.anthropic.web_search": true,
   "remote_tools.code_interpreter": false,
   "remote_tools.computer_use.env": "",
@@ -433,10 +438,15 @@
   "remote_tools.mcp": false,
   "remote_tools.mcp.args": "{\n \"type\": \"mcp\",\n \"server_label\": \"deepwiki\",\n \"server_url\": \"https://mcp.deepwiki.com/mcp\",\n \"require_approval\": \"never\",\n \"allowed_tools\": [\"ask_question\"]\n}",
   "remote_tools.web_search": true,
-  "remote_tools.xai.mode": "auto",
+  "remote_tools.xai.mode": "auto",
+  "remote_tools.xai.code_execution": false,
+  "remote_tools.xai.mcp": false,
+  "remote_tools.xai.mcp.args": "{\n \"server_url\": \"https://mcp.deepwiki.com/mcp\n}",
   "remote_tools.xai.sources.web": true,
   "remote_tools.xai.sources.x": true,
   "remote_tools.xai.sources.news": false,
+  "remote_tools.xai.web_search": true,
+  "remote_tools.xai.x_search": false,
   "render.blocks": true,
   "render.code_syntax": "github-dark",
   "render.code_syntax.disabled": false,
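Two things worth noting in the values above: the MCP settings embed JSON as a string inside the config value, so consumers must decode them before use, and the shipped default for `remote_tools.xai.mcp.args` is missing the closing quote after the URL, so strict parsing of that default would fail. A defensive decoding sketch; the helper is illustrative, not pygpt-net API:

```python
import json

# Hypothetical helper: decode a JSON-string config default such as
# "remote_tools.anthropic.mcp.mcp_servers" shown above. The fallback
# guards against malformed defaults like the unterminated URL in
# "remote_tools.xai.mcp.args".
def load_mcp_servers(config) -> list:
    raw = config.get("remote_tools.anthropic.mcp.mcp_servers", "[]")
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return []  # treat malformed JSON as "no MCP servers configured"
```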
pygpt_net/data/config/models.json
CHANGED
@@ -1,8 +1,8 @@
 {
   "__meta__": {
-    "version": "2.7.6",
-    "app.version": "2.7.6",
-    "updated_at": "2026-01-
+    "version": "2.7.7",
+    "app.version": "2.7.7",
+    "updated_at": "2026-01-05T00:00:00"
   },
   "items": {
     "SpeakLeash/bielik-11b-v2.3-instruct:Q4_K_M": {
@@ -3192,11 +3192,199 @@
         ]
       },
       "ctx": 256000,
-      "tokens":
+      "tokens": 0,
+      "default": false,
+      "input": [
+        "text"
+      ],
+      "output": [
+        "text"
+      ],
+      "extra": {},
+      "imported": false,
+      "provider": "x_ai",
+      "tool_calls": true
+    },
+    "grok-4-1-fast-non-reasoning": {
+      "id": "grok-4-1-fast-non-reasoning",
+      "name": "grok-4-1-fast-non-reasoning",
+      "mode": [
+        "chat",
+        "llama_index",
+        "agent_llama",
+        "agent_openai",
+        "agent",
+        "expert"
+      ],
+      "llama_index": {
+        "args": [
+          {
+            "name": "model",
+            "value": "grok-4-1-fast-non-reasoning",
+            "type": "str"
+          }
+        ],
+        "env": [
+          {
+            "name": "OPENAI_API_KEY",
+            "value": "{api_key_xai}",
+            "type": "str"
+          },
+          {
+            "name": "OPENAI_API_BASE",
+            "value": "{api_endpoint_xai}",
+            "type": "str"
+          }
+        ]
+      },
+      "ctx": 256000,
+      "tokens": 0,
+      "default": false,
+      "input": [
+        "text",
+        "image"
+      ],
+      "output": [
+        "text"
+      ],
+      "extra": {},
+      "imported": false,
+      "provider": "x_ai",
+      "tool_calls": true
+    },
+    "grok-4-1-fast-reasoning": {
+      "id": "grok-4-1-fast-reasoning",
+      "name": "grok-4-1-fast-reasoning",
+      "mode": [
+        "chat",
+        "llama_index",
+        "agent_llama",
+        "agent_openai",
+        "agent",
+        "expert"
+      ],
+      "llama_index": {
+        "args": [
+          {
+            "name": "model",
+            "value": "grok-4-1-fast-reasoning",
+            "type": "str"
+          }
+        ],
+        "env": [
+          {
+            "name": "OPENAI_API_KEY",
+            "value": "{api_key_xai}",
+            "type": "str"
+          },
+          {
+            "name": "OPENAI_API_BASE",
+            "value": "{api_endpoint_xai}",
+            "type": "str"
+          }
+        ]
+      },
+      "ctx": 256000,
+      "tokens": 0,
       "default": false,
       "input": [
+        "text",
+        "image"
+      ],
+      "output": [
         "text"
       ],
+      "extra": {},
+      "imported": false,
+      "provider": "x_ai",
+      "tool_calls": true
+    },
+    "grok-4-fast-non-reasoning": {
+      "id": "grok-4-fast-non-reasoning",
+      "name": "grok-4-fast-non-reasoning",
+      "mode": [
+        "chat",
+        "llama_index",
+        "agent_llama",
+        "agent_openai",
+        "agent",
+        "expert"
+      ],
+      "llama_index": {
+        "args": [
+          {
+            "name": "model",
+            "value": "grok-4-fast-non-reasoning",
+            "type": "str"
+          }
+        ],
+        "env": [
+          {
+            "name": "OPENAI_API_KEY",
+            "value": "{api_key_xai}",
+            "type": "str"
+          },
+          {
+            "name": "OPENAI_API_BASE",
+            "value": "{api_endpoint_xai}",
+            "type": "str"
+          }
+        ]
+      },
+      "ctx": 256000,
+      "tokens": 0,
+      "default": false,
+      "input": [
+        "text",
+        "image"
+      ],
+      "output": [
+        "text"
+      ],
+      "extra": {},
+      "imported": false,
+      "provider": "x_ai",
+      "tool_calls": true
+    },
+    "grok-4-fast-reasoning": {
+      "id": "grok-4-fast-reasoning",
+      "name": "grok-4-fast-reasoning",
+      "mode": [
+        "chat",
+        "llama_index",
+        "agent_llama",
+        "agent_openai",
+        "agent",
+        "expert"
+      ],
+      "llama_index": {
+        "args": [
+          {
+            "name": "model",
+            "value": "grok-4-fast-reasoning",
+            "type": "str"
+          }
+        ],
+        "env": [
+          {
+            "name": "OPENAI_API_KEY",
+            "value": "{api_key_xai}",
+            "type": "str"
+          },
+          {
+            "name": "OPENAI_API_BASE",
+            "value": "{api_endpoint_xai}",
+            "type": "str"
+          }
+        ]
+      },
+      "ctx": 256000,
+      "tokens": 0,
+      "default": false,
+      "input": [
+        "text",
+        "image"
+      ],
       "output": [
         "text"
       ],