pygpt-net 2.6.20__py3-none-any.whl → 2.6.21__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
- pygpt_net/CHANGELOG.txt +9 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +86 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/model/importer.py +2 -1
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +13 -6
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +86 -85
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0

pygpt_net/provider/agents/openai/agent_b2b.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 import copy

@@ -15,7 +15,6 @@ from typing import Dict, Any, Tuple, Union, Optional
 from agents import (
     Agent as OpenAIAgent,
     Runner,
-    RunConfig,
     TResponseInputItem,
 )
 

@@ -30,7 +29,6 @@ from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.preset import PresetItem
 
-from pygpt_net.provider.gpt.agents.client import get_custom_model_provider, set_openai_env
 from pygpt_net.provider.gpt.agents.remote_tools import append_tools
 from pygpt_net.provider.gpt.agents.response import StreamHandler
 

@@ -77,7 +75,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "Bot {}".format(id),
             "instructions": self.get_option(preset, option_key, "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -182,13 +180,7 @@ class Agent(BaseAgent):
         :param kwargs: Additional keyword arguments for the model configuration
         :return: Prepared keyword arguments for the model
         """
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            if "run_config" in kwargs:
-                kwargs.pop("run_config")
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
         return kwargs

@@ -233,20 +225,23 @@ class Agent(BaseAgent):
             verbose=verbose,
             tools=tools,
         )
-        if experts:
-            agent_kwargs["handoffs"] = experts
 
         bot_1_kwargs = copy.deepcopy(agent_kwargs)
+        bot_2_kwargs = copy.deepcopy(agent_kwargs)
+
         bot_1_kwargs["bot_id"] = 1
-
+        if experts:
+            bot_1_kwargs["handoffs"] = experts
+        bot_1 = self.get_agent(window, bot_1_kwargs)
 
         model_2 = model
-        bot_2_kwargs = copy.deepcopy(agent_kwargs)
         model_name_2 = self.get_option(preset, "bot_2", "model")
         if model_name_2:
             model_2 = window.core.models.get(model_name_2)
         bot_2_kwargs["model"] = model_2
         bot_2_kwargs["bot_id"] = 2
+        if experts:
+            bot_2_kwargs["handoffs"] = experts
         bot_2 = self.get_agent(window, bot_2_kwargs)
 
         kwargs = {
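
The recurring change in these agent providers is that the raw model item (and the per-run RunConfig(model_provider=...) setup via get_custom_model_provider / set_openai_env) is replaced by a single call to window.core.agents.provider.get_openai_model(model). That helper's body is not part of this diff; the following is only a hedged sketch of the general pattern, not pygpt-net's actual code, and every name besides the agents/openai imports is an assumption.

# Hedged sketch only -- NOT pygpt-net's implementation of get_openai_model();
# it illustrates resolving a per-agent model object instead of building a
# per-run RunConfig(model_provider=...). All helper names are assumptions.
from dataclasses import dataclass

from agents import OpenAIChatCompletionsModel
from openai import AsyncOpenAI


@dataclass
class DemoModelItem:  # stand-in for pygpt_net.item.model.ModelItem
    id: str
    provider: str
    base_url: str = ""
    api_key: str = ""


def get_openai_model(model: DemoModelItem):
    """Return a value accepted by Agent(model=...) for any provider."""
    if model.provider == "openai":
        return model.id  # plain model name, default OpenAI client
    # Non-OpenAI providers (e.g. a local Ollama endpoint) are wrapped in a
    # chat-completions model bound to a custom AsyncOpenAI client.
    client = AsyncOpenAI(base_url=model.base_url, api_key=model.api_key or "none")
    return OpenAIChatCompletionsModel(model=model.id, openai_client=client)


# Example: get_openai_model(DemoModelItem("llama3.1", "ollama",
#                                         base_url="http://localhost:11434/v1"))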

pygpt_net/provider/agents/openai/agent_planner.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass

@@ -115,7 +115,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "base", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -156,7 +156,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "evaluator",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": EvaluationFeedback,
         }
         tool_kwargs = append_tools(

@@ -195,7 +195,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "StructuredPlanner",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": StructuredPlan,
         }
         tool_kwargs = append_tools(

pygpt_net/provider/agents/openai/agent_with_experts.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, Tuple, Optional

@@ -63,7 +63,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": system_prompt,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -128,11 +128,7 @@ class Agent(BaseAgent):
             "input": messages,
             "max_turns": int(max_steps),
         }
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
 
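
Across these providers the prepared kwargs end up in Runner.run(), and previous_response_id is now attached only when the provider is native OpenAI, since it refers to server-side Responses API state that other providers do not have. Below is a hedged usage sketch of that call shape with a placeholder agent and input; it is not project code.

# Hedged usage sketch of the Runner.run() call shape used by these providers;
# the agent, input and prev_id below are placeholders, not project code.
import asyncio

from agents import Agent, Runner


async def main():
    agent = Agent(name="demo", instructions="Answer briefly.", model="gpt-4o-mini")
    kwargs = {"input": "Hello", "max_turns": 4}
    prev_id = None  # a previous OpenAI Responses API response id, if any
    if prev_id:  # only meaningful for the native OpenAI provider
        kwargs["previous_response_id"] = prev_id
    result = await Runner.run(agent, **kwargs)
    print(result.final_output)


asyncio.run(main())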

pygpt_net/provider/agents/openai/agent_with_experts_feedback.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass

@@ -82,7 +82,7 @@ class Agent(BaseAgent):
         agent_kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "base", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             agent_kwargs["handoffs"] = handoffs

@@ -123,7 +123,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "evaluator",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": EvaluationFeedback,
         }
         tool_kwargs = append_tools(

@@ -192,11 +192,7 @@ class Agent(BaseAgent):
             "input": messages,
             "max_turns": int(max_steps),
         }
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
 

pygpt_net/provider/agents/openai/agent_with_feedback.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from dataclasses import dataclass

@@ -82,7 +82,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "base", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -123,7 +123,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "evaluator",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": EvaluationFeedback,
         }
         tool_kwargs = append_tools(

@@ -192,11 +192,7 @@ class Agent(BaseAgent):
             "input": messages,
             "max_turns": int(max_steps),
         }
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
 

pygpt_net/provider/agents/openai/bot_researcher.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from typing import Dict, Any, Tuple, Union, Optional

@@ -89,7 +89,7 @@
         kwargs = {
             "name": agent_name,
             "instructions": system_prompt,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "handoffs": handoffs,
         }
         tool_kwargs = append_tools(

@@ -154,18 +154,6 @@
         model_search_kwargs = {}
         model_planner_kwargs = {}
 
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            model_kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-
-        if model_search.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model_search)
-            model_search_kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-
-        if model_planner.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model_planner)
-            model_planner_kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-
         # get experts
         experts = get_experts(
             window=window,

@@ -207,10 +195,6 @@
             },
             history=messages if messages else [],
         )
-
-        if model.provider == "openai":
-            set_openai_env(window)
-
         final_output = await bot.run(query)
         return ctx, final_output, response_id
 

pygpt_net/provider/agents/openai/bots/__init__.py
File without changes

pygpt_net/provider/agents/openai/bots/research_bot/__init__.py
File without changes

pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py
File without changes

pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py

@@ -46,7 +46,7 @@ def get_planner_agent(
     kwargs = {
         "name": "PlannerAgent",
         "instructions": config["prompt"],
-        "model": config["model"]
+        "model": window.core.agents.provider.get_openai_model(config["model"]),
         "output_type": WebSearchPlan,
     }
     tool_kwargs = append_tools(

pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py

@@ -37,7 +37,7 @@ def get_writer_agent(
     kwargs = {
         "name": "WriterAgent",
         "instructions": config["prompt"],
-        "model": config["model"]
+        "model": window.core.agents.provider.get_openai_model(config["model"]),
         "output_type": ReportData,
     }
     if config.get("experts"):
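
The planner, writer and evaluator agents in this release all pass an "output_type" (WebSearchPlan, ReportData, EvaluationFeedback, ...) so the agents SDK parses the reply into a typed object. Below is a hedged, self-contained sketch of that pattern with a stand-in pydantic type, not one of the project's types.

# Hedged sketch of the "output_type" pattern; MiniPlan is a stand-in type,
# not one of pygpt-net's WebSearchPlan / ReportData / EvaluationFeedback types.
import asyncio

from agents import Agent, Runner
from pydantic import BaseModel


class MiniPlan(BaseModel):
    steps: list[str]


async def main():
    planner = Agent(
        name="PlannerAgent",
        instructions="Return a short plan as structured output.",
        model="gpt-4o-mini",
        output_type=MiniPlan,
    )
    result = await Runner.run(planner, input="Plan a two-step web search about Python 3.13.")
    plan: MiniPlan = result.final_output  # parsed into MiniPlan
    print(plan.steps)


asyncio.run(main())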

pygpt_net/provider/agents/openai/bots/research_bot/manager.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 # Based on OpenAI examples: https://github.com/openai/openai-agents-python/blob/main/examples
 from __future__ import annotations

@@ -104,9 +104,6 @@ class ResearchManager:
         kwargs = {
             "input": messages,
         }
-        if self.planner_config["run_kwargs"]:
-            kwargs.update(self.planner_config["run_kwargs"])
-
         result = await Runner.run(
             agent,
             **kwargs

@@ -151,9 +148,6 @@ class ResearchManager:
         kwargs = {
             "input": input,
         }
-        if self.search_config["run_kwargs"]:
-            kwargs.update(self.search_config["run_kwargs"])
-
         try:
             result = await Runner.run(
                 agent,

@@ -182,9 +176,6 @@ class ResearchManager:
         kwargs = {
             "input": input,
         }
-        if self.writer_config["run_kwargs"]:
-            kwargs.update(self.writer_config["run_kwargs"])
-
         result = Runner.run_streamed(
             agent,
             **kwargs,
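
manager.py keeps calling Runner.run_streamed() for the writer step while dropping the per-step run_kwargs. For reference, a hedged sketch of consuming such a streamed run follows (placeholder agent, not the project's handler); the event filtering mirrors the openai-agents streaming examples.

# Hedged sketch of consuming Runner.run_streamed(); the agent is a placeholder.
import asyncio

from agents import Agent, Runner
from openai.types.responses import ResponseTextDeltaEvent


async def main():
    agent = Agent(name="writer", instructions="Write one sentence.", model="gpt-4o-mini")
    result = Runner.run_streamed(agent, input="Say hi.")
    async for event in result.stream_events():
        # Only raw text deltas are printed; tool calls and agent switches are skipped.
        if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
            print(event.data.delta, end="", flush=True)
    print()


asyncio.run(main())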

pygpt_net/provider/agents/openai/evolve.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 import copy

@@ -94,7 +94,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "base", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -135,7 +135,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "evaluator",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": EvaluationFeedback,
         }
         tool_kwargs = append_tools(

@@ -174,7 +174,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": "chooser",
             "instructions": instructions,
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model),
             "output_type": ChooseFeedback,
         }
         tool_kwargs = append_tools(

@@ -267,11 +267,7 @@ class Agent(BaseAgent):
         kwargs = {
             "max_turns": int(max_steps),
         }
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
 

pygpt_net/provider/agents/openai/supervisor.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 import json

@@ -89,7 +89,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "supervisor", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model)
         }
         if worker_tool:
             kwargs["tools"] = [worker_tool]

@@ -114,7 +114,7 @@ class Agent(BaseAgent):
         kwargs = {
             "name": agent_name,
             "instructions": self.get_option(preset, "worker", "prompt"),
-            "model": model
+            "model": window.core.agents.provider.get_openai_model(model)
         }
         if handoffs:
             kwargs["handoffs"] = handoffs

@@ -178,11 +178,7 @@ class Agent(BaseAgent):
             "input": messages,
             "max_turns": int(max_steps),
         }
-        if model.provider != "openai":
-            custom_provider = get_custom_model_provider(window, model)
-            kwargs["run_config"] = RunConfig(model_provider=custom_provider)
-        else:
-            set_openai_env(window)
+        if model.provider == "openai":
             if previous_response_id:
                 kwargs["previous_response_id"] = previous_response_id
 

pygpt_net/provider/core/config/patch.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #
 
 import copy

@@ -2051,8 +2051,8 @@ class Patch:
         # < 2.5.61
         if old < parse_version("2.5.61"):
             print("Migrating config from < 2.5.61..")
-            if "
-                data["
+            if "agent.output.render.all" not in data:
+                data["agent.output.render.all"] = False
             data["prompt.expert"] = self.window.core.config.get_base(
                 'prompt.expert')
             if "experts.use_agent" not in data:

@@ -2273,6 +2273,13 @@ class Patch:
             data['plugins']['google']['oauth_scopes'] += " https://www.googleapis.com/auth/documents"
             updated = True
 
+        # < 2.6.21
+        if old < parse_version("2.6.21"):
+            print("Migrating config from < 2.6.21...")
+            if "agent.output.render.all" not in data:
+                data["agent.output.render.all"] = True
+                updated = True
+
         # update file
         migrated = False
         if updated:
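
The config patch follows the same version-gated pattern as the rest of patch.py: compare the previously installed version against a threshold with packaging.version and add any missing keys. A minimal, hedged sketch of that pattern is shown below; the helper is illustrative, not the project's Patch class.

# Minimal sketch of the version-gated migration pattern, assuming "data" is the
# loaded config dict and old_version the previously installed app version.
# Illustrative only -- not pygpt-net's Patch class.
from packaging.version import parse as parse_version


def migrate_config(data: dict, old_version: str) -> bool:
    """Apply migrations newer than old_version; return True if data changed."""
    old = parse_version(old_version)
    updated = False

    # < 2.6.21: make sure the new agent.output.render.all key exists
    if old < parse_version("2.6.21"):
        if "agent.output.render.all" not in data:
            data["agent.output.render.all"] = True
            updated = True

    return updated


config = {}
if migrate_config(config, "2.6.20"):
    print(config)  # {'agent.output.render.all': True}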

pygpt_net/provider/core/ctx/db_sqlite/utils.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date:
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #
 
 import json

@@ -136,35 +136,35 @@ def unpack_item(
     :param row: DB row
     :return: context item
     """
-    item.
-    item.
+    item.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    item.attachments = unpack_item_value(row['attachments_json'])
+    item.audio_expires_ts = row['audio_expires_ts']
+    item.audio_id = row['audio_id']
+    item.cmds = unpack_item_value(row['cmds_json'])
+    item.doc_ids = unpack_item_value(row['docs_json'])
     item.external_id = row['external_id']
+    item.extra = unpack_item_value(row['extra'])
+    item.files = unpack_item_value(row['files_json'])
+    item.id = unpack_var(row['id'], 'int')
+    item.images = unpack_item_value(row['images_json'])
     item.input = row['input']
-    item.output = row['output']
     item.input_name = row['input_name']
-    item.output_name = row['output_name']
     item.input_timestamp = unpack_var(row['input_ts'], 'int')
-    item.
+    item.input_tokens = unpack_var(row['input_tokens'], 'int')
+    item.internal = unpack_var(row['is_internal'], 'bool')
+    item.meta_id = unpack_var(row['meta_id'], 'int')
     item.mode = row['mode']
     item.model = row['model']
-    item.thread = row['thread_id']
     item.msg_id = row['msg_id']
-    item.
-    item.
-    item.
-    item.urls = unpack_item_value(row['urls_json'])
-    item.images = unpack_item_value(row['images_json'])
-    item.files = unpack_item_value(row['files_json'])
-    item.attachments = unpack_item_value(row['attachments_json'])
-    item.additional_ctx = unpack_item_value(row['additional_ctx_json'])
-    item.extra = unpack_item_value(row['extra'])
-    item.input_tokens = unpack_var(row['input_tokens'], 'int')
+    item.output = row['output']
+    item.output_name = row['output_name']
+    item.output_timestamp = unpack_var(row['output_ts'], 'int')
     item.output_tokens = unpack_var(row['output_tokens'], 'int')
+    item.results = unpack_item_value(row['results_json'])
+    item.run_id = row['run_id']
+    item.thread = row['thread_id']
     item.total_tokens = unpack_var(row['total_tokens'], 'int')
-    item.
-    item.doc_ids = unpack_item_value(row['docs_json'])
-    item.audio_id = row['audio_id']
-    item.audio_expires_ts = row['audio_expires_ts']
+    item.urls = unpack_item_value(row['urls_json'])
 
     # set defaults
     if item.cmds is None:

@@ -199,31 +199,31 @@ def unpack_meta(
     :param row: DB row
    :return: context meta
    """
-    meta.
-    meta.
-    meta.
+    meta.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    meta.archived = unpack_var(row['is_archived'], 'bool')
+    meta.assistant = row['assistant_id']
     meta.created = unpack_var(row['created_ts'], 'int')
-    meta.
+    meta.deleted = unpack_var(row['is_deleted'], 'bool')
+    meta.external_id = row['external_id']
+    meta.extra = row['extra']
+    meta.group_id = unpack_var(row['group_id'], 'int')
+    meta.id = unpack_var(row['id'], 'int')
     meta.indexed = unpack_var(row['indexed_ts'], 'int')
-    meta.
-    meta.
-    meta.
+    meta.indexes = unpack_item_value(row['indexes_json'])
+    meta.initialized = unpack_var(row['is_initialized'], 'bool')
+    meta.important = unpack_var(row['is_important'], 'bool')
+    meta.label = unpack_var(row['label'], 'int')
     meta.last_mode = row['last_mode']
     meta.last_model = row['last_model']
-    meta.
-    meta.
+    meta.mode = row['mode']
+    meta.model = row['model']
+    meta.name = row['name']
     meta.preset = row['preset_id']
     meta.run = row['run_id']
     meta.status = row['status']
-    meta.
-    meta.
-    meta.
-    meta.important = unpack_var(row['is_important'], 'bool')
-    meta.archived = unpack_var(row['is_archived'], 'bool')
-    meta.label = unpack_var(row['label'], 'int')
-    meta.indexes = unpack_item_value(row['indexes_json'])
-    meta.group_id = unpack_var(row['group_id'], 'int')
-    meta.additional_ctx = unpack_item_value(row['additional_ctx_json'])
+    meta.thread = row['thread_id']
+    meta.updated = unpack_var(row['updated_ts'], 'int')
+    meta.uuid = row['uuid']
 
     if meta.additional_ctx is None:
         meta.additional_ctx = []

@@ -253,12 +253,12 @@ def unpack_group(
     :param row: DB row
     :return: context group
     """
-    group.
-    group.uuid = row['uuid']
+    group.additional_ctx = unpack_item_value(row['additional_ctx_json'])
     group.created = unpack_var(row['created_ts'], 'int')
-    group.
+    group.id = unpack_var(row['id'], 'int')
     group.name = row['name']
-    group.
+    group.updated = unpack_var(row['updated_ts'], 'int')
+    group.uuid = row['uuid']
     if group.additional_ctx is None:
         group.additional_ctx = []
     return group
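
The utils.py change only reorders the unpack_* assignments alphabetically; the unpack_var / unpack_item_value helpers themselves are not shown in this diff. The following is a hedged guess at what such helpers typically look like, not the project's actual implementations.

# Hedged guess at the unpack helpers used above -- NOT the project's code.
import json
from typing import Any, Optional


def unpack_var(value: Any, type_name: str) -> Any:
    """Coerce a raw sqlite column value to int/float/bool, tolerating NULLs."""
    if value is None:
        return None
    if type_name == "int":
        return int(value)
    if type_name == "float":
        return float(value)
    if type_name == "bool":
        return bool(value)
    return value


def unpack_item_value(value: Optional[str]) -> Any:
    """Decode a *_json column; return None for empty or invalid JSON."""
    if not value:
        return None
    try:
        return json.loads(value)
    except json.JSONDecodeError:
        return None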

pygpt_net/provider/core/model/patch.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #
 
 from packaging.version import parse as parse_version, Version

@@ -753,6 +753,16 @@ class Patch:
             data["gpt-4.1-nano"] = base_data["gpt-4.1-nano"]
             updated = True
 
+        # < 2.6.21 <-- add OpenAI Agents to Ollama
+        if old < parse_version("2.6.21"):
+            print("Migrating models from < 2.6.21...")
+            for id in data:
+                model = data[id]
+                if model.provider in ["ollama"]:
+                    if "agent_openai" not in model.mode:
+                        model.mode.append(MODE_AGENT_OPENAI)
+                        updated = True
+
         # update file
         if updated:
             data = dict(sorted(data.items()))