pygpt-net 2.6.19.post1__py3-none-any.whl → 2.6.21__py3-none-any.whl
This diff compares the contents of two package versions as published to one of the supported public registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in those registries.
- pygpt_net/CHANGELOG.txt +14 -0
- pygpt_net/__init__.py +3 -3
- pygpt_net/app.py +3 -1
- pygpt_net/controller/agent/agent.py +130 -2
- pygpt_net/controller/agent/experts.py +93 -96
- pygpt_net/controller/agent/llama.py +2 -1
- pygpt_net/controller/assistant/assistant.py +18 -1
- pygpt_net/controller/attachment/attachment.py +17 -1
- pygpt_net/controller/camera/camera.py +15 -7
- pygpt_net/controller/chat/chat.py +2 -2
- pygpt_net/controller/chat/common.py +50 -33
- pygpt_net/controller/chat/image.py +67 -77
- pygpt_net/controller/chat/input.py +94 -166
- pygpt_net/controller/chat/output.py +83 -140
- pygpt_net/controller/chat/response.py +83 -102
- pygpt_net/controller/chat/text.py +116 -149
- pygpt_net/controller/ctx/common.py +2 -1
- pygpt_net/controller/ctx/ctx.py +86 -6
- pygpt_net/controller/files/files.py +13 -1
- pygpt_net/controller/idx/idx.py +26 -2
- pygpt_net/controller/kernel/reply.py +53 -66
- pygpt_net/controller/kernel/stack.py +16 -16
- pygpt_net/controller/model/importer.py +2 -1
- pygpt_net/controller/model/model.py +62 -3
- pygpt_net/controller/settings/editor.py +4 -4
- pygpt_net/controller/ui/ui.py +16 -2
- pygpt_net/core/agents/observer/evaluation.py +3 -3
- pygpt_net/core/agents/provider.py +25 -3
- pygpt_net/core/agents/runner.py +4 -1
- pygpt_net/core/agents/runners/llama_workflow.py +19 -7
- pygpt_net/core/agents/runners/loop.py +3 -1
- pygpt_net/core/agents/runners/openai_workflow.py +17 -3
- pygpt_net/core/agents/tools.py +4 -1
- pygpt_net/core/bridge/context.py +34 -37
- pygpt_net/core/ctx/ctx.py +1 -1
- pygpt_net/core/db/database.py +2 -2
- pygpt_net/core/debug/debug.py +12 -1
- pygpt_net/core/dispatcher/dispatcher.py +24 -1
- pygpt_net/core/events/app.py +7 -7
- pygpt_net/core/events/control.py +26 -26
- pygpt_net/core/events/event.py +6 -3
- pygpt_net/core/events/kernel.py +2 -2
- pygpt_net/core/events/render.py +13 -13
- pygpt_net/core/experts/experts.py +76 -82
- pygpt_net/core/experts/worker.py +12 -12
- pygpt_net/core/models/models.py +5 -1
- pygpt_net/core/models/ollama.py +14 -5
- pygpt_net/core/render/web/helpers.py +2 -2
- pygpt_net/core/render/web/renderer.py +4 -4
- pygpt_net/core/types/__init__.py +2 -1
- pygpt_net/core/types/agent.py +4 -4
- pygpt_net/core/types/base.py +19 -0
- pygpt_net/core/types/console.py +6 -6
- pygpt_net/core/types/mode.py +8 -8
- pygpt_net/core/types/multimodal.py +3 -3
- pygpt_net/core/types/openai.py +2 -1
- pygpt_net/data/config/config.json +4 -4
- pygpt_net/data/config/models.json +19 -3
- pygpt_net/data/config/settings.json +14 -14
- pygpt_net/data/locale/locale.en.ini +2 -2
- pygpt_net/item/ctx.py +256 -240
- pygpt_net/item/model.py +59 -116
- pygpt_net/item/preset.py +122 -105
- pygpt_net/plugin/server/__init__.py +12 -0
- pygpt_net/plugin/server/config.py +301 -0
- pygpt_net/plugin/server/plugin.py +111 -0
- pygpt_net/plugin/server/worker.py +1057 -0
- pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
- pygpt_net/provider/agents/openai/agent.py +4 -12
- pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
- pygpt_net/provider/agents/openai/agent_planner.py +4 -4
- pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
- pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
- pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
- pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
- pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
- pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
- pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
- pygpt_net/provider/agents/openai/evolve.py +5 -9
- pygpt_net/provider/agents/openai/supervisor.py +4 -8
- pygpt_net/provider/core/config/patch.py +10 -3
- pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
- pygpt_net/provider/core/model/patch.py +11 -1
- pygpt_net/provider/core/preset/json_file.py +47 -49
- pygpt_net/provider/gpt/agents/experts.py +2 -2
- pygpt_net/ui/base/config_dialog.py +17 -3
- pygpt_net/ui/widget/option/checkbox.py +16 -2
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +30 -6
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +93 -88
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
- {pygpt_net-2.6.19.post1.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
pygpt_net/core/experts/experts.py
CHANGED
@@ -6,11 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 import json
-from typing import Dict, List
+from typing import Dict, List, Optional

 from PySide6.QtCore import Slot

@@ -114,16 +114,17 @@ class Experts:
         :return: experts dict
         """
         experts = {}
-
+        core = self.window.core
+        presets = core.presets.get_by_mode(MODE_EXPERT)

         # mode: agent
         if self.agent_enabled():
-            agents =
-            agent =
+            agents = core.presets.get_by_mode(MODE_AGENT)
+            agent = core.config.get('preset')
             if agent is not None:
                 if agent in agents:
                     for uuid in agents[agent].experts:
-                        expert =
+                        expert = core.presets.get_by_uuid(uuid)
                         if expert is not None:
                             id = expert.filename
                             experts[id] = expert
@@ -137,12 +138,12 @@ class Experts:
             experts[k] = presets[k]
         return experts

-    def get_expert_name_by_id(self, id: str) -> str:
+    def get_expert_name_by_id(self, id: str) -> Optional[str]:
         """
         Get expert name by id

         :param id: expert id
-        :return: expert name
+        :return: expert name or None if not found
         """
         experts = self.get_experts()
         if id in experts:
@@ -156,10 +157,11 @@ class Experts:
         :return: number of experts
         """
         i = 0
-
+        core = self.window.core
+        agents = core.presets.get_by_mode(MODE_AGENT)
         if uuid in agents:
             for expert_uuid in agents[uuid].experts:
-                expert =
+                expert = core.presets.get_by_uuid(expert_uuid)
                 if expert is not None:
                     i += 1
         return i
@@ -189,15 +191,16 @@ class Experts:
         :param ctx: context item
         :return: dict with calls
         """
+        core = self.window.core
         ids = self.get_experts().keys()
         if not ids:  # abort if no experts
             return {}
-        cmds =
+        cmds = core.command.extract_cmds(ctx.output)
         if len(cmds) > 0:
             ctx.cmds = cmds  # append commands to ctx
         else:  # abort if no cmds
             return {}
-        commands =
+        commands = core.command.from_commands(cmds)  # pack to execution list
         is_cmd = False
         my_commands = []
         calls = {}
@@ -220,7 +223,7 @@ class Experts:
                     query = item["params"]["query"]
                     calls[id] = query
             except Exception as e:
-
+                core.debug.log(e)
                 return {}
         return calls

@@ -272,9 +275,6 @@ class Experts:
         reply_ctx.output_name = ""
         reply_ctx.sub_call = True  # this flag is not copied in to_dict

-        internal = False
-        if self.agent_enabled():  # hide in agent mode
-            internal = True
         if ctx.output.strip() != "":
             response = reply_ctx.output
         else:
@@ -290,16 +290,15 @@ class Experts:
         if ctx.sub_reply:
             reply_ctx.extra["sub_reply"] = True  # mark as sub-reply
         context.prompt = json.dumps(result, ensure_ascii=False, indent=2)  # to master
-
-
-            "reply": True,
-            "internal": internal,
-        }
-        event = KernelEvent(KernelEvent.INPUT_SYSTEM, {
+
+        self.window.dispatch(KernelEvent(KernelEvent.INPUT_SYSTEM, {
             'context': context,
-            'extra':
-
-
+            'extra': {
+                "force": True,
+                "reply": True,
+                "internal": self.agent_enabled(),
+            },
+        }))

     def call(
         self,
@@ -317,26 +316,27 @@ class Experts:
         if self.stopped():
             return

-
+        worker = ExpertWorker(
             window=self.window,
             master_ctx=master_ctx,
             expert_id=expert_id,
-            query=query
+            query=query,
         )
-
-
-
-
-
-
-
-
-        # start worker in
+        worker.signals.response.connect(self.handle_response)  # connect to finished signal
+        worker.signals.finished.connect(self.handle_finished)  # connect to finished signal
+        worker.signals.error.connect(self.handle_error)  # connect to error signal
+        worker.signals.event.connect(self.handle_event)  # connect to event signal
+        worker.signals.output.connect(self.handle_output)  # connect to output signal
+        worker.signals.lock_input.connect(self.handle_input_locked)  # connect to lock input signal
+        worker.signals.cmd.connect(self.handle_cmd)  # connect to cmd signal
+
+        # start worker in threadpool
+        self.worker = worker
         self.last_expert_id = expert_id  # store last expert id
         self.master_ctx = master_ctx
-
+        name = self.get_expert_name_by_id(expert_id)
         event = KernelEvent(KernelEvent.STATE_BUSY, {
-            "msg": f"{trans('expert.wait.status')} ({
+            "msg": f"{trans('expert.wait.status')} ({name})",
         })
         self.window.dispatch(event)  # dispatch busy event
         self.window.threadpool.start(self.worker)
@@ -355,7 +355,7 @@ class Experts:
         self.window.controller.chat.output.handle(
             ctx=ctx,
             mode=mode,
-
+            stream=False,
         )

     @Slot(CtxItem, CtxItem, str, str, str)
@@ -379,31 +379,33 @@ class Experts:
         if self.stopped():
             return

+        core = self.window.core
+        update_status = self.window.update_status
+
         # extract native tool calls if provided
         if ctx.tool_calls:
             # if not internal commands in a text body then append tool calls as commands (prevent double commands)
-            if not
-
+            if not core.command.has_cmds(ctx.output):
+                core.command.append_tool_calls(ctx)  # append tool calls as commands
             if not isinstance(ctx.extra, dict):
                 ctx.extra = {}
             ctx.extra["tool_calls"] = ctx.tool_calls

         # if 'get_context' tool is used then force call, and append idx
         self.extract_tool_calls(ctx)  # extract tool calls from ctx
-
         self.window.controller.chat.command.handle(ctx, internal=True)  # handle cmds sync
-        if ctx.reply:
-            self.window.update_status("")  # clear status

+        if ctx.reply:
+            update_status("")  # clear status
             # prepare data to send as reply
             tool_data = json.dumps(ctx.results)
             # if "tool_output" in ctx.extra and ctx.extra["tool_output"]:
             #     tool_data = str(ctx.extra["tool_output"])

-
-
+            core.ctx.update_item(ctx)  # update context in db
+            update_status('...')
             ctx.output = f"<tool>{ctx.cmds}</tool>"
-
+            core.ctx.update_item(ctx)  # update ctx in DB
             self.handle_finished()
             self.call(
                 master_ctx=self.master_ctx,
@@ -461,34 +463,29 @@ class Experts:

         :param error: error message
         """
+        dispatch = self.window.dispatch
+
         if self.stopped():
-
-            self.window.dispatch(event)  # dispatch idle event
+            dispatch(KernelEvent(KernelEvent.STATE_IDLE, {}))  # dispatch idle event
             return

         # handle error from worker
         context = BridgeContext()
         context.prompt = f"{trans('expert.wait.failed')}: {error}"
-
-            "force": True,
-            "reply": False,
-            "internal": False,
-        }
-        # reply to master
-        event = KernelEvent(KernelEvent.INPUT_SYSTEM, {
+        dispatch(KernelEvent(KernelEvent.INPUT_SYSTEM, {
             'context': context,
-            'extra':
-
-
-
-
+            'extra': {
+                "force": True,
+                "reply": False,
+                "internal": False,
+            },
+        }))  # reply to master
+        dispatch(KernelEvent(KernelEvent.STATE_IDLE, {}))  # dispatch idle event

     @Slot()
     def handle_finished(self):
         """Handle worker finished signal"""
-
-        self.window.dispatch(event)  # dispatch idle event
-
+        self.window.dispatch(KernelEvent(KernelEvent.STATE_IDLE, {}))  # dispatch idle event

     @Slot(CtxItem, str)
     def handle_response(self, ctx: CtxItem, expert_id: str):
@@ -498,9 +495,10 @@ class Experts:
         :param ctx: CtxItem
         :param expert_id: expert id
         """
+        dispatch = self.window.dispatch
+
         if self.stopped():
-
-            self.window.dispatch(event)  # dispatch idle event
+            dispatch(KernelEvent(KernelEvent.STATE_IDLE, {}))  # dispatch idle event
             return

         # handle reply from worker
@@ -510,20 +508,17 @@ class Experts:
             "expert_id": expert_id,
             "result": str(ctx.output),
         }
+        # TODO: clear ctx.output here?
         context.prompt = json.dumps(result, ensure_ascii=False, indent=2)  # prepare prompt for reply
-
-            "force": True,
-            "reply": True,
-            "internal": False,
-        }
-        # reply to master
-        event = KernelEvent(KernelEvent.INPUT_SYSTEM, {
+        dispatch(KernelEvent(KernelEvent.INPUT_SYSTEM, {
             'context': context,
-            'extra':
-
-
-
-
+            'extra': {
+                "force": True,
+                "reply": True,
+                "internal": False,
+            },
+        }))  # reply to master
+        dispatch(KernelEvent(KernelEvent.STATE_IDLE, {}))  # dispatch idle event

     def get_functions(self) -> List[Dict[str, str]]:
         """
@@ -531,7 +526,7 @@ class Experts:

         :return: call the expert commands
         """
-
+        return [
             {
                 "cmd": TOOL_EXPERT_CALL_NAME,
                 "instruction": TOOL_EXPERT_CALL_DESCRIPTION,
@@ -551,7 +546,6 @@ class Experts:
                 ]
             }
         ]
-        return cmds

     def get_retriever_tool(self) -> Dict[str, str]:
         """
@@ -580,10 +574,10 @@ class Experts:
         :return: True if expert calls found
         """
         if not ctx.sub_reply and not ctx.reply:
-            mentions = self.
+            mentions = self.extract_calls(ctx)
             if mentions:
                 for expert_id in mentions:
-                    if not self.
+                    if not self.exists(expert_id):
                         continue
                     return True
         return False
pygpt_net/core/experts/worker.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.23 15:00:00 #
 # ================================================== #

 from typing import List, Optional
@@ -303,16 +303,6 @@ class ExpertWorker(QRunnable):
         self.signals.finished.emit()
         self.cleanup()

-    def cleanup(self):
-        """Cleanup resources after worker execution."""
-        sig = self.signals
-        self.signals = None
-        if sig is not None:
-            try:
-                sig.deleteLater()
-            except RuntimeError:
-                pass
-
     def call_agent(
         self,
         context: BridgeContext,
@@ -359,4 +349,14 @@ class ExpertWorker(QRunnable):
         if response_ctx:
             return str(response_ctx.output)
         else:
-            return "No response from expert."
+            return "No response from expert."
+
+    def cleanup(self):
+        """Cleanup resources after worker execution."""
+        sig = self.signals
+        self.signals = None
+        if sig is not None:
+            try:
+                sig.deleteLater()
+            except RuntimeError:
+                pass
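
Both files above follow the standard Qt threadpool pattern: signals live on a separate QObject, handlers are connected before the runnable is started, and `cleanup()` detaches and deletes the signals object once the run ends. A minimal self-contained sketch of that pattern (hypothetical `Worker`/`WorkerSignals` names, not the pygpt-net classes):

```python
from PySide6.QtCore import QObject, QRunnable, QThreadPool, Signal, Slot


class WorkerSignals(QObject):
    # signals must live on a QObject; QRunnable cannot declare them itself
    response = Signal(str)
    finished = Signal()


class Worker(QRunnable):
    def __init__(self, query: str):
        super().__init__()
        self.query = query
        self.signals = WorkerSignals()

    @Slot()
    def run(self):
        try:
            self.signals.response.emit(f"result for: {self.query}")
            self.signals.finished.emit()
        finally:
            self.cleanup()

    def cleanup(self):
        # detach first, then delete; guard against an already-deleted QObject
        sig = self.signals
        self.signals = None
        if sig is not None:
            try:
                sig.deleteLater()
            except RuntimeError:
                pass


worker = Worker("example")
worker.signals.response.connect(print)      # connect handlers before starting
QThreadPool.globalInstance().start(worker)  # the pool takes ownership and calls run()
QThreadPool.globalInstance().waitForDone()  # demo only: block until the worker finishes
```

Deferring destruction to `deleteLater()` and swallowing `RuntimeError` guards against the signals object having already been deleted on the Qt side when cleanup runs.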
pygpt_net/core/models/models.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #

 import copy
@@ -498,6 +498,10 @@ class Models:
             args["api_key"] = cfg.get('api_key_hugging_face', "")
             args["base_url"] = cfg.get('api_endpoint_hugging_face', "")
             self.window.core.debug.info("[api] Using client: HuggingFace Router API")
+        elif model.provider == "ollama":
+            args["api_key"] = "ollama"
+            args["base_url"] = self.window.core.models.ollama.get_base_url() + "/v1"
+            self.window.core.debug.info("[api] Using client: Ollama")
         else:
             self.window.core.debug.info("[api] Using client: OpenAI (default)")

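
The new `elif` branch above reuses the OpenAI client for the "ollama" provider by pointing it at the OpenAI-compatible endpoint Ollama serves under `/v1`. A standalone sketch of the same wiring, assuming a local Ollama server and the `openai` Python package (the model name `llama3` is only an example):

```python
import os

from openai import OpenAI  # official OpenAI client, reused against Ollama's /v1 endpoint

# same resolution order as Ollama.get_base_url(): env override first, then localhost default
base_url = os.environ.get("OLLAMA_API_BASE", "http://localhost:11434")

client = OpenAI(
    api_key="ollama",           # Ollama ignores the key, but the client requires one
    base_url=base_url + "/v1",  # the OpenAI-compatible API lives under /v1
)

response = client.chat.completions.create(
    model="llama3",  # example name; any model pulled into the local Ollama works
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```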
pygpt_net/core/models/ollama.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #

 import os
@@ -22,16 +22,25 @@ class Ollama:
         self.window = window
         self.available_models = []

-    def
+    def get_base_url(self) -> str:
         """
-
+        Get Ollama API base URL

-        :return:
+        :return: Ollama API base URL
         """
         api_base = "http://localhost:11434"
         if 'OLLAMA_API_BASE' in os.environ:
             api_base = os.environ['OLLAMA_API_BASE']
-
+        return api_base
+
+    def get_status(self) -> dict:
+        """
+        Check Ollama status
+
+        :return: dict
+        """
+        api_base = self.get_base_url()
+        self.window.core.idx.log(f"Using Ollama base URL: {api_base}")

         url = api_base + "/api/tags"
         try:
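
`get_status()` resolves the base URL (environment override first, then the localhost default) and probes `/api/tags`, the Ollama endpoint that lists locally installed models. A minimal sketch of that health check using only the standard library (hypothetical `ollama_status` helper, not the pygpt-net API):

```python
import json
import os
from urllib.error import URLError
from urllib.request import urlopen


def ollama_status(timeout: float = 2.0) -> dict:
    """Ping a local Ollama server and list installed models via /api/tags."""
    # same resolution order as Ollama.get_base_url(): env override, then default
    base = os.environ.get("OLLAMA_API_BASE", "http://localhost:11434")
    try:
        with urlopen(base + "/api/tags", timeout=timeout) as resp:
            models = json.load(resp).get("models", [])
        return {"status": True, "models": [m.get("name") for m in models]}
    except (URLError, OSError, ValueError):
        return {"status": False, "models": []}


print(ollama_status())  # e.g. {'status': True, 'models': ['llama3:latest']}
```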
pygpt_net/core/render/web/helpers.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 import re
@@ -136,7 +136,7 @@ class Helpers:
         :return: formatted text
         """
         s = text
-        if self.window.core.config.get("
+        if self.window.core.config.get("agent.output.render.all", False):
             if "__agent_begin__" in s or "__agent_end__" in s:
                 s = s.replace("__agent_begin__", '<div class="msg-agent">').replace("__agent_end__", "</div>")
         return s.strip()
pygpt_net/core/render/web/renderer.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 02:00:00 #
 # ================================================== #

 import json
@@ -594,10 +594,10 @@ class Renderer(BaseRenderer):
         """
         output = ctx.output
         if isinstance(ctx.extra, dict) and ctx.extra.get("output"):
-            if self.window.core.config.get("
-                output =
+            if self.window.core.config.get("agent.output.render.all", False):
+                output = ctx.output  # full agent output
             else:
-                output = ctx.extra["output"]
+                output = ctx.extra["output"]  # final output only
         else:
             if not output:
                 return
pygpt_net/core/types/__init__.py
CHANGED
@@ -6,10 +6,11 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 21:00:00 #
 # ================================================== #

 from .agent import *
+from .base import *
 from .mode import *
 from .model import *
 from .openai import *
pygpt_net/core/types/agent.py
CHANGED
@@ -6,14 +6,14 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 21:00:00 #
 # ================================================== #

-AGENT_MODE_STEP = "agent.mode.step"  # LlamaIndex
-AGENT_MODE_PLAN = "agent.mode.plan"  # LlamaIndex
 AGENT_MODE_ASSISTANT = "agent.mode.assistant"  # LlamaIndex
-AGENT_MODE_WORKFLOW = "agent.mode.workflow"  # LlamaIndex
 AGENT_MODE_OPENAI = "agent.mode.openai"  # openai-agent
+AGENT_MODE_PLAN = "agent.mode.plan"  # LlamaIndex
+AGENT_MODE_STEP = "agent.mode.step"  # LlamaIndex
+AGENT_MODE_WORKFLOW = "agent.mode.workflow"  # LlamaIndex

 AGENT_TYPE_LLAMA = "agent.type.llama"  # LlamaIndex
 AGENT_TYPE_OPENAI = "agent.type.openai"  # OpenAI Agent
pygpt_net/core/types/base.py
ADDED
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package #
+# Website: https://pygpt.net #
+# GitHub: https://github.com/szczyglis-dev/py-gpt #
+# MIT License #
+# Created By : Marcin Szczygliński #
+# Updated Date: 2025.08.23 21:00:00 #
+# ================================================== #
+
+TYPE_BOOL = "bool"
+TYPE_DICT = "dict"
+TYPE_ENUM = "enum"
+TYPE_FLOAT = "float"
+TYPE_INT = "int"
+TYPE_LIST = "list"
+TYPE_STR = "str"
+TYPE_TEXT = "text"
pygpt_net/core/types/console.py
CHANGED
@@ -6,16 +6,16 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 21:00:00 #
 # ================================================== #

 class Color:
+    BOLD = '\033[1m'
+    ENDC = '\033[0m'
+    FAIL = '\033[91m'
     HEADER = '\033[95m'
     OKBLUE = '\033[94m'
     OKCYAN = '\033[96m'
     OKGREEN = '\033[92m'
-
-
-    ENDC = '\033[0m'
-    BOLD = '\033[1m'
-    UNDERLINE = '\033[4m'
+    UNDERLINE = '\033[4m'
+    WARNING = '\033[93m'
pygpt_net/core/types/mode.py
CHANGED
@@ -6,20 +6,20 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 21:00:00 #
 # ================================================== #

+MODE_AGENT = "agent"
+MODE_AGENT_LLAMA = "agent_llama"
+MODE_AGENT_OPENAI = "agent_openai"
+MODE_ASSISTANT = "assistant"
 MODE_AUDIO = "audio"
 MODE_CHAT = "chat"
 MODE_COMPLETION = "completion"
+MODE_COMPUTER = "computer"
+MODE_EXPERT = "expert"
 MODE_IMAGE = "img"
-MODE_VISION = "vision"
-MODE_ASSISTANT = "assistant"
 MODE_LANGCHAIN = "langchain"
 MODE_LLAMA_INDEX = "llama_index"
-MODE_AGENT = "agent"
-MODE_AGENT_LLAMA = "agent_llama"
-MODE_AGENT_OPENAI = "agent_openai"
-MODE_EXPERT = "expert"
 MODE_RESEARCH = "research"
-
+MODE_VISION = "vision"
pygpt_net/core/types/multimodal.py
CHANGED
@@ -6,10 +6,10 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.
+# Updated Date: 2025.08.23 21:00:00 #
 # ================================================== #

-MULTIMODAL_TEXT = "text"
-MULTIMODAL_IMAGE = "image"
 MULTIMODAL_AUDIO = "audio"
+MULTIMODAL_IMAGE = "image"
+MULTIMODAL_TEXT = "text"
 MULTIMODAL_VIDEO = "video"
pygpt_net/core/types/openai.py
CHANGED
@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.
+# Updated Date: 2025.08.24 03:00:00 #
 # ================================================== #

 OPENAI_COMPATIBLE_PROVIDERS = [
@@ -17,6 +17,7 @@ OPENAI_COMPATIBLE_PROVIDERS = [
     "huggingface_router",
     "local_ai",
     "mistral_ai",
+    "ollama",
     "perplexity",
     "deepseek_api",
     "x_ai",
pygpt_net/data/config/config.json
CHANGED
@@ -1,8 +1,8 @@
 {
     "__meta__": {
-        "version": "2.6.
-        "app.version": "2.6.
-        "updated_at": "2025-08-
+        "version": "2.6.21",
+        "app.version": "2.6.21",
+        "updated_at": "2025-08-24T00:00:00"
     },
     "access.audio.event.speech": false,
     "access.audio.event.speech.disabled": [],
@@ -63,6 +63,7 @@
     "agent.mode": "chat",
     "agent.openai.provider": "openai_agent_base",
     "agent.openai.response.split": true,
+    "agent.output.render.all": true,
     "ai_name": "",
     "api_azure_version": "2023-07-01-preview",
     "api_azure_endpoint": "https://<your-resource-name>.openai.azure.com/",
@@ -221,7 +222,6 @@
     "llama.idx.auto": false,
     "llama.idx.auto.index": "base",
     "llama.idx.auto.modes": "chat,completion,vision,assistant,research,llama_index,agent",
-    "llama.idx.chat.agent.render.all": false,
     "llama.idx.chat.auto_retrieve": true,
     "llama.idx.chat.mode": "context",
     "llama.idx.current": null,