pygpt-net 2.6.20__py3-none-any.whl → 2.6.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. pygpt_net/CHANGELOG.txt +9 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/agent/agent.py +130 -2
  4. pygpt_net/controller/agent/experts.py +93 -96
  5. pygpt_net/controller/agent/llama.py +2 -1
  6. pygpt_net/controller/assistant/assistant.py +18 -1
  7. pygpt_net/controller/attachment/attachment.py +17 -1
  8. pygpt_net/controller/camera/camera.py +15 -7
  9. pygpt_net/controller/chat/chat.py +2 -2
  10. pygpt_net/controller/chat/common.py +50 -33
  11. pygpt_net/controller/chat/image.py +67 -77
  12. pygpt_net/controller/chat/input.py +94 -166
  13. pygpt_net/controller/chat/output.py +83 -140
  14. pygpt_net/controller/chat/response.py +83 -102
  15. pygpt_net/controller/chat/text.py +116 -149
  16. pygpt_net/controller/ctx/common.py +2 -1
  17. pygpt_net/controller/ctx/ctx.py +86 -6
  18. pygpt_net/controller/files/files.py +13 -1
  19. pygpt_net/controller/idx/idx.py +26 -2
  20. pygpt_net/controller/kernel/reply.py +53 -66
  21. pygpt_net/controller/kernel/stack.py +16 -16
  22. pygpt_net/controller/model/importer.py +2 -1
  23. pygpt_net/controller/model/model.py +62 -3
  24. pygpt_net/controller/settings/editor.py +4 -4
  25. pygpt_net/controller/ui/ui.py +16 -2
  26. pygpt_net/core/agents/observer/evaluation.py +3 -3
  27. pygpt_net/core/agents/provider.py +25 -3
  28. pygpt_net/core/agents/runner.py +4 -1
  29. pygpt_net/core/agents/runners/llama_workflow.py +19 -7
  30. pygpt_net/core/agents/runners/loop.py +3 -1
  31. pygpt_net/core/agents/runners/openai_workflow.py +17 -3
  32. pygpt_net/core/agents/tools.py +4 -1
  33. pygpt_net/core/bridge/context.py +34 -37
  34. pygpt_net/core/ctx/ctx.py +1 -1
  35. pygpt_net/core/db/database.py +2 -2
  36. pygpt_net/core/debug/debug.py +12 -1
  37. pygpt_net/core/dispatcher/dispatcher.py +24 -1
  38. pygpt_net/core/events/app.py +7 -7
  39. pygpt_net/core/events/control.py +26 -26
  40. pygpt_net/core/events/event.py +6 -3
  41. pygpt_net/core/events/kernel.py +2 -2
  42. pygpt_net/core/events/render.py +13 -13
  43. pygpt_net/core/experts/experts.py +76 -82
  44. pygpt_net/core/experts/worker.py +12 -12
  45. pygpt_net/core/models/models.py +5 -1
  46. pygpt_net/core/models/ollama.py +14 -5
  47. pygpt_net/core/render/web/helpers.py +2 -2
  48. pygpt_net/core/render/web/renderer.py +4 -4
  49. pygpt_net/core/types/__init__.py +2 -1
  50. pygpt_net/core/types/agent.py +4 -4
  51. pygpt_net/core/types/base.py +19 -0
  52. pygpt_net/core/types/console.py +6 -6
  53. pygpt_net/core/types/mode.py +8 -8
  54. pygpt_net/core/types/multimodal.py +3 -3
  55. pygpt_net/core/types/openai.py +2 -1
  56. pygpt_net/data/config/config.json +4 -4
  57. pygpt_net/data/config/models.json +19 -3
  58. pygpt_net/data/config/settings.json +14 -14
  59. pygpt_net/data/locale/locale.en.ini +2 -2
  60. pygpt_net/item/ctx.py +256 -240
  61. pygpt_net/item/model.py +59 -116
  62. pygpt_net/item/preset.py +122 -105
  63. pygpt_net/provider/agents/llama_index/workflow/planner.py +3 -3
  64. pygpt_net/provider/agents/openai/agent.py +4 -12
  65. pygpt_net/provider/agents/openai/agent_b2b.py +10 -15
  66. pygpt_net/provider/agents/openai/agent_planner.py +4 -4
  67. pygpt_net/provider/agents/openai/agent_with_experts.py +3 -7
  68. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -8
  69. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -8
  70. pygpt_net/provider/agents/openai/bot_researcher.py +2 -18
  71. pygpt_net/provider/agents/openai/bots/__init__.py +0 -0
  72. pygpt_net/provider/agents/openai/bots/research_bot/__init__.py +0 -0
  73. pygpt_net/provider/agents/openai/bots/research_bot/agents/__init__.py +0 -0
  74. pygpt_net/provider/agents/openai/bots/research_bot/agents/planner_agent.py +1 -1
  75. pygpt_net/provider/agents/openai/bots/research_bot/agents/search_agent.py +1 -0
  76. pygpt_net/provider/agents/openai/bots/research_bot/agents/writer_agent.py +1 -1
  77. pygpt_net/provider/agents/openai/bots/research_bot/manager.py +1 -10
  78. pygpt_net/provider/agents/openai/evolve.py +5 -9
  79. pygpt_net/provider/agents/openai/supervisor.py +4 -8
  80. pygpt_net/provider/core/config/patch.py +10 -3
  81. pygpt_net/provider/core/ctx/db_sqlite/utils.py +43 -43
  82. pygpt_net/provider/core/model/patch.py +11 -1
  83. pygpt_net/provider/core/preset/json_file.py +47 -49
  84. pygpt_net/provider/gpt/agents/experts.py +2 -2
  85. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/METADATA +13 -6
  86. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/RECORD +86 -85
  87. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/LICENSE +0 -0
  88. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/WHEEL +0 -0
  89. {pygpt_net-2.6.20.dist-info → pygpt_net-2.6.21.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,12 @@
1
+ 2.6.21 (2025-08-24)
2
+
3
+ - Ollama models are now available in OpenAI Agents mode.
4
+ - Improved parsing of responses from Agents.
5
+ - Fix: do not initialize index in Agents mode if not provided.
6
+ - Fix: agent response evaluation steps limit.
7
+ - Fix: do not execute code in agents if Tools are disabled.
8
+ - Refactoring.
9
+
1
10
  2.6.20 (2025-08-22)
2
11
 
3
12
  - Added a new plugin: Server (FTP/SSH) - connect to remote servers using FTP, SFTP, and SSH. Execute remote commands, upload, download, and more (beta).
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.22 00:00:00 #
9
+ # Updated Date: 2025.08.24 00:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  __author__ = "Marcin Szczygliński"
13
13
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
14
14
  __credits__ = ["Marcin Szczygliński"]
15
15
  __license__ = "MIT"
16
- __version__ = "2.6.20"
17
- __build__ = "2025-08-22"
16
+ __version__ = "2.6.21"
17
+ __build__ = "2025-08-24"
18
18
  __maintainer__ = "Marcin Szczygliński"
19
19
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
20
20
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
@@ -6,9 +6,18 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.03 14:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
+ from pygpt_net.core.events import BaseEvent, Event
13
+ from pygpt_net.core.types import (
14
+ MODE_AGENT,
15
+ MODE_AGENT_LLAMA,
16
+ MODE_AGENT_OPENAI,
17
+ )
18
+ from pygpt_net.item.ctx import CtxItem
19
+ from pygpt_net.utils import trans
20
+
12
21
  from .common import Common
13
22
  from .experts import Experts
14
23
  from .legacy import Legacy
@@ -28,7 +37,7 @@ class Agent:
28
37
  self.legacy = Legacy(window)
29
38
 
30
39
  def setup(self):
31
- """Setup agent controller"""
40
+ """Setup agents"""
32
41
  self.legacy.setup()
33
42
  self.llama.setup()
34
43
 
@@ -41,3 +50,122 @@ class Agent:
41
50
  """Force stop all agents"""
42
51
  self.legacy.on_stop()
43
52
  self.llama.on_stop()
53
+
54
+ def handle(self, event: BaseEvent):
55
+ """
56
+ Handle events
57
+
58
+ :param event: BaseEvent: Event to handle
59
+ """
60
+ name = event.name
61
+
62
+ # on input begin, unlock experts and reset evaluation steps
63
+ if name == Event.INPUT_BEGIN:
64
+ mode = event.data.get("mode", "")
65
+ force = event.data.get("force", False)
66
+ self.experts.unlock() # unlock experts
67
+ self.llama.reset_eval_step() # reset evaluation steps
68
+
69
+ if not force:
70
+ # if agent mode: iterations check, show alert confirm if infinity loop
71
+ if self.common.is_infinity_loop(mode):
72
+ event.data["stop"] = True # stop flow
73
+ self.common.display_infinity_loop_confirm()
74
+ return
75
+
76
+ # check if agent is selected
77
+ if mode in (MODE_AGENT_OPENAI, MODE_AGENT_LLAMA):
78
+ preset = self.window.controller.presets.get_current()
79
+ if not preset or preset.name == "*":
80
+ event.data["stop"] = True # stop flow
81
+ self.window.ui.dialogs.alert(
82
+ trans("dialog.agent.not_selected"))
83
+ return
84
+
85
+ # on user send, start agent flow
86
+ elif name == Event.USER_SEND:
87
+ mode = event.data.get("mode", "")
88
+ text = event.data.get("value", "")
89
+ if mode == MODE_AGENT:
90
+ self.legacy.on_user_send(text) # begin Legacy (autonomous) agent flow
91
+ elif mode in (
92
+ MODE_AGENT_LLAMA,
93
+ MODE_AGENT_OPENAI,
94
+ ):
95
+ self.llama.on_user_send(text) # begin LlamaIndex and OpenAI agent flow
96
+
97
+ # on input before, process text before sending input
98
+ elif name == Event.INPUT_BEFORE:
99
+ mode = event.data.get("mode", "")
100
+ if mode == MODE_AGENT:
101
+ text = event.data.get("value", "")
102
+ self.window.controller.chat.log(f"Agent: input before: {text}")
103
+ event.data["value"] = self.legacy.on_input_before(text)
104
+
105
+ # on pre-prompt
106
+ elif name == Event.PRE_PROMPT:
107
+ mode = event.data.get("mode", "")
108
+ sys_prompt = event.data.get("value", "")
109
+ is_expert = event.data.get("is_expert", False)
110
+ if is_expert:
111
+ return # abort if expert call
112
+ event.data["value"] = self.experts.append_prompts(
113
+ mode,
114
+ sys_prompt,
115
+ )
116
+ # on ctx before
117
+ elif name == Event.CTX_BEFORE:
118
+ mode = event.data.get("mode", "")
119
+ if mode == MODE_AGENT:
120
+ self.legacy.on_ctx_before(event.ctx)
121
+
122
+ # on bridge before, prepare bridge context
123
+ elif name == Event.BRIDGE_BEFORE:
124
+ mode = event.data.get("mode", "")
125
+ bridge_context = event.data.get("context", None)
126
+ extra = event.data.get("extra", {})
127
+ if mode in (MODE_AGENT_LLAMA, MODE_AGENT_OPENAI):
128
+
129
+ # agent provider
130
+ agent_provider = None # agent provider (llama or openai)
131
+ if mode == MODE_AGENT_LLAMA:
132
+ agent_provider = self.window.core.config.get("agent.llama.provider")
133
+ elif mode == MODE_AGENT_OPENAI:
134
+ agent_provider =self.window.core.config.get("agent.openai.provider")
135
+ agent_idx = self.window.core.config.get("agent.llama.idx")
136
+
137
+ extra["agent_idx"] = agent_idx
138
+ extra["agent_provider"] = agent_provider
139
+
140
+ # update assistant ID if assistant agent
141
+ if mode == MODE_AGENT_LLAMA:
142
+ preset = self.window.controller.presets.get_current()
143
+ if preset is not None:
144
+ bridge_context.assistant_id = preset.assistant_id
145
+
146
+ # on ctx after
147
+ elif name == Event.CTX_AFTER:
148
+ mode = event.data.get("mode", "")
149
+ if mode == MODE_AGENT:
150
+ self.legacy.on_ctx_after(event.ctx)
151
+
152
+ # on ctx end
153
+ elif name == Event.CTX_END:
154
+ mode = event.data.get("mode", "")
155
+ if mode == MODE_AGENT:
156
+ iterations = int(self.window.core.config.get("agent.iterations"))
157
+ self.window.controller.chat.log(f"Agent: ctx end, iterations: {iterations}")
158
+ self.legacy.on_ctx_end(
159
+ event.ctx,
160
+ iterations=iterations,
161
+ )
162
+
163
+ def on_reply(self, ctx: CtxItem):
164
+ """
165
+ On reply event
166
+
167
+ :param ctx: CtxItem: Context item to handle reply for
168
+ """
169
+ if ctx.internal and self.legacy.enabled():
170
+ self.legacy.add_run()
171
+ self.legacy.update()
@@ -6,10 +6,10 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.07.25 22:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
- from typing import Optional, Any
12
+ from typing import Any
13
13
 
14
14
  from pygpt_net.core.types import (
15
15
  MODE_AGENT,
@@ -31,79 +31,42 @@ class Experts:
31
31
  self.window = window
32
32
  self.is_stop = False
33
33
 
34
- def stop(self):
35
- """
36
- Stop experts
37
- """
38
- self.is_stop = True
39
-
40
- def stopped(self) -> bool:
41
- """
42
- Check if experts are stopped
43
-
44
- :return: True if experts are stopped
45
- """
46
- return self.is_stop
47
-
48
- def unlock(self):
49
- """Unlock experts"""
50
- self.is_stop = False
51
-
52
- def enabled(self, check_inline = True) -> bool:
53
- """
54
- Check if experts are enabled
55
-
56
- :param check_inline: check inline mode
57
- :return: True if experts are enabled
58
- """
59
- modes = [MODE_EXPERT]
60
- mode = self.window.core.config.get('mode')
61
- if not check_inline:
62
- if mode in modes:
63
- return True
64
- else:
65
- return False
66
- else:
67
- if mode in modes or self.window.controller.plugins.is_type_enabled("expert"):
68
- return True
69
- else:
70
- return False
71
-
72
34
  def append_prompts(
73
35
  self,
74
36
  mode: str,
75
37
  sys_prompt: str,
76
- parent_id: Optional[str] = None
77
- ):
38
+ ) -> str:
78
39
  """
79
40
  Append prompt to the window
80
41
 
81
42
  :param mode: Mode
82
43
  :param sys_prompt: Prompt text
83
- :param parent_id: Parent ID
44
+ :return: Updated system prompt
84
45
  """
46
+ core = self.window.core
47
+ controller = self.window.controller
48
+
85
49
  # if agent enabled
86
- if self.window.controller.agent.legacy.enabled():
50
+ if controller.agent.legacy.enabled():
87
51
  prev_prompt = sys_prompt
88
- sys_prompt = self.window.core.prompt.get("agent.instruction")
52
+ sys_prompt = core.prompt.get("agent.instruction")
89
53
  if prev_prompt is not None and prev_prompt.strip() != "":
90
54
  sys_prompt = sys_prompt + "\n\n" + prev_prompt # append previous prompt
91
55
 
92
- # expert or agent mode
93
- if ((self.enabled() or self.window.controller.agent.legacy.enabled(check_inline=False))
94
- and parent_id is None): # master expert has special prompt
95
- if self.window.controller.agent.legacy.enabled(): # if agent then leave agent prompt
96
- sys_prompt += "\n\n" + self.window.core.experts.get_prompt() # both, agent + experts
56
+ # if expert or agent mode
57
+ if self.enabled() or controller.agent.legacy.enabled(check_inline=False): # master expert has special prompt
58
+ if controller.agent.legacy.enabled(): # if agent then leave agent prompt
59
+ sys_prompt += "\n\n" + core.experts.get_prompt() # both, agent + experts
97
60
  else:
98
- sys_prompt = self.window.core.experts.get_prompt()
61
+ sys_prompt = core.experts.get_prompt()
99
62
  # mode = "chat" # change mode to chat for expert
100
63
 
101
64
  # if global mode is agent
102
65
  if mode == MODE_AGENT:
103
- sys_prompt = self.window.controller.agent.legacy.on_system_prompt(
66
+ sys_prompt = controller.agent.legacy.on_system_prompt(
104
67
  sys_prompt,
105
68
  append_prompt=None, # sys prompt from preset is used here
106
- auto_stop=self.window.core.config.get('agent.auto_stop'),
69
+ auto_stop=core.config.get('agent.auto_stop'),
107
70
  )
108
71
 
109
72
  return sys_prompt
@@ -113,60 +76,94 @@ class Experts:
113
76
  Handle mentions (calls) to experts
114
77
 
115
78
  :param ctx: CtxItem
79
+ :return: Number of calls made to experts
116
80
  """
117
- stream_mode = self.window.core.config.get('stream')
81
+ core = self.window.core
82
+ controller = self.window.controller
83
+ dispatch = self.window.dispatch
84
+ log = self.log
85
+ stream = core.config.get('stream')
118
86
  num_calls = 0
119
87
 
120
88
  # extract expert mentions
121
- if self.enabled() or self.window.controller.agent.legacy.enabled(check_inline=False):
89
+ if self.enabled() or controller.agent.legacy.enabled(check_inline=False):
122
90
  # re-send to master
123
91
  if ctx.sub_reply:
124
- self.window.core.ctx.update_item(ctx)
125
- self.window.core.experts.reply(ctx)
92
+ core.ctx.update_item(ctx)
93
+ core.experts.reply(ctx)
126
94
  else:
95
+ # abort if reply
96
+ if ctx.reply:
97
+ return num_calls
98
+
127
99
  # call experts
128
- if not ctx.reply:
129
- mentions = self.window.core.experts.extract_calls(ctx)
130
- if mentions:
131
- num_calls = 0
132
- self.log("Calling experts...")
133
- data = {
134
- "meta": ctx.meta,
135
- "ctx": ctx,
136
- "stream": stream_mode,
137
- }
138
- event = RenderEvent(RenderEvent.END, data)
139
- self.window.dispatch(event) # close previous render
140
- for expert_id in mentions:
141
- if not self.window.core.experts.exists(expert_id):
142
- self.log("Expert not found: " + expert_id)
143
- continue
144
- self.log("Calling: " + expert_id)
145
- ctx.sub_calls += 1
146
-
147
- # add to reply stack
148
- reply = ReplyContext()
149
- reply.type = ReplyContext.EXPERT_CALL
150
- reply.ctx = ctx
151
- reply.parent_id = expert_id
152
- reply.input = mentions[expert_id]
153
-
154
- # send to kernel
155
- context = BridgeContext()
156
- context.ctx = ctx
157
- context.reply_context = reply
158
- event = KernelEvent(KernelEvent.AGENT_CALL, {
159
- 'context': context,
160
- 'extra': {},
161
- })
162
- self.window.dispatch(event)
163
-
164
- num_calls += 1
165
- if num_calls > 0:
166
- return num_calls # abort continue if expert call detected
100
+ mentions = core.experts.extract_calls(ctx)
101
+
102
+ if mentions:
103
+ log("Calling experts...")
104
+ dispatch(RenderEvent(RenderEvent.END, {
105
+ "meta": ctx.meta,
106
+ "ctx": ctx,
107
+ "stream": stream,
108
+ })) # close previous render
109
+
110
+ for expert_id in mentions:
111
+ if not core.experts.exists(expert_id):
112
+ log(f"Expert not found: {expert_id}")
113
+ continue
114
+
115
+ log(f"Calling: {expert_id}")
116
+ ctx.sub_calls += 1
117
+
118
+ # add to reply stack
119
+ reply = ReplyContext()
120
+ reply.type = ReplyContext.EXPERT_CALL
121
+ reply.ctx = ctx
122
+ reply.parent_id = expert_id
123
+ reply.input = mentions[expert_id]
124
+
125
+ # send to kernel
126
+ context = BridgeContext()
127
+ context.ctx = ctx
128
+ context.reply_context = reply
129
+ dispatch(KernelEvent(KernelEvent.AGENT_CALL, {
130
+ 'context': context,
131
+ 'extra': {},
132
+ }))
133
+ num_calls += 1
167
134
 
168
135
  return num_calls
169
136
 
137
+ def enabled(self, check_inline: bool = True) -> bool:
138
+ """
139
+ Check if experts are enabled
140
+
141
+ :param check_inline: check inline mode
142
+ :return: True if experts are enabled
143
+ """
144
+ modes = [MODE_EXPERT]
145
+ mode = self.window.core.config.get('mode')
146
+ if not check_inline:
147
+ return mode in modes
148
+ else:
149
+ return mode in modes or self.window.controller.plugins.is_type_enabled("expert")
150
+
151
+ def stopped(self) -> bool:
152
+ """
153
+ Check if experts are stopped
154
+
155
+ :return: True if experts are stopped
156
+ """
157
+ return self.is_stop
158
+
159
+ def stop(self):
160
+ """Stop experts"""
161
+ self.is_stop = True
162
+
163
+ def unlock(self):
164
+ """Unlock experts"""
165
+ self.is_stop = False
166
+
170
167
  def log(self, data: Any):
171
168
  """
172
169
  Log data to debug
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.01 03:00:00 #
9
+ # Updated Date: 2025.08.24 02:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Any
@@ -136,6 +136,7 @@ class Llama:
136
136
  # check max steps
137
137
  max_steps = int(self.window.core.config.get("agent.llama.max_eval"))
138
138
  if max_steps != 0 and self.get_eval_step() >= max_steps:
139
+ self.window.update_status(f"Stopped. Limit of max steps: {max_steps}") # show info
139
140
  self.on_end()
140
141
  return # abort if max steps reached
141
142
 
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.03 14:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional
@@ -56,6 +56,23 @@ class Assistant:
56
56
  self.files.update_list()
57
57
  self.select_current()
58
58
 
59
+ def check(self) -> bool:
60
+ """
61
+ Check if an assistant is selected
62
+
63
+ :return: True if an assistant is selected
64
+ """
65
+ # check if assistant is selected
66
+ if self.window.core.config.get('assistant') is None \
67
+ or self.window.core.config.get('assistant') == "":
68
+ self.window.ui.dialogs.alert(trans('error.assistant_not_selected'))
69
+ return False
70
+ return True
71
+
72
+ def resume(self):
73
+ """Reset assistants state"""
74
+ self.threads.stop = False
75
+
59
76
  def update_list(self):
60
77
  """Update assistants list"""
61
78
  items = self.window.core.assistants.get_all()
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2024.12.16 01:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import os
@@ -79,6 +79,22 @@ class Attachment:
79
79
  # update tokens counter (vision plugin, etc.)
80
80
  self.window.controller.ui.update_tokens()
81
81
 
82
+ def cleanup(self, ctx: CtxItem) -> bool:
83
+ """
84
+ Clear attachments list on ctx end
85
+
86
+ :param ctx: CtxItem
87
+ :return: True if cleared
88
+ """
89
+ auto_clear = self.window.core.config.get('attachments_send_clear')
90
+ if self.clear_allowed(ctx):
91
+ if auto_clear and not self.is_locked():
92
+ self.clear(force=True, auto=True)
93
+ self.update()
94
+ self.window.controller.chat.log("Attachments cleared.") # log
95
+ return True
96
+ return False
97
+
82
98
  def update_tab(self, mode: str):
83
99
  """
84
100
  Update tab label
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.06 01:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import datetime
@@ -19,6 +19,7 @@ from PySide6.QtGui import QImage, QPixmap, Qt
19
19
 
20
20
  from pygpt_net.core.events import AppEvent, KernelEvent
21
21
  from pygpt_net.core.camera import CaptureWorker
22
+ from pygpt_net.core.types import MODE_ASSISTANT
22
23
  from pygpt_net.utils import trans
23
24
 
24
25
 
@@ -145,12 +146,19 @@ class Camera(QObject):
145
146
 
146
147
  return result
147
148
 
148
- def handle_auto_capture(self):
149
- """Handle auto capture"""
150
- if self.is_enabled():
151
- if self.is_auto():
152
- self.capture_frame(switch=False)
153
- self.window.controller.chat.log("Captured frame from camera.") # log
149
+ def handle_auto_capture(self, mode: str):
150
+ """
151
+ Handle auto capture
152
+
153
+ :param mode: current mode
154
+ """
155
+ if mode == MODE_ASSISTANT:
156
+ return # abort in Assistants mode
157
+ if self.window.controller.ui.vision.has_vision():
158
+ if self.is_enabled():
159
+ if self.is_auto():
160
+ self.capture_frame(switch=False)
161
+ self.window.controller.chat.log("Captured frame from camera.") # log
154
162
 
155
163
  def get_current_frame(self, flip_colors: bool = True):
156
164
  """
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.15 23:00:00 #
9
+ # Updated Date: 2025.08.23 15:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Any
@@ -94,7 +94,7 @@ class Chat:
94
94
  """
95
95
  upper_mode = mode.upper()
96
96
  if self.window.core.config.get("log.ctx"):
97
- self.log(f"[ctx] {upper_mode}: {ctx.dump()}") # log
97
+ self.log(f"[ctx] {upper_mode}: {ctx.dump()}")
98
98
  else:
99
99
  self.log(f"[ctx] {upper_mode}.")
100
100