pygpt-net 2.6.17.post1__py3-none-any.whl → 2.6.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32):
  1. pygpt_net/CHANGELOG.txt +10 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +2 -0
  4. pygpt_net/controller/chat/output.py +2 -2
  5. pygpt_net/controller/chat/response.py +17 -28
  6. pygpt_net/core/agents/observer/evaluation.py +9 -9
  7. pygpt_net/core/agents/runner.py +53 -42
  8. pygpt_net/core/agents/tools.py +59 -40
  9. pygpt_net/core/experts/experts.py +32 -366
  10. pygpt_net/core/experts/worker.py +362 -0
  11. pygpt_net/core/idx/chat.py +49 -82
  12. pygpt_net/core/idx/context.py +10 -14
  13. pygpt_net/core/idx/idx.py +19 -8
  14. pygpt_net/core/idx/indexing.py +35 -38
  15. pygpt_net/core/idx/response.py +91 -2
  16. pygpt_net/core/idx/worker.py +3 -8
  17. pygpt_net/core/render/web/body.py +18 -1
  18. pygpt_net/core/render/web/renderer.py +28 -13
  19. pygpt_net/core/types/__init__.py +2 -1
  20. pygpt_net/core/types/tools.py +22 -0
  21. pygpt_net/data/config/config.json +4 -4
  22. pygpt_net/data/config/models.json +3 -3
  23. pygpt_net/data/config/presets/current.llama_index.json +1 -1
  24. pygpt_net/provider/agents/openai/evolve.py +1 -1
  25. pygpt_net/provider/gpt/summarizer.py +4 -0
  26. pygpt_net/ui/widget/filesystem/explorer.py +7 -9
  27. pygpt_net/utils.py +2 -0
  28. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/METADATA +12 -2
  29. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/RECORD +32 -30
  30. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/LICENSE +0 -0
  31. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/WHEEL +0 -0
  32. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/entry_points.txt +0 -0
pygpt_net/CHANGELOG.txt CHANGED
@@ -1,3 +1,13 @@
1
+ 2.6.19 (2025-08-22)
2
+
3
+ - Fixed: added prevention for summarizing an empty context.
4
+ - Improved the speed of context item refreshing.
5
+
6
+ 2.6.18 (2025-08-21)
7
+
8
+ - Refactor and optimizations.
9
+ - Fix: Evolve agent stop event calling.
10
+
1
11
  2.6.17 (2025-08-21)
2
12
 
3
13
  - Optimized profile switching.
pygpt_net/__init__.py CHANGED
@@ -6,15 +6,15 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.21 00:00:00 #
9
+ # Updated Date: 2025.08.22 00:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  __author__ = "Marcin Szczygliński"
13
13
  __copyright__ = "Copyright 2025, Marcin Szczygliński"
14
14
  __credits__ = ["Marcin Szczygliński"]
15
15
  __license__ = "MIT"
16
- __version__ = "2.6.17"
17
- __build__ = "2025-08-21"
16
+ __version__ = "2.6.19"
17
+ __build__ = "2025-08-22"
18
18
  __maintainer__ = "Marcin Szczygliński"
19
19
  __github__ = "https://github.com/szczyglis-dev/py-gpt"
20
20
  __report__ = "https://github.com/szczyglis-dev/py-gpt/issues"
pygpt_net/app.py CHANGED
@@ -25,12 +25,14 @@ if platform.system() == 'Windows':
25
25
  # enable debug logging
26
26
  # os.environ["QT_LOGGING_RULES"] = "*.debug=true"
27
27
  # os.environ["QTWEBENGINE_REMOTE_DEBUGGING"] = "9222"
28
+ """
28
29
  os.environ["QTWEBENGINE_CHROMIUM_FLAGS"] = (
29
30
  "--renderer-process-limit=1 "
30
31
  "--process-per-site "
31
32
  "--enable-precise-memory-info "
32
33
  "--js-flags=--expose-gc"
33
34
  )
35
+ """
34
36
 
35
37
  _original_open = builtins.open
36
38
 
@@ -291,8 +291,8 @@ class Output:
291
291
 
292
292
  if mode != MODE_ASSISTANT:
293
293
  self.window.controller.kernel.stack.handle() # handle reply
294
- # event = RenderEvent(RenderEvent.RELOAD)
295
- # self.window.dispatch(event) # reload chat window
294
+ event = RenderEvent(RenderEvent.RELOAD)
295
+ self.window.dispatch(event) # reload chat window
296
296
 
297
297
  mem_clean()
298
298
 
@@ -103,7 +103,6 @@ class Response:
103
103
  extra["error"] = e
104
104
  self.failed(context, extra)
105
105
 
106
-
107
106
  if stream:
108
107
  if mode not in self.window.controller.chat.output.not_stream_modes:
109
108
  return # handled in stream:handleEnd()
@@ -205,7 +204,6 @@ class Response:
205
204
  # if next in agent cycle
206
205
  if ctx.partial:
207
206
  self.window.dispatch(AppEvent(AppEvent.CTX_END)) # app event
208
- self.window.dispatch(RenderEvent(RenderEvent.RELOAD)) # reload chat window
209
207
 
210
208
  # handle current step
211
209
  ctx.current = False # reset current state
@@ -214,33 +212,33 @@ class Response:
214
212
  internal = ctx.internal
215
213
 
216
214
  self.window.core.ctx.set_last_item(ctx)
217
- data = {
215
+ event = RenderEvent(RenderEvent.BEGIN, {
218
216
  "meta": ctx.meta,
219
217
  "ctx": ctx,
220
218
  "stream": stream,
221
- }
222
- event = RenderEvent(RenderEvent.BEGIN, data)
219
+ })
223
220
  self.window.dispatch(event)
224
221
 
225
222
  # append step input to chat window
226
- data = {
223
+ event = RenderEvent(RenderEvent.INPUT_APPEND, {
227
224
  "meta": ctx.meta,
228
225
  "ctx": ctx,
229
- }
230
- event = RenderEvent(RenderEvent.INPUT_APPEND, data)
226
+ })
231
227
  self.window.dispatch(event)
228
+
229
+ # CTX OUTPUT INFO:
230
+ # - ctx.output may be empty here if stream in OpenAI agents
231
+ # - ctx.live_output may be used against output in LlamaIndex agents
232
232
  if ctx.id is None:
233
233
  self.window.core.ctx.add(ctx)
234
- self.window.controller.ctx.update(
235
- reload=True,
236
- all=False,
237
- )
238
- self.window.core.ctx.update_item(ctx)
234
+ else:
235
+ self.window.core.ctx.update_item(ctx)
239
236
 
240
237
  # update ctx meta
241
238
  if mode in (MODE_AGENT_LLAMA, MODE_AGENT_OPENAI) and ctx.meta is not None:
242
- self.window.core.ctx.replace(ctx.meta)
239
+ self.window.core.ctx.replace(ctx.meta) # update meta in items
243
240
  self.window.core.ctx.save(ctx.meta.id)
241
+
244
242
  # update preset if exists
245
243
  preset = self.window.controller.presets.get_current()
246
244
  if preset is not None:
@@ -257,31 +255,22 @@ class Response:
257
255
 
258
256
  # post-handle, execute cmd, etc.
259
257
  self.window.controller.chat.output.post_handle(ctx, mode, stream, reply, internal)
260
-
261
- data = {
262
- "meta": ctx.meta,
263
- }
264
- event = RenderEvent(RenderEvent.TOOL_BEGIN, data)
265
- self.window.dispatch(event) # show cmd waiting
266
258
  self.window.controller.chat.output.handle_end(ctx, mode) # handle end.
267
259
 
268
- data = {
269
- "meta": ctx.meta,
270
- "ctx": ctx,
271
- "stream": self.window.core.config.get("stream", False),
272
- }
273
- event = RenderEvent(RenderEvent.END, data)
260
+ event = RenderEvent(RenderEvent.RELOAD)
274
261
  self.window.dispatch(event)
275
262
 
276
263
  # if continue reasoning
277
264
  if global_mode not in (MODE_AGENT_LLAMA, MODE_AGENT_OPENAI):
278
265
  return # no agent mode, nothing to do
279
266
 
280
- if ctx.extra is None or (type(ctx.extra) == dict and "agent_finish" not in ctx.extra):
267
+ # not agent final response
268
+ if ctx.extra is None or (isinstance(ctx.extra, dict) and "agent_finish" not in ctx.extra):
281
269
  self.window.update_status(trans("status.agent.reasoning"))
282
270
  self.window.controller.chat.common.lock_input() # lock input, re-enable stop button
283
271
 
284
- if ctx.extra is not None and (type(ctx.extra) == dict and "agent_finish" in ctx.extra):
272
+ # agent final response
273
+ if ctx.extra is not None and (isinstance(ctx.extra, dict) and "agent_finish" in ctx.extra):
285
274
  self.window.controller.agent.llama.on_finish(ctx) # evaluate response and continue if needed
286
275
 
287
276
  def end(
@@ -139,9 +139,8 @@ class Evaluation:
139
139
  for ctx in history:
140
140
  if self.is_input(ctx): # ensure ctx is input
141
141
  if not use_prev and "agent_evaluate" in ctx.extra: # exclude evaluation inputs
142
- continue
143
- if ctx.input:
144
- last_input = ctx.input
142
+ continue
143
+ last_input = ctx.input
145
144
  return last_input
146
145
 
147
146
  def is_input(self, ctx: CtxItem) -> bool:
@@ -151,7 +150,7 @@ class Evaluation:
151
150
  :param ctx: context item
152
151
  :return: True if input, False otherwise
153
152
  """
154
- return ctx.extra is not None and "agent_input" in ctx.extra
153
+ return ctx.extra is not None and "agent_input" in ctx.extra and ctx.input
155
154
 
156
155
  def is_output(self, ctx: CtxItem) -> bool:
157
156
  """
@@ -174,9 +173,8 @@ class Evaluation:
174
173
  task = ""
175
174
  for ctx in history:
176
175
  if self.is_input(ctx):
177
- if ctx.input:
178
- task = ctx.input
179
- break
176
+ task = ctx.input
177
+ break
180
178
  return task
181
179
 
182
180
  def get_final_response(self, history: List[CtxItem]) -> str:
@@ -187,14 +185,16 @@ class Evaluation:
187
185
  :return: final response from agent
188
186
  """
189
187
  outputs = []
188
+ i = 0
190
189
  for ctx in history:
191
- # if next input then clear outputs - use only output after last user input
192
- if self.is_input(ctx):
190
+ # if next input (but not last) then clear outputs - use only output after last user input
191
+ if self.is_input(ctx) and i < len(history) - 1:
193
192
  outputs.clear()
194
193
 
195
194
  if self.is_output(ctx):
196
195
  if ctx.output:
197
196
  outputs.append(ctx.output)
197
+ i += 1
198
198
 
199
199
  return "\n\n".join(outputs) if outputs else ""
200
200
 
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.08.14 13:00:00 #
9
+ # Updated Date: 2025.08.21 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import asyncio
@@ -39,6 +39,7 @@ class Runner:
39
39
  APPEND_SYSTEM_PROMPT_TO_MSG = [
40
40
  "react", # llama-index
41
41
  ]
42
+ ADDITIONAL_CONTEXT_PREFIX = "ADDITIONAL CONTEXT:"
42
43
 
43
44
  def __init__(self, window=None):
44
45
  """
@@ -76,9 +77,13 @@ class Runner:
76
77
  return True # abort if stopped
77
78
 
78
79
  agent_id = extra.get("agent_provider", "openai")
79
- verbose = self.window.core.config.get("agent.llama.verbose", False)
80
+ verbose = self.is_verbose()
80
81
 
81
82
  try:
83
+ # first, check if agent exists
84
+ if not self.window.core.agents.provider.has(agent_id):
85
+ raise Exception(f"Agent not found: {agent_id}")
86
+
82
87
  # prepare input ctx
83
88
  ctx = context.ctx
84
89
  ctx.extra["agent_input"] = True # mark as user input
@@ -102,8 +107,22 @@ class Runner:
102
107
  vector_store_idx = preset.idx
103
108
 
104
109
  # tools
105
- self.window.core.agents.tools.context = context
106
- self.window.core.agents.tools.agent_idx = vector_store_idx
110
+ agent_tools = self.window.core.agents.tools
111
+ agent_tools.set_context(context)
112
+ agent_tools.set_idx(vector_store_idx)
113
+
114
+ tools = agent_tools.prepare(context, extra, force=True)
115
+ function_tools = agent_tools.get_function_tools(ctx, extra, force=True)
116
+ plugin_tools = agent_tools.get_plugin_tools(context, extra, force=True)
117
+ plugin_specs = agent_tools.get_plugin_specs(context, extra, force=True)
118
+ retriever_tool = agent_tools.get_retriever_tool(context, extra)
119
+
120
+ # disable tools if cmd is not enabled
121
+ if not is_cmd:
122
+ function_tools = []
123
+ plugin_tools = []
124
+ plugin_specs = []
125
+ tools = []
107
126
 
108
127
  # --- ADDITIONAL CONTEXT ---
109
128
  # append additional context from RAG if available
@@ -118,24 +137,11 @@ class Runner:
118
137
  if ctx.hidden_input is None:
119
138
  ctx.hidden_input = ""
120
139
  if not ctx.hidden_input: # may be not empty (appended before from attachments)
121
- to_append = "ADDITIONAL CONTEXT:"
140
+ to_append = self.ADDITIONAL_CONTEXT_PREFIX
122
141
  ctx.hidden_input += to_append
123
142
  to_append += "\n" + ad_context
124
143
  ctx.hidden_input += to_append
125
144
  prompt += "\n\n" + to_append
126
-
127
- tools = self.window.core.agents.tools.prepare(context, extra, force=True)
128
- function_tools = self.window.core.agents.tools.get_function_tools(ctx, extra, force=True)
129
- plugin_tools = self.window.core.agents.tools.get_plugin_tools(context, extra, force=True)
130
- plugin_specs = self.window.core.agents.tools.get_plugin_specs(context, extra, force=True)
131
- retriever_tool = self.window.core.agents.tools.get_retriever_tool(context, extra)
132
-
133
- # disable tools if cmd is not enabled
134
- if not is_cmd:
135
- function_tools = []
136
- plugin_tools = []
137
- plugin_specs = []
138
- tools = []
139
145
 
140
146
  # append system prompt
141
147
  if agent_id in self.APPEND_SYSTEM_PROMPT_TO_MSG:
@@ -164,14 +170,11 @@ class Runner:
164
170
  "preset": context.preset if context else None,
165
171
  }
166
172
 
167
- if self.window.core.agents.provider.has(agent_id):
168
- provider = self.window.core.agents.provider.get(agent_id)
169
- agent = provider.get_agent(self.window, agent_kwargs)
170
- agent_run = provider.run
171
- if verbose:
172
- print("Using Agent: " + str(agent_id) + ", model: " + str(model.id))
173
- else:
174
- raise Exception("Agent not found: " + str(agent_id))
173
+ provider = self.window.core.agents.provider.get(agent_id)
174
+ agent = provider.get_agent(self.window, agent_kwargs)
175
+ agent_run = provider.run
176
+ if verbose:
177
+ print(f"Using Agent: {agent_id}, model: {model.id}")
175
178
 
176
179
  # run agent
177
180
  mode = provider.get_mode()
@@ -200,8 +203,7 @@ class Runner:
200
203
  return asyncio.run(self.openai_workflow.run(**kwargs))
201
204
 
202
205
  except Exception as e:
203
- print("Error in agent runner:", e)
204
- self.window.core.debug.error(e)
206
+ self.window.core.debug.log(e)
205
207
  self.last_error = e
206
208
  return False
207
209
 
@@ -223,9 +225,13 @@ class Runner:
223
225
  return True # abort if stopped
224
226
 
225
227
  agent_id = extra.get("agent_provider", "openai")
226
- verbose = self.window.core.config.get("agent.llama.verbose", False)
228
+ verbose = self.is_verbose()
227
229
 
228
230
  try:
231
+ # first, check if agent exists
232
+ if not self.window.core.agents.provider.has(agent_id):
233
+ raise Exception(f"Agent not found: {agent_id}")
234
+
229
235
  # prepare input ctx
230
236
  ctx = context.ctx
231
237
  ctx.extra["agent_input"] = True # mark as user input
@@ -244,13 +250,14 @@ class Runner:
244
250
  workdir = self.window.core.config.get_workdir_prefix()
245
251
 
246
252
  # tools
247
- self.window.core.agents.tools.context = context
248
- self.window.core.agents.tools.agent_idx = vector_store_idx
253
+ agent_tools = self.window.core.agents.tools
254
+ agent_tools.set_context(context)
255
+ agent_tools.set_idx(vector_store_idx)
249
256
 
250
257
  if "agent_tools" in extra:
251
258
  tools = extra["agent_tools"] # use tools from extra if provided
252
259
  else:
253
- tools = self.window.core.agents.tools.prepare(context, extra, force=True)
260
+ tools = agent_tools.prepare(context, extra, force=True)
254
261
  if not is_cmd:
255
262
  tools = [] # disable tools if cmd is not enabled, force agent tools
256
263
 
@@ -282,13 +289,10 @@ class Runner:
282
289
  "preset": context.preset if context else None,
283
290
  }
284
291
 
285
- if self.window.core.agents.provider.has(agent_id):
286
- provider = self.window.core.agents.provider.get(agent_id)
287
- agent = provider.get_agent(self.window, agent_kwargs)
288
- if verbose:
289
- print("Using Agent: " + str(agent_id) + ", model: " + str(model.id))
290
- else:
291
- raise Exception("Agent not found: " + str(agent_id))
292
+ provider = self.window.core.agents.provider.get(agent_id)
293
+ agent = provider.get_agent(self.window, agent_kwargs)
294
+ if verbose:
295
+ print(f"Using Agent: {agent_id}, model: {model.id}")
292
296
 
293
297
  # run agent and return result
294
298
  mode = provider.get_mode()
@@ -304,12 +308,19 @@ class Runner:
304
308
  }
305
309
  # TODO: add support for other modes
306
310
  if mode == AGENT_MODE_WORKFLOW:
307
- return asyncio.run(self.llama_workflow.run_once(**kwargs))
311
+ return asyncio.run(self.llama_workflow.run_once(**kwargs)) # return CtxItem
308
312
 
309
313
  except Exception as e:
310
- self.window.core.debug.error(e)
314
+ self.window.core.debug.log(e)
311
315
  self.last_error = e
312
- return None
316
+
317
+ def is_verbose(self) -> bool:
318
+ """
319
+ Check if verbose mode is enabled
320
+
321
+ :return: True if verbose mode is enabled
322
+ """
323
+ return self.window.core.config.get("agent.llama.verbose", False)
313
324
 
314
325
  def get_error(self) -> Optional[Exception]:
315
326
  """
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.07.30 00:00:00 #
9
+ # Updated Date: 2025.08.21 07:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -21,17 +21,16 @@ from llama_index.core.tools import BaseTool, FunctionTool, QueryEngineTool, Tool
21
21
 
22
22
  from pygpt_net.core.bridge.context import BridgeContext
23
23
  from pygpt_net.core.events import Event
24
+ from pygpt_net.core.types import (
25
+ TOOL_QUERY_ENGINE_NAME,
26
+ TOOL_QUERY_ENGINE_DESCRIPTION,
27
+ TOOL_QUERY_ENGINE_SPEC,
28
+ )
24
29
  from pygpt_net.item.ctx import CtxItem
25
30
 
26
31
 
27
32
  class Tools:
28
33
 
29
- QUERY_ENGINE_TOOL_NAME = "rag_get_context"
30
- QUERY_ENGINE_TOOL_DESCRIPTION = "Get additional context for provided question. Use this whenever you need additional context to provide an answer. "
31
- QUERY_ENGINE_TOOL_SPEC = ("**"+QUERY_ENGINE_TOOL_NAME+"**: "
32
- + QUERY_ENGINE_TOOL_DESCRIPTION +
33
- "available params: {'query': {'type': 'string', 'description': 'query string'}}, required: [query]")
34
-
35
34
  def __init__(self, window=None):
36
35
  """
37
36
  Agent tools
@@ -97,7 +96,7 @@ class Tools:
97
96
 
98
97
  # add query engine tool if idx is provided
99
98
  idx = extra.get("agent_idx", None)
100
- if idx is not None and idx != "_":
99
+ if self.window.core.idx.is_valid(idx):
101
100
  llm, embed_model = self.window.core.idx.llm.get_service_context(model=context.model)
102
101
  index = self.window.core.idx.storage.get(idx, llm, embed_model) # get index
103
102
  if index is not None:
@@ -106,8 +105,8 @@ class Tools:
106
105
  QueryEngineTool(
107
106
  query_engine=query_engine,
108
107
  metadata=ToolMetadata(
109
- name=self.QUERY_ENGINE_TOOL_NAME,
110
- description=self.QUERY_ENGINE_TOOL_DESCRIPTION,
108
+ name=TOOL_QUERY_ENGINE_NAME,
109
+ description=TOOL_QUERY_ENGINE_DESCRIPTION,
111
110
  ),
112
111
  ),
113
112
  ]
@@ -127,7 +126,7 @@ class Tools:
127
126
  """
128
127
  async def run_function(run_ctx: RunContextWrapper[Any], args: str) -> str:
129
128
  name = run_ctx.tool_name
130
- print("[Plugin] Tool call: " + name + " with args: " + str(args))
129
+ print(f"[Plugin] Tool call: {name} with args: {args}")
131
130
  cmd = {
132
131
  "cmd": name,
133
132
  "params": json.loads(args) # args should be a JSON string
@@ -140,9 +139,9 @@ class Tools:
140
139
  "description": "The query string to search in the index."
141
140
  }
142
141
  }, "additionalProperties": False}
143
- description = self.QUERY_ENGINE_TOOL_DESCRIPTION + f" Index: {idx}"
142
+ description = TOOL_QUERY_ENGINE_DESCRIPTION + f" Index: {idx}"
144
143
  return OpenAIFunctionTool(
145
- name=self.QUERY_ENGINE_TOOL_NAME,
144
+ name=TOOL_QUERY_ENGINE_NAME,
146
145
  description=description,
147
146
  params_json_schema=schema,
148
147
  on_invoke_tool=run_function,
@@ -153,14 +152,14 @@ class Tools:
153
152
  ctx: CtxItem,
154
153
  verbose: bool = False,
155
154
  force: bool = False
156
- ) -> list:
155
+ ) -> List[BaseTool]:
157
156
  """
158
157
  Parse plugin functions
159
158
 
160
159
  :param ctx: CtxItem
161
160
  :param verbose: verbose mode
162
161
  :param force: force to get functions even if not needed
163
- :return: List of functions
162
+ :return: List of BaseTool instances
164
163
  """
165
164
  tools = []
166
165
  functions = self.window.core.command.get_functions(force=force)
@@ -175,7 +174,7 @@ class Tools:
175
174
 
176
175
  def make_func(name, description):
177
176
  def func(**kwargs):
178
- self.log("[Plugin] Tool call: " + name + " " + str(kwargs))
177
+ self.log(f"[Plugin] Tool call: {name} {kwargs}")
179
178
  cmd = {
180
179
  "cmd": name,
181
180
  "params": kwargs,
@@ -202,7 +201,7 @@ class Tools:
202
201
  )
203
202
  tools.append(tool)
204
203
  except Exception as e:
205
- print(e)
204
+ self.window.core.debug.log(e)
206
205
  return tools
207
206
 
208
207
  def get_function_tools(
@@ -210,7 +209,7 @@ class Tools:
210
209
  ctx: CtxItem,
211
210
  verbose: bool = False,
212
211
  force: bool = False
213
- ) -> list:
212
+ ) -> List[OpenAIFunctionTool]:
214
213
  """
215
214
  Parse plugin functions and return as OpenAI FunctionTool instances
216
215
 
@@ -231,7 +230,7 @@ class Tools:
231
230
 
232
231
  async def run_function(run_ctx: RunContextWrapper[Any], args: str) -> str:
233
232
  name = run_ctx.tool_name
234
- print("[Plugin] Tool call: " + name + " with args: " + str(args))
233
+ print(f"[Plugin] Tool call: {name} with args: {args}")
235
234
  cmd = {
236
235
  "cmd": name,
237
236
  "params": json.loads(args) # args should be a JSON string
@@ -268,10 +267,10 @@ class Tools:
268
267
  )
269
268
  tools.append(tool)
270
269
  except Exception as e:
271
- print(e)
270
+ self.window.core.debug.log(e)
272
271
 
273
272
  # append query engine tool if idx is provided
274
- if self.agent_idx is not None and self.agent_idx != "_":
273
+ if self.window.core.idx.is_valid(self.agent_idx):
275
274
  tools.append(self.get_openai_retriever_tool(self.agent_idx))
276
275
 
277
276
  return tools
@@ -282,7 +281,7 @@ class Tools:
282
281
  extra: Dict[str, Any],
283
282
  verbose: bool = False,
284
283
  force: bool = False
285
- ) -> dict:
284
+ ) -> Dict[str, BaseTool]:
286
285
  """
287
286
  Parse plugin functions
288
287
 
@@ -290,6 +289,7 @@ class Tools:
290
289
  :param extra: extra data
291
290
  :param verbose: verbose mode
292
291
  :param force: force to get functions even if not needed
292
+ :return: Dictionary of tool names and BaseTool instances
293
293
  """
294
294
  tools = {}
295
295
  functions = self.window.core.command.get_functions(force=force)
@@ -303,7 +303,7 @@ class Tools:
303
303
 
304
304
  def make_func(name, description):
305
305
  def func(**kwargs):
306
- self.log("[Plugin] Tool call: " + name + " " + str(kwargs))
306
+ self.log(f"[Plugin] Tool call: {name} {kwargs}")
307
307
  cmd = {
308
308
  "cmd": name,
309
309
  "params": kwargs,
@@ -321,10 +321,10 @@ class Tools:
321
321
  func = make_func(name, description)
322
322
  tools[name] = func
323
323
  except Exception as e:
324
- print(e)
324
+ self.window.core.debug.log(e)
325
325
 
326
326
  # add query engine tool if idx is provided
327
- if self.agent_idx is not None and self.agent_idx != "_":
327
+ if self.window.core.idx.is_valid(self.agent_idx):
328
328
  extra = {
329
329
  "agent_idx": self.agent_idx, # agent index for query engine tool
330
330
  }
@@ -344,7 +344,7 @@ class Tools:
344
344
  extra: Dict[str, Any],
345
345
  verbose: bool = False,
346
346
  force: bool = False
347
- ) -> list:
347
+ ) -> List[str]:
348
348
  """
349
349
  Parse plugin functions
350
350
 
@@ -352,13 +352,14 @@ class Tools:
352
352
  :param extra: extra data
353
353
  :param verbose: verbose mode
354
354
  :param force: force to get functions even if not needed
355
+ :return: List of tool specifications as strings
355
356
  """
356
357
  specs = []
357
358
  functions = self.window.core.command.get_functions(force=force)
358
359
 
359
360
  # add query engine tool spec if idx is provided
360
- if self.agent_idx is not None and self.agent_idx != "_":
361
- specs.append(self.QUERY_ENGINE_TOOL_SPEC)
361
+ if self.window.core.idx.is_valid(self.agent_idx):
362
+ specs.append(TOOL_QUERY_ENGINE_SPEC)
362
363
 
363
364
  for func in functions:
364
365
  try:
@@ -371,7 +372,7 @@ class Tools:
371
372
  f"**{name}**: {description}, available params: {schema.get('properties', {})}, required: {schema.get('required', [])}\n"
372
373
  )
373
374
  except Exception as e:
374
- print(e)
375
+ self.window.core.debug.log(e)
375
376
  return specs
376
377
 
377
378
  def tool_exec(self, cmd: str, params: Dict[str, Any]) -> str:
@@ -382,22 +383,23 @@ class Tools:
382
383
  :param params: command parameters
383
384
  :return: command output
384
385
  """
385
- print("[Plugin] Tool call: " + cmd + " " + str(params))
386
+ print(f"[Plugin] Tool call: {cmd}, {params}")
387
+
386
388
  # special case for query engine tool
387
- if cmd == self.QUERY_ENGINE_TOOL_NAME:
389
+ if cmd == TOOL_QUERY_ENGINE_NAME:
388
390
  if "query" not in params:
389
391
  return "Query parameter is required for query_engine tool."
390
392
  if self.context is None:
391
393
  return "Context is not set for query_engine tool."
392
- if self.agent_idx is None or self.agent_idx == "_":
394
+ if not self.window.core.idx.is_valid(self.agent_idx):
393
395
  return "Agent index is not set for query_engine tool."
394
396
  llm, embed_model = self.window.core.idx.llm.get_service_context(model=self.context.model)
395
397
  index = self.window.core.idx.storage.get(self.agent_idx, llm, embed_model) # get index
396
398
  if index is not None:
397
399
  query_engine = index.as_query_engine(similarity_top_k=3)
398
400
  response = query_engine.query(params["query"])
399
- print("[Plugin] Query engine response: " + str(response))
400
- self.log("[Plugin] Query engine response: " + str(response))
401
+ print(f"[Plugin] Query engine response: {response}")
402
+ self.log(f"[Plugin] Query engine response: {response}")
401
403
  return str(response)
402
404
  else:
403
405
  return "Index not found for query_engine tool."
@@ -428,12 +430,13 @@ class Tools:
428
430
  """
429
431
  data = []
430
432
  for output in response.sources:
431
- item = {}
432
- item["ToolOutput"] = {
433
- "content": str(output.content),
434
- "tool_name": str(output.tool_name),
435
- "raw_input": str(output.raw_input),
436
- "raw_output": str(output.raw_output),
433
+ item = {
434
+ "ToolOutput": {
435
+ "content": str(output.content),
436
+ "tool_name": str(output.tool_name),
437
+ "raw_input": str(output.raw_input),
438
+ "raw_output": str(output.raw_output),
439
+ }
437
440
  }
438
441
  data.append(item)
439
442
  return data
@@ -499,6 +502,22 @@ class Tools:
499
502
  if clear:
500
503
  self.clear_last_tool_output() # clear after use
501
504
 
505
+ def set_idx(self, agent_idx: str):
506
+ """
507
+ Set agent index for query engine tool
508
+
509
+ :param agent_idx: agent index
510
+ """
511
+ self.agent_idx = agent_idx
512
+
513
+ def set_context(self, context: BridgeContext):
514
+ """
515
+ Set context for tool execution
516
+
517
+ :param context: BridgeContext instance
518
+ """
519
+ self.context = context
520
+
502
521
  def log(self, msg: str):
503
522
  """
504
523
  Log message