pygpt-net 2.6.17.post1__py3-none-any.whl → 2.6.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. pygpt_net/CHANGELOG.txt +10 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +2 -0
  4. pygpt_net/controller/chat/output.py +2 -2
  5. pygpt_net/controller/chat/response.py +17 -28
  6. pygpt_net/core/agents/observer/evaluation.py +9 -9
  7. pygpt_net/core/agents/runner.py +53 -42
  8. pygpt_net/core/agents/tools.py +59 -40
  9. pygpt_net/core/experts/experts.py +32 -366
  10. pygpt_net/core/experts/worker.py +362 -0
  11. pygpt_net/core/idx/chat.py +49 -82
  12. pygpt_net/core/idx/context.py +10 -14
  13. pygpt_net/core/idx/idx.py +19 -8
  14. pygpt_net/core/idx/indexing.py +35 -38
  15. pygpt_net/core/idx/response.py +91 -2
  16. pygpt_net/core/idx/worker.py +3 -8
  17. pygpt_net/core/render/web/body.py +18 -1
  18. pygpt_net/core/render/web/renderer.py +28 -13
  19. pygpt_net/core/types/__init__.py +2 -1
  20. pygpt_net/core/types/tools.py +22 -0
  21. pygpt_net/data/config/config.json +4 -4
  22. pygpt_net/data/config/models.json +3 -3
  23. pygpt_net/data/config/presets/current.llama_index.json +1 -1
  24. pygpt_net/provider/agents/openai/evolve.py +1 -1
  25. pygpt_net/provider/gpt/summarizer.py +4 -0
  26. pygpt_net/ui/widget/filesystem/explorer.py +7 -9
  27. pygpt_net/utils.py +2 -0
  28. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/METADATA +12 -2
  29. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/RECORD +32 -30
  30. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/LICENSE +0 -0
  31. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/WHEEL +0 -0
  32. {pygpt_net-2.6.17.post1.dist-info → pygpt_net-2.6.19.dist-info}/entry_points.txt +0 -0
pygpt_net/core/experts/worker.py (new file)
@@ -0,0 +1,362 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ================================================== #
+# This file is a part of PYGPT package               #
+# Website: https://pygpt.net                         #
+# GitHub:  https://github.com/szczyglis-dev/py-gpt   #
+# MIT License                                        #
+# Created By  : Marcin Szczygliński                  #
+# Updated Date: 2025.08.21 07:00:00                  #
+# ================================================== #
+
+from typing import List, Optional
+
+from PySide6.QtCore import QRunnable, QObject, Signal, Slot
+from llama_index.core.tools import QueryEngineTool
+
+from pygpt_net.core.types import (
+    MODE_EXPERT,
+    TOOL_EXPERT_CALL_NAME,
+)
+from pygpt_net.core.bridge.context import BridgeContext
+from pygpt_net.core.events import Event, KernelEvent, RenderEvent
+from pygpt_net.item.ctx import CtxItem
+
+
+class WorkerSignals(QObject):
+    """Signals for worker to communicate with main thread."""
+    finished = Signal()  # when worker is finished
+    response = Signal(object, str)  # when worker has response
+    error = Signal(str)  # when worker has error
+    event = Signal(object)  # when worker has event to dispatch
+    output = Signal(object, str)  # when worker has output to handle
+    lock_input = Signal()  # when worker locks input for UI
+    cmd = Signal(object, object, str, str, str)  # when worker has command to handle
+
+
+class ExpertWorker(QRunnable):
+    """Worker for handling expert calls in a separate thread."""
+
+    def __init__(
+            self,
+            window,
+            master_ctx: CtxItem,
+            expert_id: str,
+            query: str
+    ):
+        super().__init__()
+        self.window = window
+        self.master_ctx = master_ctx
+        self.expert_id = expert_id
+        self.query = query
+        self.signals = WorkerSignals()
+
+    @Slot()
+    def run(self):
+        master_ctx = self.master_ctx
+        expert_id = self.expert_id
+        query = self.query
+
+        try:
+            # get or create children (slave) meta
+            slave = self.window.core.ctx.get_or_create_slave_meta(master_ctx, expert_id)
+            expert = self.window.core.experts.get_expert(expert_id)  # preset
+            reply = True
+            hidden = False
+            internal = False
+
+            if self.window.core.experts.agent_enabled():  # hide in agent mode
+                internal = False
+                hidden = True
+
+            mode = self.window.core.config.get("mode")
+            base_mode = mode
+            model = expert.model
+            expert_name = expert.name
+            ai_name = ""
+            sys_prompt = expert.prompt
+            model_data = self.window.core.models.get(model)
+
+            files = []
+            file_ids = []
+            functions = []
+            tools_outputs = []
+
+            # from current config
+            max_tokens = self.window.core.config.get('max_output_tokens')
+            stream_mode = self.window.core.config.get('stream')
+            verbose = self.window.core.config.get('agent.llama.verbose')
+            use_agent = self.window.core.config.get('experts.use_agent', False)
+            db_idx = expert.idx  # get idx from expert preset
+
+            mode = MODE_EXPERT  # force expert mode, mode will change in bridge
+
+            # create slave item
+            ctx = CtxItem()
+            ctx.meta = slave  # use slave-meta
+            ctx.internal = internal
+            ctx.hidden = hidden
+            ctx.current = True  # mark as current context item
+            ctx.mode = mode  # store current selected mode (not inline changed)
+            ctx.model = model  # store model list key, not real model id
+            ctx.set_input(query, str(ai_name))
+            ctx.set_output(None, expert_name)
+            ctx.sub_call = True  # mark as sub-call
+            ctx.pid = master_ctx.pid  # copy PID from parent to allow reply
+
+            # render: begin
+            event = RenderEvent(RenderEvent.BEGIN, {
+                "meta": ctx.meta,
+                "ctx": ctx,
+                "stream": stream_mode,
+            })
+            self.signals.event.emit(event)  # dispatch render event
+            self.window.core.ctx.provider.append_item(slave, ctx)  # to slave meta
+
+            # build sys prompt
+            sys_prompt_raw = sys_prompt  # store raw prompt
+            event = Event(Event.PRE_PROMPT, {
+                'mode': mode,
+                'value': sys_prompt,
+                'is_expert': True,
+            })
+            self.signals.event.emit(event)  # dispatch pre-prompt event
+            sys_prompt = event.data['value']
+            sys_prompt = self.window.core.prompt.prepare_sys_prompt(
+                mode,
+                model_data,
+                sys_prompt,
+                ctx,
+                reply,
+                internal,
+                is_expert=True,  # mark as expert, blocks expert prompt append in plugin
+            )
+
+            # index to use
+            use_index = False
+            if self.window.core.idx.is_valid(db_idx):
+                use_index = True
+                self.window.core.experts.last_idx = db_idx  # store last index used in call
+            else:
+                self.window.core.experts.last_idx = None
+            if use_index:
+                index, llm = self.window.core.idx.chat.get_index(db_idx, model_data, stream=False)
+            else:
+                llm = self.window.core.idx.llm.get(model_data, stream=False)
+
+            history = self.window.core.ctx.all(
+                meta_id=slave.id
+            )  # get history for slave ctx, not master ctx
+
+            if use_agent:
+                # call the agent (planner) with tools and index
+                ctx.agent_call = True  # directly return tool call response
+                ctx.use_agent_final_response = True  # use agent final response as output
+                bridge_context = BridgeContext(
+                    ctx=ctx,
+                    history=history,
+                    mode=mode,
+                    parent_mode=base_mode,
+                    model=model_data,
+                    system_prompt=sys_prompt,
+                    system_prompt_raw=sys_prompt_raw,
+                    prompt=query,
+                    stream=False,
+                    attachments=files,
+                    file_ids=file_ids,
+                    assistant_id=self.window.core.config.get('assistant'),
+                    idx=db_idx,
+                    idx_mode=self.window.core.config.get('llama.idx.mode'),
+                    external_functions=functions,
+                    tools_outputs=tools_outputs,
+                    max_tokens=max_tokens,
+                    is_expert_call=True,  # mark as expert call
+                    preset=expert,
+                )
+                extra = {}
+                if use_index:
+                    extra["agent_idx"] = db_idx
+
+                tools = self.window.core.agents.tools.prepare(
+                    bridge_context, extra, verbose=False, force=True)
+
+                # remove expert_call tool from tools
+                for tool in list(tools):
+                    if tool.metadata.name == TOOL_EXPERT_CALL_NAME:
+                        tools.remove(tool)
+
+                result = self.call_agent(
+                    context=bridge_context,
+                    tools=tools,
+                    ctx=ctx,
+                    query=query,
+                    llm=llm,
+                    system_prompt=sys_prompt,
+                    verbose=verbose,
+                )
+                ctx.reply = False  # reset reply flag, we handle reply here
+
+                if not result:  # abort if bridge call failed
+                    self.signals.finished.emit()
+                    return
+            else:
+                # native func call
+                if self.window.core.command.is_native_enabled(force=False, model=model):
+
+                    # get native functions, without expert_call here
+                    functions = self.window.core.command.get_functions(master_ctx.id)
+
+                    # append retrieval tool if index is selected
+                    if use_index:
+                        retriever_tool = self.window.core.experts.get_retriever_tool()
+                        func_list = self.window.core.command.cmds_to_functions([retriever_tool])
+                        functions.append(func_list[0])  # append only first function
+
+                # call bridge
+                bridge_context = BridgeContext(
+                    ctx=ctx,
+                    history=history,
+                    mode=mode,
+                    parent_mode=base_mode,
+                    model=model_data,
+                    system_prompt=sys_prompt,
+                    system_prompt_raw=sys_prompt_raw,
+                    prompt=query,
+                    stream=False,
+                    attachments=files,
+                    file_ids=file_ids,
+                    assistant_id=self.window.core.config.get('assistant'),
+                    idx=db_idx,
+                    idx_mode=self.window.core.config.get('llama.idx.mode'),
+                    external_functions=functions,
+                    tools_outputs=tools_outputs,
+                    max_tokens=max_tokens,
+                    is_expert_call=True,  # mark as expert call
+                    preset=expert,
+                    force_sync=True,  # force sync call, no async bridge call
+                    request=True,  # use normal request instead of quick call
+                )
+
+                self.signals.lock_input.emit()  # emit lock input signal
+                event = KernelEvent(KernelEvent.CALL, {
+                    'context': bridge_context,  # call using slave ctx history
+                    'extra': {},
+                })
+                self.window.dispatch(event)
+                result = event.data.get("response")
+                # result: <tool>{"cmd": "read_file", "params": {"path": ["xxxx.txt"]}}</tool>
+                # ctx:
+                # input: please read the file xxx.txt
+                # output: <tool>cmd read</tool>
+                if not result and not ctx.tool_calls:  # abort if bridge call failed
+                    self.signals.finished.emit()
+                    return
+
+            # handle output
+            ctx.current = False  # reset current state
+            ctx.output = result  # store expert output in their context
+
+            self.window.core.ctx.update_item(ctx)
+
+            ctx.from_previous()  # append previous result if exists
+            ctx.clear_reply()  # reset results
+
+            if not use_agent:
+                ctx.sub_tool_call = True
+                self.signals.cmd.emit(ctx, master_ctx, expert_id, expert_name, result)  # emit cmd signal
+                # tool call here and reply to window, from <tool></tool>
+                return
+
+            # if command to execute then end here, and reply is returned to reply() above from stack, and ctx.reply = TRUE here
+            ctx.from_previous()  # append previous result again before save
+            self.window.core.ctx.update_item(ctx)  # update ctx in DB
+
+            # if commands reply after bridge call, then stop (already handled in sync dispatcher)
+            if ctx.reply:
+                self.signals.finished.emit()
+                return
+
+            # make copy of ctx for reply, and change input name to expert name
+            reply_ctx = CtxItem()
+            reply_ctx.from_dict(ctx.to_dict())
+            reply_ctx.meta = master_ctx.meta
+
+            # assign expert output
+            reply_ctx.output = result
+            reply_ctx.input_name = expert_name
+            reply_ctx.output_name = ""
+            reply_ctx.cmds = []  # clear cmds
+            reply_ctx.sub_call = True  # this flag is not copied in to_dict
+
+            # reply to main thread
+
+            # send to reply()
+            # input: something (no tool results here)
+            # output: ... (call the master)
+            self.signals.response.emit(reply_ctx, str(expert_id))  # emit response signal
+
+        except Exception as e:
+            self.window.core.debug.log(e)
+            self.signals.error.emit(str(e))
+
+        finally:
+            self.signals.finished.emit()
+            self.cleanup()
+
+    def cleanup(self):
+        """Cleanup resources after worker execution."""
+        sig = self.signals
+        self.signals = None
+        if sig is not None:
+            try:
+                sig.deleteLater()
+            except RuntimeError:
+                pass
+
+    def call_agent(
+            self,
+            context: BridgeContext,
+            tools: Optional[List[QueryEngineTool]] = None,
+            ctx: Optional[CtxItem] = None,
+            query: str = "",
+            llm=None,
+            system_prompt: str = "",
+            verbose: bool = False,
+
+    ) -> str:
+        """
+        Call agent with tools and index
+
+        :param context: Bridge context
+        :param tools: Tools
+        :param ctx: CtxItem
+        :param query: Input prompt
+        :param llm: LLM provider
+        :param system_prompt: System prompt to use for agent
+        :param verbose: Verbose mode, default is False
+        :return: Response from agent as string
+        """
+        history = self.window.core.agents.memory.prepare(context)
+        bridge_context = BridgeContext(
+            ctx=ctx,
+            system_prompt=system_prompt,
+            model=context.model,
+            prompt=query,
+            stream=False,
+            is_expert_call=True,  # mark as expert call
+        )
+        extra = {
+            "agent_provider": "react",  # use react workflow provider
+            "agent_idx": context.idx,  # index to use
+            "agent_tools": tools,  # tools to use
+            "agent_history": history,  # already prepared history
+        }
+        response_ctx = self.window.core.agents.runner.call_once(
+            context=bridge_context,
+            extra=extra,
+            signals=None,
+        )
+        if response_ctx:
+            return str(response_ctx.output)
+        else:
+            return "No response from expert."
pygpt_net/core/idx/chat.py
@@ -6,7 +6,7 @@
 # GitHub:  https://github.com/szczyglis-dev/py-gpt   #
 # MIT License                                        #
 # Created By  : Marcin Szczygliński                  #
-# Updated Date: 2025.08.14 01:00:00                  #
+# Updated Date: 2025.08.21 07:00:00                  #
 # ================================================== #
 
 import json
@@ -21,15 +21,17 @@ from pygpt_net.core.types import (
     MODE_CHAT,
     MODE_AGENT_LLAMA,
     MODE_AGENT_OPENAI,
+    TOOL_QUERY_ENGINE_NAME,
+    TOOL_QUERY_ENGINE_DESCRIPTION,
 )
 from pygpt_net.core.bridge.worker import BridgeSignals
 from pygpt_net.core.bridge.context import BridgeContext
 from pygpt_net.item.model import ModelItem
 from pygpt_net.item.ctx import CtxItem
+
 from .context import Context
 from .response import Response
 
-
 class Chat:
     def __init__(self, window=None, storage=None):
         """
@@ -122,11 +124,7 @@
             raise Exception("Model config not provided")
 
         self.log("Query index...")
-        self.log("Idx: {}, query: {}, model: {}".format(
-            idx,
-            query,
-            model.id,
-        ))
+        self.log(f"Idx: {idx}, query: {query}, model: {model.id}")
 
         index, llm = self.get_index(idx, model, stream=stream)
         input_tokens = self.window.core.tokens.from_llama_messages(
@@ -137,7 +135,7 @@
         # query index
         tpl = self.get_custom_prompt(system_prompt)
         if tpl is not None:
-            self.log("Query index with custom prompt: {}...".format(system_prompt))
+            self.log(f"Query index with custom prompt: {system_prompt}...")
             response = index.as_query_engine(
                 llm=llm,
                 streaming=stream,
@@ -189,16 +187,13 @@
         verbose = self.window.core.config.get("log.llama", False)
 
         self.log("Retrieval...")
-        self.log("Idx: {}, retrieve only: {}".format(
-            idx,
-            query,
-        ))
+        self.log(f"Idx: {idx}, retrieve only: {query}")
 
         index, llm = self.get_index(idx, model, stream=stream)
         retriever = index.as_retriever()
         nodes = retriever.retrieve(query)
         outputs = []
-        self.log("Retrieved {} nodes...".format(len(nodes)))
+        self.log(f"Retrieved {len(nodes)} nodes...")
         for node in nodes:
             outputs.append({
                 "text": node.text,
@@ -207,7 +202,7 @@
         if outputs:
             response = ""
             for output in outputs:
-                response += "**Score: {}**\n\n{}".format(output["score"], output["text"])
+                response += f"**Score: {output['score']}**\n\n{output['text']}"
                 if output != outputs[-1]:
                     response += "\n\n-------\n\n"
             ctx.set_output(response)
@@ -248,7 +243,7 @@
         if disable_cmd:
             cmd_enabled = False
 
-        if idx is None or idx == "_":
+        if not self.window.core.idx.is_valid(idx):
             chat_mode = "simple"  # do not use query engine if no index
             use_index = False
 
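
The inline `idx is None or idx == "_"` guard is replaced by `self.window.core.idx.is_valid(idx)`, the same helper the new expert worker calls before attaching an index. Its body is not part of this diff (it presumably lives in pygpt_net/core/idx/idx.py, changed +19 -8 above); a rough equivalent of the condition it replaces would be:

    def is_valid(self, idx) -> bool:
        # sketch reconstructed from the removed condition; the real method may also
        # verify that the index actually exists in the configured storage
        return idx is not None and idx not in ("", "_")
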
@@ -271,29 +266,19 @@
 
         # -- log ---
         self.log("Chat with index...")
-        self.log("Idx: {idx}, "
-                 "chat_mode: {mode}, "
-                 "model: {model}, "
-                 "stream: {stream}, "
-                 "native tool calls: {tool_calls}, "
-                 "use react: {react}, "
-                 "use index: {use_index}, "
-                 "cmd enabled: {cmd_enabled}, "
-                 "num_attachments: {num_attachments}, "
-                 "additional ctx: {additional_ctx}, "
-                 "query: {query}".format(
-            idx=idx,
-            mode=chat_mode,
-            model=model.id,
-            stream=stream,
-            tool_calls=allow_native_tool_calls,
-            react=use_react,
-            use_index=use_index,
-            cmd_enabled=cmd_enabled,
-            num_attachments=len(context.attachments) if context.attachments else 0,
-            additional_ctx=additional_ctx,
-            query=query,
-        ))
+        self.log(
+            f"Idx: {idx}, "
+            f"chat_mode: {chat_mode}, "
+            f"model: {model.id}, "
+            f"stream: {stream}, "
+            f"native tool calls: {allow_native_tool_calls}, "
+            f"use react: {use_react}, "
+            f"use index: {use_index}, "
+            f"cmd enabled: {cmd_enabled}, "
+            f"num_attachments: {len(context.attachments) if context.attachments else 0}, "
+            f"additional ctx: {additional_ctx}, "
+            f"query: {query}"
+        )
 
         # use index only if idx is not empty, otherwise use only LLM
         index = None
@@ -424,35 +409,18 @@
             messages=history,
         )
 
-        # handle response
+        # handle response, append output to ctx, etc.
         if response:
-            # tools
-            if cmd_enabled:
-                if use_react:
-                    self.response.from_react(ctx, model, response)  # TOOLS + REACT, non-stream
-                else:
-                    if stream:
-                        if use_index:
-                            self.response.from_index_stream(ctx, model, response)  # INDEX + STREAM
-                        else:
-                            self.response.from_llm_stream(ctx, model, llm, response)  # LLM + STREAM
-                    else:
-                        if use_index:
-                            self.response.from_index(ctx, model, response)  # TOOLS + INDEX
-                        else:
-                            self.response.from_llm(ctx, model, llm, response)  # TOOLS + LLM
-            else:
-                # no tools
-                if stream:
-                    if use_index:
-                        self.response.from_index_stream(ctx, model, response)  # INDEX + STREAM
-                    else:
-                        self.response.from_llm_stream(ctx, model, llm, response)  # LLM + STREAM
-                else:
-                    if use_index:
-                        self.response.from_index(ctx, model, response)  # INDEX
-                    else:
-                        self.response.from_llm(ctx, model, llm, response)  # LLM
+            self.response.handle(
+                ctx=ctx,
+                model=model,
+                llm=llm,
+                response=response,
+                cmd_enabled=cmd_enabled,
+                use_react=use_react,
+                use_index=use_index,
+                stream=stream,
+            )
 
         # append attachment images to context
         self.context.append_images(ctx)
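
The nested routing removed here is collapsed into a single `Response.handle()` call (most of the +91 lines in pygpt_net/core/idx/response.py). Reconstructed from the deleted branches rather than from the new file itself, the dispatcher has to cover roughly this logic:

    def handle(self, ctx, model, llm, response,
               cmd_enabled: bool, use_react: bool, use_index: bool, stream: bool):
        # sketch: same routing as the removed if/else ladder in chat.py
        if cmd_enabled and use_react:
            self.from_react(ctx, model, response)                # tools + ReAct, non-stream
        elif stream:
            if use_index:
                self.from_index_stream(ctx, model, response)     # index + stream
            else:
                self.from_llm_stream(ctx, model, llm, response)  # LLM + stream
        else:
            if use_index:
                self.from_index(ctx, model, response)            # index
            else:
                self.from_llm(ctx, model, llm, response)         # LLM
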
@@ -515,8 +483,8 @@
             )
             index_tool = QueryEngineTool.from_defaults(
                 query_engine=query_engine,
-                name="get_context",
-                description="Get additional context to answer the question.",
+                name=TOOL_QUERY_ENGINE_NAME,
+                description=TOOL_QUERY_ENGINE_DESCRIPTION,
                 return_direct=True,  # return direct response from index
             )
             tools.append(index_tool)
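
The hard-coded tool name and description move into the new pygpt_net/core/types/tools.py module (+22 lines, exported via core/types/__init__.py). Judging from the strings removed above, it defines at least the following; the expert-call constant is also imported by the new worker, but its value is not visible in this diff:

    # reconstructed from this diff, not copied from the actual module
    TOOL_QUERY_ENGINE_NAME = "get_context"
    TOOL_QUERY_ENGINE_DESCRIPTION = "Get additional context to answer the question."
    TOOL_EXPERT_CALL_NAME = "expert_call"  # assumed value; only the constant name is confirmed
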
@@ -577,8 +545,8 @@
         llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
         tmp_id, index = self.storage.get_tmp(path, llm, embed_model)  # get or create tmp index
 
-        idx = "tmp:{}".format(path)  # tmp index id
-        self.log("Indexing to temporary in-memory index: {}...".format(idx))
+        idx = f"tmp:{path}"  # tmp index id
+        self.log(f"Indexing to temporary in-memory index: {idx}...")
 
         # index file to tmp index
         files, errors = self.window.core.idx.indexing.index_files(
@@ -591,7 +559,7 @@
         # query tmp index
         output = None
         if len(files) > 0:
-            self.log("Querying temporary in-memory index: {}...".format(idx))
+            self.log(f"Querying temporary in-memory index: {idx}...")
             response = index.as_query_engine(
                 llm=llm,
                 streaming=False,
@@ -601,9 +569,9 @@
             output = response.response
 
         # clean tmp index
-        self.log("Removing temporary in-memory index: {} ({})...".format(idx, tmp_id))
+        self.log(f"Removing temporary in-memory index: {idx} ({tmp_id})...")
         self.storage.clean_tmp(tmp_id)  # clean memory
-        self.log("Returning response: {}".format(output))
+        self.log(f"Returning response: {output}")
         return output
 
     def query_web(
@@ -637,8 +605,8 @@
         llm, embed_model = self.window.core.idx.llm.get_service_context(model=model, stream=False)
         tmp_id, index = self.storage.get_tmp(id, llm, embed_model)  # get or create tmp index
 
-        idx = "tmp:{}".format(id)  # tmp index id
-        self.log("Indexing to temporary in-memory index: {}...".format(idx))
+        idx = f"tmp:{id}"  # tmp index id
+        self.log(f"Indexing to temporary in-memory index: {idx}...")
 
         # index file to tmp index
         num, errors = self.window.core.idx.indexing.index_urls(
@@ -653,7 +621,7 @@
         # query tmp index
         output = None
         if num > 0:
-            self.log("Querying temporary in-memory index: {}...".format(idx))
+            self.log(f"Querying temporary in-memory index: {idx}...")
             response = index.as_query_engine(
                 llm=llm,
                 streaming=False,
@@ -663,9 +631,9 @@
             output = response.response
 
         # clean tmp index
-        self.log("Removing temporary in-memory index: {} ({})...".format(idx, tmp_id))
+        self.log(f"Removing temporary in-memory index: {idx} ({tmp_id})...")
         self.storage.clean_tmp(tmp_id)  # clean memory
-        self.log("Returning response: {}...".format(output))
+        self.log(f"Returning response: {output}...")
         return output
 
     def query_attachment(
@@ -705,7 +673,7 @@
         if response:
             output = str(response)
             if verbose:
-                print("Found using retrieval, {} (score: {})".format(output, score))
+                print(f"Found using retrieval: {output} (score: {score})")
         else:
             if verbose:
                 print("Not found using retrieval, trying with query engine...")
@@ -814,7 +782,6 @@
         :param idx: idx name (id)
         :param model: model instance
         :param stream: stream mode
-        :return:
         """
         # check if index exists
         if not self.storage.exists(idx):
@@ -867,7 +834,7 @@
         :param msg: message
         """
         # disabled logging for thread safety
-        if self.window.core.config.get("mode") in [MODE_AGENT_LLAMA, MODE_AGENT_OPENAI]:
+        if self.window.core.config.get("mode") in (MODE_AGENT_LLAMA, MODE_AGENT_OPENAI):
             return
         is_log = False
         if self.window.core.config.has("log.llama") \
@@ -875,5 +842,5 @@
                 is_log = True
         self.window.core.debug.info(msg, not is_log)
         if is_log:
-            print("[LLAMA-INDEX] {}".format(msg))
+            print(f"[LlamaIndex] {msg}")
         self.window.idx_logger_message.emit(msg)