pygpt-net 2.6.17.post1__py3-none-any.whl → 2.6.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,33 +6,38 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.20 09:00:00 #
+# Updated Date: 2025.08.21 07:00:00 #
 # ================================================== #
 
 import json
-from typing import Dict, List, Optional
+from typing import Dict, List
 
-from PySide6.QtCore import QRunnable, QObject, Signal, Slot
-from llama_index.core.tools import QueryEngineTool
+from PySide6.QtCore import Slot
 
+from pygpt_net.core.experts.worker import ExpertWorker
 from pygpt_net.core.types import (
     MODE_AGENT,
     MODE_CHAT,
     MODE_COMPLETION,
     MODE_EXPERT,
-    MODE_LANGCHAIN,
     MODE_LLAMA_INDEX,
     MODE_VISION,
     MODE_AUDIO,
     MODE_RESEARCH,
+    TOOL_EXPERT_CALL_NAME,
+    TOOL_EXPERT_CALL_DESCRIPTION,
+    TOOL_EXPERT_CALL_PARAM_ID_DESCRIPTION,
+    TOOL_EXPERT_CALL_PARAM_QUERY_DESCRIPTION,
+    TOOL_QUERY_ENGINE_NAME,
+    TOOL_QUERY_ENGINE_DESCRIPTION,
+    TOOL_QUERY_ENGINE_PARAM_QUERY_DESCRIPTION,
 )
 from pygpt_net.core.bridge.context import BridgeContext
-from pygpt_net.core.events import Event, KernelEvent, RenderEvent
+from pygpt_net.core.events import Event, KernelEvent
 from pygpt_net.item.ctx import CtxItem
 from pygpt_net.item.preset import PresetItem
 from pygpt_net.utils import trans
 
-
 class Experts:
     def __init__(self, window=None):
         """
@@ -50,7 +55,7 @@ class Experts:
             MODE_AUDIO,
             MODE_RESEARCH,
         ]
-        self.allowed_cmds = ["expert_call"]
+        self.allowed_cmds = [TOOL_EXPERT_CALL_NAME]
         self.worker = None
         self.last_expert_id = None  # last expert id used in call
         self.last_idx = None  # last index used in call
@@ -172,9 +177,9 @@ class Experts:
             if k.startswith("current."):  # skip current presets
                 continue
             if experts[k].description.strip() == "":
-                experts_list.append(" - " + str(k) + ": " + str(experts[k].name))
+                experts_list.append(f" - {k}: {experts[k].name}")
             else:
-                experts_list.append(" - " + str(k) + ": " + str(experts[k].name) + " (" + experts[k].description + ")")
+                experts_list.append(f" - {k}: {experts[k].name} ({experts[k].description})")
         return prompt.replace("{presets}", "\n".join(experts_list))
 
     def extract_calls(self, ctx: CtxItem) -> Dict[str, str]:
@@ -204,7 +209,7 @@ class Experts:
             return {}
         for item in my_commands:
             try:
-                if item["cmd"] == "expert_call":
+                if item["cmd"] == TOOL_EXPERT_CALL_NAME:
                     if "params" not in item:
                         continue
                     if "id" not in item["params"] or "query" not in item["params"]:
@@ -215,7 +220,7 @@ class Experts:
                     query = item["params"]["query"]
                     calls[id] = query
             except Exception as e:
-                self.window.core.debug.error(e)
+                self.window.core.debug.log(e)
                 return {}
         return calls
 
@@ -228,11 +233,11 @@ class Experts:
         for call in ctx.tool_calls:
             if (call["type"] == "function"
                     and "function" in call
-                    and call["function"]["name"] == "get_context"):
-                ctx.force_call = True  # force call if get_context tool is used
+                    and call["function"]["name"] == TOOL_QUERY_ENGINE_NAME):
+                ctx.force_call = True  # force call if query engine tool is used
                 ctx.cmds_before = [
                     {
-                        "cmd": "get_context",
+                        "cmd": TOOL_QUERY_ENGINE_NAME,
                         "params": {
                             "query": call["function"]["arguments"]["query"],
                             "idx": self.last_idx,
@@ -331,7 +336,7 @@ class Experts:
         self.master_ctx = master_ctx
         expert_name = self.get_expert_name_by_id(expert_id)
         event = KernelEvent(KernelEvent.STATE_BUSY, {
-            "msg": trans("expert.wait.status") + " ({})".format(expert_name),
+            "msg": f"{trans('expert.wait.status')} ({expert_name})",
         })
         self.window.dispatch(event)  # dispatch busy event
         self.window.threadpool.start(self.worker)
@@ -397,7 +402,7 @@ class Experts:
 
         self.window.core.ctx.update_item(ctx)  # update context in db
         self.window.update_status('...')
-        ctx.output = "<tool>" + str(ctx.cmds) + "</tool>"
+        ctx.output = f"<tool>{ctx.cmds}</tool>"
         self.window.core.ctx.update_item(ctx)  # update ctx in DB
         self.handle_finished()
         self.call(
@@ -463,7 +468,7 @@ class Experts:
 
         # handle error from worker
         context = BridgeContext()
-        context.prompt = trans("expert.wait.failed") + ": " + str(error)
+        context.prompt = f"{trans('expert.wait.failed')}: {error}"
         extra = {
             "force": True,
             "reply": False,
@@ -478,7 +483,7 @@ class Experts:
         event = KernelEvent(KernelEvent.STATE_IDLE, {})
         self.window.dispatch(event)  # dispatch idle event
 
-    @Slot(CtxItem, str)
+    @Slot()
     def handle_finished(self):
         """Handle worker finished signal"""
         event = KernelEvent(KernelEvent.STATE_IDLE, {})
@@ -528,18 +533,18 @@ class Experts:
         """
         cmds = [
            {
-                "cmd": "expert_call",
-                "instruction": "Call the expert",
+                "cmd": TOOL_EXPERT_CALL_NAME,
+                "instruction": TOOL_EXPERT_CALL_DESCRIPTION,
                 "params": [
                     {
                         "name": "id",
-                        "description": "expert id",
+                        "description": TOOL_EXPERT_CALL_PARAM_ID_DESCRIPTION,
                         "required": True,
                         "type": "str",
                     },
                     {
                         "name": "query",
-                        "description": "query to expert",
+                        "description": TOOL_EXPERT_CALL_PARAM_QUERY_DESCRIPTION,
                         "required": True,
                         "type": "str",
                     }
@@ -555,12 +560,12 @@ class Experts:
         :return: retriever tool definition
         """
         return {
-            "cmd": "get_context",
-            "instruction": "get additional context for a given query",
+            "cmd": TOOL_QUERY_ENGINE_NAME,
+            "instruction": TOOL_QUERY_ENGINE_DESCRIPTION,
             "params": [
                 {
                     "name": "query",
-                    "description": "query to retrieve additional context for",
+                    "description": TOOL_QUERY_ENGINE_PARAM_QUERY_DESCRIPTION,
                     "required": True,
                     "type": "str",
                 }
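
Note: the TOOL_* constants come from pygpt_net.core.types (see the import hunk at the top of this diff). Their definitions are not shown here; the sketch below is inferred from the string literals they replace, so the names are real but the values are assumptions:

    # Assumed values, inferred from the literals replaced in this diff.
    TOOL_EXPERT_CALL_NAME = "expert_call"
    TOOL_EXPERT_CALL_DESCRIPTION = "Call the expert"
    TOOL_EXPERT_CALL_PARAM_ID_DESCRIPTION = "expert id"
    TOOL_EXPERT_CALL_PARAM_QUERY_DESCRIPTION = "query to expert"
    TOOL_QUERY_ENGINE_NAME = "get_context"
    TOOL_QUERY_ENGINE_DESCRIPTION = "get additional context for a given query"
    TOOL_QUERY_ENGINE_PARAM_QUERY_DESCRIPTION = "query to retrieve additional context for"
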
@@ -572,6 +577,7 @@ class Experts:
         Check if context has expert calls
 
         :param ctx: CtxItem
+        :return: True if expert calls found
         """
         if not ctx.sub_reply and not ctx.reply:
             mentions = self.window.core.experts.extract_calls(ctx)
@@ -581,343 +587,3 @@ class Experts:
                         continue
                     return True
         return False
-
-
-class WorkerSignals(QObject):
-    """
-    Signals for worker to communicate with main thread.
-    """
-    finished = Signal()  # when worker is finished
-    response = Signal(object, str)  # when worker has response
-    error = Signal(str)  # when worker has error
-    event = Signal(object)  # when worker has event to dispatch
-    output = Signal(object, str)  # when worker has output to handle
-    lock_input = Signal()  # when worker locks input for UI
-    cmd = Signal(object, object, str, str, str)  # when worker has command to handle
-
-
-class ExpertWorker(QRunnable):
-    """Worker for handling expert calls in a separate thread."""
-    def __init__(
-            self,
-            window,
-            master_ctx: CtxItem,
-            expert_id: str,
-            query: str
-    ):
-        super().__init__()
-        self.window = window
-        self.master_ctx = master_ctx
-        self.expert_id = expert_id
-        self.query = query
-        self.signals = WorkerSignals()
-
-    @Slot()
-    def run(self):
-        master_ctx = self.master_ctx
-        expert_id = self.expert_id
-        query = self.query
-
-        try:
-            # get or create children (slave) meta
-            slave = self.window.core.ctx.get_or_create_slave_meta(master_ctx, expert_id)
-            expert = self.window.core.experts.get_expert(expert_id)  # preset
-            reply = True
-            hidden = False
-            internal = False
-
-            if self.window.core.experts.agent_enabled():  # hide in agent mode
-                internal = False
-                hidden = True
-
-            mode = self.window.core.config.get("mode")
-            base_mode = mode
-            model = expert.model
-            expert_name = expert.name
-            ai_name = ""
-            sys_prompt = expert.prompt
-            model_data = self.window.core.models.get(model)
-
-            files = []
-            file_ids = []
-            functions = []
-            tools_outputs = []
-
-            # from current config
-            max_tokens = self.window.core.config.get('max_output_tokens')
-            stream_mode = self.window.core.config.get('stream')
-            verbose = self.window.core.config.get('agent.llama.verbose')
-            use_agent = self.window.core.config.get('experts.use_agent', False)
-            db_idx = expert.idx  # get idx from expert preset
-
-            mode = MODE_EXPERT  # force expert mode, mode will change in bridge
-
-            # create slave item
-            ctx = CtxItem()
-            ctx.meta = slave  # use slave-meta
-            ctx.internal = internal
-            ctx.hidden = hidden
-            ctx.current = True  # mark as current context item
-            ctx.mode = mode  # store current selected mode (not inline changed)
-            ctx.model = model  # store model list key, not real model id
-            ctx.set_input(query, str(ai_name))
-            ctx.set_output(None, expert_name)
-            ctx.sub_call = True  # mark as sub-call
-            ctx.pid = master_ctx.pid  # copy PID from parent to allow reply
-
-            # render: begin
-            event = RenderEvent(RenderEvent.BEGIN, {
-                "meta": ctx.meta,
-                "ctx": ctx,
-                "stream": stream_mode,
-            })
-            self.signals.event.emit(event)  # dispatch render event
-            self.window.core.ctx.provider.append_item(slave, ctx)  # to slave meta
-
-            # build sys prompt
-            sys_prompt_raw = sys_prompt  # store raw prompt
-            event = Event(Event.PRE_PROMPT, {
-                'mode': mode,
-                'value': sys_prompt,
-                'is_expert': True,
-            })
-            self.signals.event.emit(event)  # dispatch pre-prompt event
-            sys_prompt = event.data['value']
-            sys_prompt = self.window.core.prompt.prepare_sys_prompt(
-                mode,
-                model_data,
-                sys_prompt,
-                ctx,
-                reply,
-                internal,
-                is_expert=True,  # mark as expert, blocks expert prompt append in plugin
-            )
-
-            # index to use
-            use_index = False
-            if db_idx and db_idx != '_':
-                use_index = True
-                self.window.core.experts.last_idx = db_idx  # store last index used in call
-            else:
-                self.window.core.experts.last_idx = None
-            if use_index:
-                index, llm = self.window.core.idx.chat.get_index(db_idx, model_data, stream=False)
-            else:
-                llm = self.window.core.idx.llm.get(model_data, stream=False)
-
-            history = self.window.core.ctx.all(
-                meta_id=slave.id
-            )  # get history for slave ctx, not master ctx
-
-            if use_agent:
-                # call the agent (planner) with tools and index
-                ctx.agent_call = True  # directly return tool call response
-                ctx.use_agent_final_response = True  # use agent final response as output
-                bridge_context = BridgeContext(
-                    ctx=ctx,
-                    history=history,
-                    mode=mode,
-                    parent_mode=base_mode,
-                    model=model_data,
-                    system_prompt=sys_prompt,
-                    system_prompt_raw=sys_prompt_raw,
-                    prompt=query,
-                    stream=False,
-                    attachments=files,
-                    file_ids=file_ids,
-                    assistant_id=self.window.core.config.get('assistant'),
-                    idx=db_idx,
-                    idx_mode=self.window.core.config.get('llama.idx.mode'),
-                    external_functions=functions,
-                    tools_outputs=tools_outputs,
-                    max_tokens=max_tokens,
-                    is_expert_call=True,  # mark as expert call
-                    preset=expert,
-                )
-                extra = {}
-                if use_index:
-                    extra["agent_idx"] = db_idx
-
-                tools = self.window.core.agents.tools.prepare(
-                    bridge_context, extra, verbose=False, force=True)
-
-                # remove expert_call tool from tools
-                for tool in list(tools):
-                    if tool.metadata.name == "expert_call":
-                        tools.remove(tool)
-
-                result = self.call_agent(
-                    context=bridge_context,
-                    tools=tools,
-                    ctx=ctx,
-                    query=query,
-                    llm=llm,
-                    system_prompt=sys_prompt,
-                    verbose=verbose,
-                )
-                ctx.reply = False  # reset reply flag, we handle reply here
-
-                if not result:  # abort if bridge call failed
-                    self.signals.finished.emit()
-                    return
-            else:
-                # native func call
-                if self.window.core.command.is_native_enabled(force=False, model=model):
-
-                    # get native functions, without expert_call here
-                    functions = self.window.core.command.get_functions(master_ctx.id)
-
-                    # append retrieval tool if index is selected
-                    if use_index:
-                        retriever_tool = self.window.core.experts.get_retriever_tool()
-                        func_list = self.window.core.command.cmds_to_functions([retriever_tool])
-                        functions.append(func_list[0])  # append only first function
-
-                # call bridge
-                bridge_context = BridgeContext(
-                    ctx=ctx,
-                    history=history,
-                    mode=mode,
-                    parent_mode=base_mode,
-                    model=model_data,
-                    system_prompt=sys_prompt,
-                    system_prompt_raw=sys_prompt_raw,
-                    prompt=query,
-                    stream=False,
-                    attachments=files,
-                    file_ids=file_ids,
-                    assistant_id=self.window.core.config.get('assistant'),
-                    idx=db_idx,
-                    idx_mode=self.window.core.config.get('llama.idx.mode'),
-                    external_functions=functions,
-                    tools_outputs=tools_outputs,
-                    max_tokens=max_tokens,
-                    is_expert_call=True,  # mark as expert call
-                    preset=expert,
-                    force_sync=True,  # force sync call, no async bridge call
-                    request=True,  # use normal request instead of quick call
-                )
-
-                self.signals.lock_input.emit()  # emit lock input signal
-                event = KernelEvent(KernelEvent.CALL, {
-                    'context': bridge_context,  # call using slave ctx history
-                    'extra': {},
-                })
-                self.window.dispatch(event)
-                result = event.data.get("response")
-                # result: <tool>{"cmd": "read_file", "params": {"path": ["xxxx.txt"]}}</tool>
-                # ctx:
-                # input: please read the file xxx.txt
-                # output: <tool>cmd read</tool>
-                if not result and not ctx.tool_calls:  # abort if bridge call failed
-                    self.signals.finished.emit()
-                    return
-
-            # handle output
-            ctx.current = False  # reset current state
-            ctx.output = result  # store expert output in their context
-
-            self.window.core.ctx.update_item(ctx)
-
-            ctx.from_previous()  # append previous result if exists
-            ctx.clear_reply()  # reset results
-
-            if not use_agent:
-                ctx.sub_tool_call = True
-                self.signals.cmd.emit(ctx, master_ctx, expert_id, expert_name, result)  # emit cmd signal
-                # tool call here and reply to window, from <tool></tool>
-                return
-
-            # if command to execute then end here, and reply is returned to reply() above from stack, and ctx.reply = TRUE here
-            ctx.from_previous()  # append previous result again before save
-            self.window.core.ctx.update_item(ctx)  # update ctx in DB
-
-            # if commands reply after bridge call, then stop (already handled in sync dispatcher)
-            if ctx.reply:
-                self.signals.finished.emit()
-                return
-
-            # make copy of ctx for reply, and change input name to expert name
-            reply_ctx = CtxItem()
-            reply_ctx.from_dict(ctx.to_dict())
-            reply_ctx.meta = master_ctx.meta
-
-            # assign expert output
-            reply_ctx.output = result
-            reply_ctx.input_name = expert_name
-            reply_ctx.output_name = ""
-            reply_ctx.cmds = []  # clear cmds
-            reply_ctx.sub_call = True  # this flag is not copied in to_dict
-
-            # reply to main thread
-
-            # send to reply()
-            # input: something (no tool results here)
-            # output: ... (call the master)
-            self.signals.response.emit(reply_ctx, str(expert_id))  # emit response signal
-
-        except Exception as e:
-            self.window.core.debug.log(e)
-            self.signals.error.emit(str(e))
-
-        finally:
-            self.signals.finished.emit()
-            self.cleanup()
-
-    def cleanup(self):
-        """Cleanup resources after worker execution."""
-        sig = self.signals
-        self.signals = None
-        if sig is not None:
-            try:
-                sig.deleteLater()
-            except RuntimeError:
-                pass
-
-    def call_agent(
-            self,
-            context: BridgeContext,
-            tools: Optional[List[QueryEngineTool]] = None,
-            ctx: Optional[CtxItem] = None,
-            query: str = "",
-            llm=None,
-            system_prompt: str = "",
-            verbose: bool = False,
-
-    ) -> str:
-        """
-        Call agent with tools and index
-
-        :param context: Bridge context
-        :param tools: Tools
-        :param ctx: CtxItem
-        :param query: Input prompt
-        :param llm: LLM provider
-        :param system_prompt: System prompt to use for agent
-        :param verbose: Verbose mode, default is False
-        :return: True if success, False otherwise
-        """
-        history = self.window.core.agents.memory.prepare(context)
-        bridge_context = BridgeContext(
-            ctx=ctx,
-            system_prompt=system_prompt,
-            model=context.model,
-            prompt=query,
-            stream=False,
-            is_expert_call=True,  # mark as expert call
-        )
-        extra = {
-            "agent_provider": "react",  # use react workflow provider
-            "agent_idx": context.idx,  # index to use
-            "agent_tools": tools,  # tools to use
-            "agent_history": history,  # already prepared history
-        }
-        response_ctx = self.window.core.agents.runner.call_once(
-            context=bridge_context,
-            extra=extra,
-            signals=None,
-        )
-        if response_ctx:
-            return str(response_ctx.output)
-        else:
-            return "No response from expert."