pygpt-net 2.6.58__py3-none-any.whl → 2.6.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. pygpt_net/CHANGELOG.txt +10 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/app.py +9 -5
  4. pygpt_net/controller/__init__.py +1 -0
  5. pygpt_net/controller/presets/editor.py +442 -39
  6. pygpt_net/core/agents/custom/__init__.py +275 -0
  7. pygpt_net/core/agents/custom/debug.py +64 -0
  8. pygpt_net/core/agents/custom/factory.py +109 -0
  9. pygpt_net/core/agents/custom/graph.py +71 -0
  10. pygpt_net/core/agents/custom/llama_index/__init__.py +10 -0
  11. pygpt_net/core/agents/custom/llama_index/factory.py +89 -0
  12. pygpt_net/core/agents/custom/llama_index/router_streamer.py +106 -0
  13. pygpt_net/core/agents/custom/llama_index/runner.py +529 -0
  14. pygpt_net/core/agents/custom/llama_index/stream.py +56 -0
  15. pygpt_net/core/agents/custom/llama_index/utils.py +242 -0
  16. pygpt_net/core/agents/custom/logging.py +50 -0
  17. pygpt_net/core/agents/custom/memory.py +51 -0
  18. pygpt_net/core/agents/custom/router.py +116 -0
  19. pygpt_net/core/agents/custom/router_streamer.py +187 -0
  20. pygpt_net/core/agents/custom/runner.py +454 -0
  21. pygpt_net/core/agents/custom/schema.py +125 -0
  22. pygpt_net/core/agents/custom/utils.py +181 -0
  23. pygpt_net/core/agents/provider.py +72 -7
  24. pygpt_net/core/agents/runner.py +7 -4
  25. pygpt_net/core/agents/runners/helpers.py +1 -1
  26. pygpt_net/core/agents/runners/llama_workflow.py +3 -0
  27. pygpt_net/core/agents/runners/openai_workflow.py +8 -1
  28. pygpt_net/core/filesystem/parser.py +37 -24
  29. pygpt_net/{ui/widget/builder → core/node_editor}/__init__.py +2 -2
  30. pygpt_net/core/{builder → node_editor}/graph.py +11 -218
  31. pygpt_net/core/node_editor/models.py +111 -0
  32. pygpt_net/core/node_editor/types.py +76 -0
  33. pygpt_net/core/node_editor/utils.py +17 -0
  34. pygpt_net/core/render/web/renderer.py +10 -8
  35. pygpt_net/data/config/config.json +3 -3
  36. pygpt_net/data/config/models.json +3 -3
  37. pygpt_net/data/locale/locale.en.ini +4 -4
  38. pygpt_net/data/locale/plugin.cmd_system.en.ini +68 -0
  39. pygpt_net/item/agent.py +5 -1
  40. pygpt_net/item/preset.py +19 -1
  41. pygpt_net/plugin/cmd_system/config.py +377 -1
  42. pygpt_net/plugin/cmd_system/plugin.py +52 -8
  43. pygpt_net/plugin/cmd_system/runner.py +508 -32
  44. pygpt_net/plugin/cmd_system/winapi.py +481 -0
  45. pygpt_net/plugin/cmd_system/worker.py +88 -15
  46. pygpt_net/provider/agents/base.py +33 -2
  47. pygpt_net/provider/agents/llama_index/flow_from_schema.py +92 -0
  48. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +0 -0
  49. pygpt_net/provider/agents/openai/flow_from_schema.py +96 -0
  50. pygpt_net/provider/core/agent/json_file.py +11 -5
  51. pygpt_net/provider/llms/openai.py +6 -4
  52. pygpt_net/tools/agent_builder/tool.py +217 -52
  53. pygpt_net/tools/agent_builder/ui/dialogs.py +119 -24
  54. pygpt_net/tools/agent_builder/ui/list.py +37 -10
  55. pygpt_net/tools/code_interpreter/ui/html.py +2 -1
  56. pygpt_net/ui/dialog/preset.py +16 -1
  57. pygpt_net/ui/main.py +1 -1
  58. pygpt_net/{core/builder → ui/widget/node_editor}/__init__.py +2 -2
  59. pygpt_net/ui/widget/node_editor/command.py +373 -0
  60. pygpt_net/ui/widget/node_editor/editor.py +2038 -0
  61. pygpt_net/ui/widget/node_editor/item.py +492 -0
  62. pygpt_net/ui/widget/node_editor/node.py +1205 -0
  63. pygpt_net/ui/widget/node_editor/utils.py +17 -0
  64. pygpt_net/ui/widget/node_editor/view.py +247 -0
  65. pygpt_net/ui/widget/textarea/web.py +1 -1
  66. {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/METADATA +135 -61
  67. {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/RECORD +69 -42
  68. pygpt_net/core/agents/custom.py +0 -150
  69. pygpt_net/ui/widget/builder/editor.py +0 -2001
  70. {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/LICENSE +0 -0
  71. {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/WHEEL +0 -0
  72. {pygpt_net-2.6.58.dist-info → pygpt_net-2.6.60.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,92 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from typing import Any, Dict, Optional, List, Tuple
14
+
15
+ from pygpt_net.core.agents.custom.logging import StdLogger, NullLogger
16
+ from pygpt_net.core.types import AGENT_MODE_WORKFLOW, AGENT_TYPE_LLAMA
17
+ from pygpt_net.item.model import ModelItem
18
+ from pygpt_net.item.preset import PresetItem
19
+ from pygpt_net.core.bridge import BridgeContext
20
+
21
+ from agents import TResponseInputItem
22
+
23
+ from ..base import BaseAgent
24
+ from pygpt_net.core.agents.custom.llama_index.runner import DynamicFlowWorkflowLI
25
+ from pygpt_net.core.agents.custom.llama_index.utils import make_option_getter
26
+
27
+
28
class Agent(BaseAgent):
    """
    Dynamic Flow (LlamaIndex 0.13) – provider that returns a workflow-like object
    compatible with the predefined LlamaWorkflow runner (run() -> handler.stream_events()).
    """

    def __init__(self, *args, **kwargs):
        super(Agent, self).__init__(*args, **kwargs)
        self.id = "llama_custom"          # provider identifier
        self.type = AGENT_TYPE_LLAMA      # LlamaIndex agent family
        self.mode = AGENT_MODE_WORKFLOW   # executed through the workflow runner
        self.name = "Custom"

    def get_agent(self, window, kwargs: Dict[str, Any]):
        """
        Build and return a DynamicFlowWorkflowLI instance.

        Expected kwargs from the app:
          - schema: List[dict] – flow schema (nodes/edges)
          - llm: LlamaIndex LLM (base) – used when a node has no per-node model
          - tools: List[BaseTool] (LlamaIndex)
          - chat_history: Optional[List[TResponseInputItem]] – initial messages for the first agent
          - context: BridgeContext – carries the preset used by get_option
          - router_stream_mode / max_iterations / stream / verbose / model (default ModelItem)
          - timeout: Optional[int] – workflow timeout in seconds (default: 120)

        :param window: application window (service locator)
        :param kwargs: build options (see above)
        :return: DynamicFlowWorkflowLI ready to be run by the LlamaWorkflow runner
        """
        schema: List[Dict[str, Any]] = kwargs.get("schema") or []
        llm = kwargs.get("llm")
        tools = kwargs.get("tools", []) or []
        initial_messages: Optional[List[TResponseInputItem]] = kwargs.get("chat_history")
        verbose = bool(kwargs.get("verbose", False))

        context: BridgeContext = kwargs.get("context", BridgeContext())
        preset: Optional[PresetItem] = context.preset
        default_model: ModelItem = kwargs.get("model", ModelItem())

        # Preset-level options take precedence; kwargs supply fallbacks.
        base_prompt = self.get_option(preset, "base", "prompt")
        allow_local_tools_default = bool(self.get_option(preset, "base", "allow_local_tools"))
        allow_remote_tools_default = bool(self.get_option(preset, "base", "allow_remote_tools"))
        max_iterations = int(self.get_option(preset, "base", "max_iterations") or kwargs.get("max_iterations", 20))
        router_stream_mode = self.get_option(preset, "router", "stream_mode") or kwargs.get("router_stream_mode", "realtime")

        option_get = make_option_getter(self, preset)
        stream = bool(kwargs.get("stream", False))
        logger = StdLogger(prefix="[flow]") if verbose else NullLogger()

        return DynamicFlowWorkflowLI(
            window=window,
            logger=logger,
            schema=schema,
            initial_messages=initial_messages,
            preset=preset,
            default_model=default_model,
            option_get=option_get,
            router_stream_mode=str(router_stream_mode).lower(),
            allow_local_tools_default=allow_local_tools_default,
            allow_remote_tools_default=allow_remote_tools_default,
            max_iterations=max_iterations,
            llm=llm,
            tools=tools,
            stream=stream,
            base_prompt=base_prompt,
            # Generalized: previously hard-coded to 120; default preserved.
            timeout=int(kwargs.get("timeout", 120)),
            # Fix: was hard-coded True, silently ignoring the computed `verbose`
            # flag that already controls logger selection above.
            verbose=verbose,
        )

    async def run(self, *args, **kwargs) -> Tuple[Any, str, str]:
        """Not supported directly; build via get_agent() and run through the LlamaWorkflow runner."""
        raise NotImplementedError("Use get_agent() and run it via LlamaWorkflow runner.")
@@ -0,0 +1,96 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # ================================================== #
4
+ # This file is a part of PYGPT package #
5
+ # Website: https://pygpt.net #
6
+ # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
+ # MIT License #
8
+ # Created By : Marcin Szczygliński #
9
+ # Updated Date: 2025.09.24 23:00:00 #
10
+ # ================================================== #
11
+
12
+ from __future__ import annotations
13
+ from typing import Any, Dict, Tuple, Optional, List
14
+
15
+ from pygpt_net.core.types import AGENT_TYPE_OPENAI, AGENT_MODE_OPENAI
16
+ from pygpt_net.item.ctx import CtxItem
17
+ from pygpt_net.item.model import ModelItem
18
+ from pygpt_net.item.preset import PresetItem
19
+
20
+ from pygpt_net.core.agents.bridge import ConnectionContext
21
+ from pygpt_net.core.bridge import BridgeContext
22
+
23
+ from agents import TResponseInputItem
24
+
25
+ from ..base import BaseAgent
26
+
27
+ from pygpt_net.core.agents.custom.logging import NullLogger, StdLogger
28
+ from pygpt_net.core.agents.custom.runner import FlowOrchestrator
29
+ from pygpt_net.core.agents.custom.utils import make_option_getter
30
+
31
+
32
class Agent(BaseAgent):
    """Custom OpenAI agent provider: executes a user-defined flow schema via FlowOrchestrator."""

    def __init__(self, *args, **kwargs):
        super(Agent, self).__init__(*args, **kwargs)
        self.id = "openai_custom"
        self.type = AGENT_TYPE_OPENAI
        self.mode = AGENT_MODE_OPENAI
        self.name = "Custom"

    async def run(
            self,
            window: Any = None,
            agent_kwargs: Dict[str, Any] = None,
            previous_response_id: str = None,
            messages: List[TResponseInputItem] = None,
            ctx: CtxItem = None,
            stream: bool = False,
            bridge: ConnectionContext = None,
            use_partial_ctx: Optional[bool] = False,
            schema: Optional[list] = None,
    ) -> Tuple[CtxItem, str, str]:
        """
        Execute the flow described by `schema` and return (ctx, final output, last response id).

        Options are resolved preset-first (via get_option), falling back to values
        passed in `agent_kwargs`; the orchestrator does the actual node traversal.
        """
        kw = agent_kwargs or {}
        history = messages or []

        bridge_ctx: BridgeContext = kw.get("context", BridgeContext())
        active_preset: Optional[PresetItem] = bridge_ctx.preset
        active_model: ModelItem = kw.get("model", ModelItem())
        fn_tools: list = kw.get("function_tools", [])

        # Preset options win; kwargs act as fallbacks.
        prompt_base = self.get_option(active_preset, "base", "prompt")
        local_tools_ok = bool(self.get_option(active_preset, "base", "allow_local_tools"))
        remote_tools_ok = bool(self.get_option(active_preset, "base", "allow_remote_tools"))
        iter_limit = int(self.get_option(active_preset, "base", "max_iterations") or kw.get("max_iterations", 20))
        trace = self.get_option(active_preset, "debug", "trace_id") or kw.get("trace_id", None)
        stream_mode = self.get_option(active_preset, "router", "stream_mode") or kw.get("router_stream_mode", "realtime")

        flow_logger = StdLogger(prefix="[flow]") if bool(kw.get("verbose", False)) else NullLogger()
        getter = make_option_getter(self, active_preset)

        orchestrator = FlowOrchestrator(window=window, logger=flow_logger)
        flow_logger.debug(f"[schema] {schema}")

        result = await orchestrator.run_flow(
            schema=schema or [],
            messages=history,
            ctx=ctx,
            bridge=bridge,
            agent_kwargs=kw,
            preset=active_preset,
            model=active_model,
            stream=stream,
            use_partial_ctx=use_partial_ctx or False,
            base_prompt=prompt_base,
            allow_local_tools_default=local_tools_ok,
            allow_remote_tools_default=remote_tools_ok,
            function_tools=fn_tools,
            trace_id=trace,
            max_iterations=iter_limit,
            router_stream_mode=str(stream_mode).lower(),
            option_get=getter,
        )

        return result.ctx, result.final_output or "", result.last_response_id or ""
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.19 00:00:00 #
9
+ # Updated Date: 2025.09.24 00:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  import json
@@ -60,7 +60,10 @@ class JsonFileProvider(BaseProvider):
60
60
  try:
61
61
  if os.path.exists(path):
62
62
  with open(path, 'r', encoding="utf-8") as file:
63
- data = json.load(file)
63
+ try:
64
+ data = json.load(file)
65
+ except json.JSONDecodeError:
66
+ data = {}
64
67
  if data == "" or data is None:
65
68
  return {}
66
69
  if "layout" in data:
@@ -143,6 +146,7 @@ class JsonFileProvider(BaseProvider):
143
146
  'id': item.id,
144
147
  'name': item.name,
145
148
  'layout': item.layout,
149
+ 'schema': item.schema,
146
150
  }
147
151
 
148
152
  @staticmethod
@@ -159,6 +163,8 @@ class JsonFileProvider(BaseProvider):
159
163
  item.name = data['name']
160
164
  if "layout" in data:
161
165
  item.layout = data['layout']
166
+ if "schema" in data:
167
+ item.schema = data['schema']
162
168
 
163
169
  @staticmethod
164
170
  def serialize_layout(item: BuilderLayoutItem) -> Dict[str, Any]:
@@ -169,9 +175,9 @@ class JsonFileProvider(BaseProvider):
169
175
  :return: serialized item
170
176
  """
171
177
  return {
172
- 'id': item.id,
173
- 'name': item.name,
174
- 'data': item.data,
178
+ 'id': item.id if item else "",
179
+ 'name': item.name if item else "",
180
+ 'data': item.data if item else {},
175
181
  }
176
182
 
177
183
  @staticmethod
@@ -6,7 +6,7 @@
6
6
  # GitHub: https://github.com/szczyglis-dev/py-gpt #
7
7
  # MIT License #
8
8
  # Created By : Marcin Szczygliński #
9
- # Updated Date: 2025.09.15 01:00:00 #
9
+ # Updated Date: 2025.09.22 08:00:00 #
10
10
  # ================================================== #
11
11
 
12
12
  from typing import Optional, List, Dict
@@ -89,8 +89,8 @@ class OpenAILLM(BaseLLM):
89
89
  :param stream: stream mode
90
90
  :return: LLM provider instance
91
91
  """
92
- from .llama_index.openai import OpenAI as LlamaOpenAI
93
- from .llama_index.openai import OpenAIResponses as LlamaOpenAIResponses
92
+ from llama_index.llms.openai import OpenAI as LlamaOpenAI
93
+ from llama_index.llms.openai import OpenAIResponses as LlamaOpenAIResponses
94
94
  args = self.parse_args(model.llama_index, window)
95
95
  if "api_key" not in args:
96
96
  args["api_key"] = window.core.config.get("api_key", "")
@@ -98,7 +98,9 @@ class OpenAILLM(BaseLLM):
98
98
  args["model"] = model.id
99
99
 
100
100
  args = self.inject_llamaindex_http_clients(args, window.core.config)
101
- if window.core.config.get('api_use_responses_llama', False):
101
+ mode = window.core.config.get("mode")
102
+ # don't use Responses in agent modes
103
+ if window.core.config.get('api_use_responses_llama', False) and mode == MODE_LLAMA_INDEX:
102
104
  tools = []
103
105
  tools = window.core.api.openai.remote_tools.append_to_tools(
104
106
  mode=MODE_LLAMA_INDEX,