pygpt-net 2.6.60__py3-none-any.whl → 2.6.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. pygpt_net/CHANGELOG.txt +14 -0
  2. pygpt_net/__init__.py +3 -3
  3. pygpt_net/controller/chat/common.py +115 -6
  4. pygpt_net/controller/chat/input.py +4 -1
  5. pygpt_net/controller/chat/response.py +8 -2
  6. pygpt_net/controller/presets/presets.py +121 -6
  7. pygpt_net/controller/settings/editor.py +0 -15
  8. pygpt_net/controller/settings/profile.py +16 -4
  9. pygpt_net/controller/settings/workdir.py +30 -5
  10. pygpt_net/controller/theme/common.py +4 -2
  11. pygpt_net/controller/theme/markdown.py +4 -7
  12. pygpt_net/controller/theme/theme.py +2 -1
  13. pygpt_net/controller/ui/ui.py +32 -7
  14. pygpt_net/core/agents/custom/__init__.py +7 -1
  15. pygpt_net/core/agents/custom/llama_index/factory.py +17 -6
  16. pygpt_net/core/agents/custom/llama_index/runner.py +52 -4
  17. pygpt_net/core/agents/custom/llama_index/utils.py +12 -1
  18. pygpt_net/core/agents/custom/router.py +45 -6
  19. pygpt_net/core/agents/custom/runner.py +11 -5
  20. pygpt_net/core/agents/custom/schema.py +3 -1
  21. pygpt_net/core/agents/custom/utils.py +13 -1
  22. pygpt_net/core/agents/runners/llama_workflow.py +65 -5
  23. pygpt_net/core/agents/runners/openai_workflow.py +2 -1
  24. pygpt_net/core/db/viewer.py +11 -5
  25. pygpt_net/core/node_editor/graph.py +18 -9
  26. pygpt_net/core/node_editor/models.py +9 -2
  27. pygpt_net/core/node_editor/types.py +15 -1
  28. pygpt_net/core/presets/presets.py +216 -29
  29. pygpt_net/core/render/markdown/parser.py +0 -2
  30. pygpt_net/core/render/web/renderer.py +76 -11
  31. pygpt_net/data/config/config.json +5 -6
  32. pygpt_net/data/config/models.json +3 -3
  33. pygpt_net/data/config/settings.json +2 -38
  34. pygpt_net/data/css/style.dark.css +18 -0
  35. pygpt_net/data/css/style.light.css +20 -1
  36. pygpt_net/data/locale/locale.de.ini +66 -1
  37. pygpt_net/data/locale/locale.en.ini +64 -3
  38. pygpt_net/data/locale/locale.es.ini +66 -1
  39. pygpt_net/data/locale/locale.fr.ini +66 -1
  40. pygpt_net/data/locale/locale.it.ini +66 -1
  41. pygpt_net/data/locale/locale.pl.ini +67 -2
  42. pygpt_net/data/locale/locale.uk.ini +66 -1
  43. pygpt_net/data/locale/locale.zh.ini +66 -1
  44. pygpt_net/data/locale/plugin.cmd_system.en.ini +62 -66
  45. pygpt_net/item/ctx.py +23 -1
  46. pygpt_net/provider/agents/llama_index/flow_from_schema.py +2 -2
  47. pygpt_net/provider/agents/llama_index/workflow/codeact.py +9 -6
  48. pygpt_net/provider/agents/llama_index/workflow/openai.py +38 -11
  49. pygpt_net/provider/agents/llama_index/workflow/planner.py +36 -16
  50. pygpt_net/provider/agents/llama_index/workflow/supervisor.py +60 -10
  51. pygpt_net/provider/agents/openai/agent.py +3 -1
  52. pygpt_net/provider/agents/openai/agent_b2b.py +13 -9
  53. pygpt_net/provider/agents/openai/agent_planner.py +6 -2
  54. pygpt_net/provider/agents/openai/agent_with_experts.py +4 -1
  55. pygpt_net/provider/agents/openai/agent_with_experts_feedback.py +4 -2
  56. pygpt_net/provider/agents/openai/agent_with_feedback.py +4 -2
  57. pygpt_net/provider/agents/openai/evolve.py +6 -2
  58. pygpt_net/provider/agents/openai/supervisor.py +3 -1
  59. pygpt_net/provider/api/openai/agents/response.py +1 -0
  60. pygpt_net/provider/core/config/patch.py +18 -1
  61. pygpt_net/provider/core/config/patches/patch_before_2_6_42.py +0 -6
  62. pygpt_net/tools/agent_builder/tool.py +48 -26
  63. pygpt_net/tools/agent_builder/ui/dialogs.py +36 -28
  64. pygpt_net/ui/__init__.py +2 -4
  65. pygpt_net/ui/dialog/about.py +58 -38
  66. pygpt_net/ui/dialog/db.py +142 -3
  67. pygpt_net/ui/dialog/preset.py +47 -8
  68. pygpt_net/ui/layout/toolbox/presets.py +64 -16
  69. pygpt_net/ui/main.py +2 -2
  70. pygpt_net/ui/widget/dialog/confirm.py +27 -3
  71. pygpt_net/ui/widget/dialog/db.py +0 -0
  72. pygpt_net/ui/widget/draw/painter.py +90 -1
  73. pygpt_net/ui/widget/lists/preset.py +908 -60
  74. pygpt_net/ui/widget/node_editor/command.py +10 -10
  75. pygpt_net/ui/widget/node_editor/config.py +157 -0
  76. pygpt_net/ui/widget/node_editor/editor.py +223 -153
  77. pygpt_net/ui/widget/node_editor/item.py +12 -11
  78. pygpt_net/ui/widget/node_editor/node.py +246 -13
  79. pygpt_net/ui/widget/node_editor/view.py +179 -63
  80. pygpt_net/ui/widget/tabs/output.py +1 -1
  81. pygpt_net/ui/widget/textarea/input.py +157 -23
  82. pygpt_net/utils.py +114 -2
  83. {pygpt_net-2.6.60.dist-info → pygpt_net-2.6.62.dist-info}/METADATA +26 -100
  84. {pygpt_net-2.6.60.dist-info → pygpt_net-2.6.62.dist-info}/RECORD +86 -85
  85. {pygpt_net-2.6.60.dist-info → pygpt_net-2.6.62.dist-info}/LICENSE +0 -0
  86. {pygpt_net-2.6.60.dist-info → pygpt_net-2.6.62.dist-info}/WHEEL +0 -0
  87. {pygpt_net-2.6.60.dist-info → pygpt_net-2.6.62.dist-info}/entry_points.txt +0 -0
pygpt_net/core/agents/runners/llama_workflow.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 02:00:00 #
+# Updated Date: 2025.09.26 17:00:00 #
 # ================================================== #

 import re
@@ -18,6 +18,7 @@ from llama_index.core.agent.workflow import (
     ToolCallResult,
     AgentStream,
     AgentOutput,
+    # AgentInput, # not needed currently
 )
 from workflows.errors import WorkflowCancelledByUser

@@ -38,6 +39,7 @@ class LlamaWorkflow(BaseRunner):
         """
         super(LlamaWorkflow, self).__init__(window)
         self.window = window
+        self.last_response_id = None

     async def run(
             self,
@@ -177,12 +179,13 @@ class LlamaWorkflow(BaseRunner):

         prev_output = ctx.live_output
         if prev_output:
-            prev_output = self.filter_output(prev_output) # remove all <execute>...</execute>
+            prev_output = self.filter_output(prev_output) # remove all [!exec]...[/!exec]

         response_ctx.set_agent_final_response(ctx.agent_final_response) # always set to further use
         response_ctx.set_output(prev_output) # append from stream
         response_ctx.extra["agent_output"] = True # mark as output response
         response_ctx.extra["agent_finish"] = True # mark as finished
+        response_ctx.set_agent_name(ctx.get_agent_name()) # store last agent name

         if "agent_input" in response_ctx.extra:
             del response_ctx.extra["agent_input"] # remove agent input from extra
@@ -254,8 +257,10 @@
         item_ctx.output = "" # empty to prevent render
         item_ctx.stream = "" # for stream

+        # Keep last known agent name to avoid redundant ctx updates.
+        last_agent_name: Optional[str] = None
+
         async for event in handler.stream_events():
-            print(event)
             if self.is_stopped():
                 # persist current output on stop
                 item_ctx.output = item_ctx.live_output
@@ -297,7 +302,24 @@
                     begin=begin,
                     stream=True,
                 )
+                # Propagate agent name early based on StepEvent meta, if available.
+                try:
+                    meta = getattr(event, "meta", {}) or {}
+                    next_name = meta.get("agent_name")
+                    if next_name:
+                        last_agent_name = self._apply_agent_name_to_ctx(item_ctx, next_name, last_agent_name)
+                except Exception:
+                    pass
+                # Optional: mark start of a new stream block
+                begin = True
             elif isinstance(event, AgentStream):
+                # Update agent name from event if present; fallback to header parsing.
+                name = getattr(event, "current_agent_name", None)
+                if not name:
+                    name = self._guess_agent_name_from_text(getattr(event, "delta", "") or "")
+                if name:
+                    last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
+
                 if verbose:
                     print(f"{event.delta}", end="", flush=True)
                 if event.delta:
@@ -307,6 +329,10 @@
                     self.send_stream(item_ctx, signals, begin) # send stream to webview
                     begin = False
             elif isinstance(event, AgentOutput):
+                # Ensure final agent name is applied as well.
+                name = getattr(event, "current_agent_name", None)
+                if name:
+                    last_agent_name = self._apply_agent_name_to_ctx(item_ctx, name, last_agent_name)
                 thought, answer = self.extract_final_response(str(event))
                 if answer:
                     item_ctx.set_agent_final_response(answer)
@@ -348,6 +374,40 @@
         next_ctx.set_output("")
         next_ctx.partial = True
         next_ctx.extra["agent_output"] = True # allow usage in history
-
+        next_ctx.set_agent_name(ctx.get_agent_name()) # propagate agent name
         self.send_response(next_ctx, signals, KernelEvent.APPEND_DATA)
-        return next_ctx
+
+        return next_ctx
+
+    # ===== helpers for agent name propagation =====
+
+    def _apply_agent_name_to_ctx(self, ctx: CtxItem, name: str, last_known: Optional[str]) -> str:
+        """
+        Apply agent name to your context, avoiding redundant updates.
+        Falls back to ctx.extra['agent_name'] if set_agent_name is unavailable.
+        """
+        if not name:
+            return last_known or ""
+        if last_known and last_known == name:
+            return last_known
+        try:
+            if hasattr(ctx, "set_agent_name") and callable(getattr(ctx, "set_agent_name")):
+                ctx.set_agent_name(name)
+            # Always mirror into extra for downstream consumers
+            ctx.extra["agent_name"] = name
+        except Exception:
+            ctx.extra["agent_name"] = name
+        return name
+
+    def _guess_agent_name_from_text(self, text: str) -> Optional[str]:
+        """
+        Try to infer agent name from header like '**Name**' which our workflow emits
+        before each agent block.
+        """
+        if not text:
+            return None
+        # Look for the first bold segment – keep it lenient
+        m = re.search(r"\*\*([^*]+?)\*\*", text)
+        if m:
+            return m.group(1).strip()
+        return None
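Note on the fallback above: when an AgentStream event carries no current_agent_name, the runner parses the agent name out of the '**Name**' header emitted at the start of each agent block. A minimal standalone sketch of that parsing step (illustration only, not code from the package; the sample delta string is invented):

import re
from typing import Optional

def guess_agent_name_from_text(text: str) -> Optional[str]:
    # Same lenient first-bold-segment match as _guess_agent_name_from_text() above.
    if not text:
        return None
    m = re.search(r"\*\*([^*]+?)\*\*", text)
    return m.group(1).strip() if m else None

print(guess_agent_name_from_text("**Researcher**\nSearching sources..."))  # -> Researcher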
pygpt_net/core/agents/runners/openai_workflow.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.24 03:00:00 #
+# Updated Date: 2025.09.26 17:00:00 #
 # ================================================== #

 from typing import Dict, Any, List, Optional
@@ -237,6 +237,7 @@ class OpenAIWorkflow(BaseRunner):
         response_ctx.set_agent_final_response(output) # always set to further use
         response_ctx.extra["agent_output"] = True # mark as output response
         response_ctx.extra["agent_finish"] = True # mark as finished
+        response_ctx.set_agent_name(ctx.get_agent_name()) # store last agent name
         response_ctx.msg_id = response_id # set response id for OpenAI

         if ctx.agent_final_response: # only if not empty
pygpt_net/core/db/viewer.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.05 18:00:00 #
+# Updated Date: 2025.09.26 03:00:00 #
 # ================================================== #

 import json
@@ -154,14 +154,18 @@ class Viewer:
         msg = f"[DB] Created DB backup: {backup_path}"
         self.log(msg)

+        tables = self.database.get_tables()
+        primary_key = tables[data['table']]['primary_key']
+
         with self.database.get_db().begin() as conn:
             conn.execute(
-                text(f"DELETE FROM {data['table']} WHERE id = :row_id")
+                text(f"DELETE FROM {data['table']} WHERE {primary_key} = :row_id")
                 .bindparams(row_id=data['row_id'])
             )
             msg = f"[DB] Deleted row ID {data['row_id']} from table {data['table']}"
             self.log(msg)
-        self.database.window.ui.debug["db"].browser.update_table_view()
+        # Force refresh to invalidate caches and handle pagination edge cases
+        self.database.window.ui.debug["db"].browser.force_refresh()

     def update_row(self, data: Dict[str, Any]):
         """
@@ -207,7 +211,8 @@
             )
             msg = f"[DB] Updated row ID {data['id']} in table {data['table']}"
             self.log(msg)
-        self.database.window.ui.debug["db"].browser.update_table_view()
+        # Force refresh to invalidate caches and handle pagination edge cases
+        self.database.window.ui.debug["db"].browser.force_refresh()

     def truncate_table(self, data: Dict[str, Any], reset: bool = False):
         """
@@ -230,7 +235,8 @@
         else:
             msg = f"[DB] Deleted all rows from table {data['table']}"
         self.log(msg)
-        self.database.window.ui.debug["db"].browser.update_table_view()
+        # Force refresh to invalidate caches and handle pagination edge cases
+        self.database.window.ui.debug["db"].browser.force_refresh()

     def log(self, msg: str):
         """
pygpt_net/core/node_editor/graph.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.24 00:00:00 #
+# Updated Date: 2025.09.25 00:00:00 #
 # ================================================== #

 from __future__ import annotations
@@ -83,16 +83,24 @@ class NodeGraph(QObject):
         props: Dict[str, PropertyModel] = {}
         for ps in spec.properties:
             props[ps.id] = PropertyModel(
-                uuid=gen_uuid(), id=ps.id, type=ps.type, name=ps.name or ps.id,
-                editable=ps.editable, value=ps.value,
-                allowed_inputs=ps.allowed_inputs, allowed_outputs=ps.allowed_outputs,
-                options=ps.options
+                uuid=gen_uuid(),
+                id=ps.id,
+                type=ps.type,
+                name=ps.name or ps.id,
+                editable=ps.editable,
+                value=ps.value,
+                allowed_inputs=ps.allowed_inputs,
+                allowed_outputs=ps.allowed_outputs,
+                options=ps.options,
+                placeholder=getattr(ps, "placeholder", None),
+                description=getattr(ps, "description", None),
             )
         # Auto inject read-only 'base_id' property for visibility if base_id defined and not present
         if spec.base_id and "base_id" not in props:
             props["base_id"] = PropertyModel(
                 uuid=gen_uuid(), id="base_id", type="str", name="Base ID",
-                editable=False, value=base_id, allowed_inputs=0, allowed_outputs=0
+                editable=False, value=base_id, allowed_inputs=0, allowed_outputs=0,
+                placeholder=None, description="Internal base identifier (read-only)."
             )

         node = NodeModel(uuid=gen_uuid(), id=nid, name=name or spec.title or nid, type=type_name, properties=props)
@@ -195,7 +203,8 @@
             "type": n.type,
             "id": n.id,
             "name": n.name,
-            "values": {pid: p.value for pid, p in n.properties.items()},
+            # UI-only fields like HelpLabel are skipped
+            "values": {pid: p.value for pid, p in n.properties.items() if p.type != "HelpLabel"},
         }
         conns_out = [{"src": [c.src_node, c.src_prop], "dst": [c.dst_node, c.dst_prop]}
                      for c in self.connections.values()]
@@ -226,8 +235,8 @@
                     "out": list(outgoing.get((n.uuid, pid), [])),
                 }
             else:
-                # Skip internal helper fields if needed
-                if pid == "base_id":
+                # Skip internal/helper and UI-only fields
+                if pid == "base_id" or prop.type == "HelpLabel":
                     continue
                 slots[pid] = prop.value
         result.append({
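For clarity, the export filter added above drops UI-only HelpLabel properties from the serialized values map. A tiny sketch with hypothetical property objects:

from types import SimpleNamespace

properties = {
    "prompt": SimpleNamespace(type="text", value="You are a helpful agent."),
    "hint": SimpleNamespace(type="HelpLabel", value="Shown only in the editor UI."),
}
values = {pid: p.value for pid, p in properties.items() if p.type != "HelpLabel"}
print(values)  # {'prompt': 'You are a helpful agent.'}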
pygpt_net/core/node_editor/models.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.24 00:00:00 #
+# Updated Date: 2025.09.25 00:00:00 #
 # ================================================== #

 from __future__ import annotations
@@ -22,13 +22,16 @@ from .utils import gen_uuid
 class PropertyModel:
     uuid: str
     id: str
-    type: str # "slot", "str", "int", "float", "bool", "combo", "text"
+    type: str # "slot", "str", "int", "float", "bool", "combo", "text", "HelpLabel"
     name: str
     editable: bool = True
     value: Any = None
     allowed_inputs: int = 0 # 0 none, -1 unlimited, >0 limit
     allowed_outputs: int = 0 # 0 none, -1 unlimited, >0 limit
     options: Optional[List[str]] = None # for combo
+    # UI helpers
+    placeholder: Optional[str] = None
+    description: Optional[str] = None

     def to_dict(self) -> dict:
         return {
@@ -41,6 +44,8 @@ class PropertyModel:
             "allowed_inputs": self.allowed_inputs,
             "allowed_outputs": self.allowed_outputs,
             "options": self.options or [],
+            "placeholder": self.placeholder,
+            "description": self.description,
         }

     @staticmethod
@@ -55,6 +60,8 @@ class PropertyModel:
             allowed_inputs=d.get("allowed_inputs", 0),
             allowed_outputs=d.get("allowed_outputs", 0),
             options=d.get("options") or None,
+            placeholder=d.get("placeholder"),
+            description=d.get("description"),
         )

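The new placeholder/description fields are carried through to_dict()/from_dict(), so they survive persistence. A standalone sketch of the round trip (field names follow the diff; the @dataclass decorator and the sample values are assumptions, not the package class):

from dataclasses import dataclass, asdict
from typing import Any, List, Optional

@dataclass
class PropertySketch:
    uuid: str
    id: str
    type: str = "str"
    name: str = ""
    editable: bool = True
    value: Any = None
    allowed_inputs: int = 0
    allowed_outputs: int = 0
    options: Optional[List[str]] = None
    placeholder: Optional[str] = None  # new UI hint text
    description: Optional[str] = None  # new tooltip/help text

p = PropertySketch(uuid="u1", id="prompt", name="Prompt",
                   placeholder="Type a system prompt...",
                   description="Shown as a tooltip in the node editor.")
restored = PropertySketch(**asdict(p))
assert restored.placeholder == p.placeholder and restored.description == p.description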
 
pygpt_net/core/node_editor/types.py

@@ -6,7 +6,7 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.09.24 00:00:00 #
+# Updated Date: 2025.09.26 12:00:00 #
 # ================================================== #

 from __future__ import annotations
@@ -26,17 +26,22 @@ class PropertySpec:
     allowed_inputs: int = 0
     allowed_outputs: int = 0
     options: Optional[List[str]] = None
+    placeholder: Optional[str] = None # hint text for text editors
+    description: Optional[str] = None # tooltip/help text shown in UI


 @dataclass
 class NodeTypeSpec:
     type_name: str
     title: Optional[str] = None
+    # UI-only human-readable label used for menus; never persisted nor used as an identifier
+    display_name: Optional[str] = None
     properties: List[PropertySpec] = field(default_factory=list)
     # Below are optional extensions for agent-flow needs:
     base_id: Optional[str] = None # base prefix for friendly ids, e.g. "agent"
     export_kind: Optional[str] = None # short kind for export, e.g. "agent", "start"
     bg_color: Optional[str] = None # optional per-type background color (CSS/hex)
+    max_num: Optional[int] = None # optional per-layout cap; None or <=0 means unlimited

 class NodeTypeRegistry:
     """Registry for node type specifications. Extend/override in subclasses."""
@@ -54,6 +59,15 @@
     def get(self, type_name: str) -> Optional[NodeTypeSpec]:
         return self._types.get(type_name)

+    def display_name(self, type_name: str) -> str:
+        """Return UI label for a type: spec.display_name if non-empty, otherwise type_name."""
+        spec = self.get(type_name)
+        if spec:
+            dn = getattr(spec, "display_name", None)
+            if isinstance(dn, str) and dn.strip():
+                return dn
+        return type_name
+
     def _install_default_types(self):
         # Example/basic nodes kept intact
         self.register(NodeTypeSpec(
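The new display_name() accessor gives the node editor a menu label without touching the persisted type_name. A simplified sketch of the fallback behavior (not the package classes; names and sample labels are illustrative):

from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class SpecSketch:
    type_name: str
    display_name: Optional[str] = None

class RegistrySketch:
    def __init__(self):
        self._types: Dict[str, SpecSketch] = {}

    def register(self, spec: SpecSketch):
        self._types[spec.type_name] = spec

    def display_name(self, type_name: str) -> str:
        # Non-empty display_name wins, otherwise fall back to the raw type_name.
        spec = self._types.get(type_name)
        if spec and isinstance(spec.display_name, str) and spec.display_name.strip():
            return spec.display_name
        return type_name

reg = RegistrySketch()
reg.register(SpecSketch(type_name="agent", display_name="Agent (LLM)"))
print(reg.display_name("agent"))  # Agent (LLM)
print(reg.display_name("start"))  # start (fallback)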
pygpt_net/core/presets/presets.py

@@ -6,12 +6,13 @@
 # GitHub: https://github.com/szczyglis-dev/py-gpt #
 # MIT License #
 # Created By : Marcin Szczygliński #
-# Updated Date: 2025.08.28 09:00:00 #
+# Updated Date: 2025.09.26 03:00:00 #
 # ================================================== #

 import copy
 import uuid
-from typing import Optional, Tuple, Dict
+from collections import OrderedDict
+from typing import Optional, Tuple, Dict, List

 from packaging.version import Version
 from pygpt_net.core.types import (
@@ -165,10 +166,6 @@ class Presets:
             return MODE_COMPLETION
         if preset.img:
             return MODE_IMAGE
-        # if preset.vision:
-        #     return MODE_VISION
-        # if preset.langchain:
-        #     return MODE_LANGCHAIN
         if preset.assistant:
             return MODE_ASSISTANT
         if preset.llama_index:
@@ -214,12 +211,10 @@ class Presets:
         attr = self._MODE_TO_ATTR.get(mode)
         if not attr:
             return
-        i = 0
-        for key, item in self.items.items():
-            if getattr(item, attr, False):
-                if i == idx:
-                    return key
-                i += 1
+        ids = list(self.get_by_mode(mode).keys())
+        if idx < 0 or idx >= len(ids):
+            return
+        return ids[idx]

     def get_by_id(self, mode: str, id: str) -> Optional[PresetItem]:
         """
@@ -265,7 +260,19 @@
         attr = self._MODE_TO_ATTR.get(mode)
         if not attr:
             return {}
-        return {id: item for id, item in self.items.items() if getattr(item, attr, False)}
+        data = {id: item for id, item in self.items.items() if getattr(item, attr, False)}
+        if not self._dnd_enabled():
+            return data
+        ordered_ids = self._ordered_ids_for_mode(mode)
+        out = OrderedDict()
+        for pid in ordered_ids:
+            itm = data.get(pid)
+            if itm is not None:
+                out[pid] = itm
+        for pid, itm in data.items():
+            if pid not in out:
+                out[pid] = itm
+        return out

     def get_idx_by_id(self, mode: str, id: str) -> int:
         """
@@ -275,16 +282,13 @@
         :param id: preset id
         :return: preset idx
         """
-        attr = self._MODE_TO_ATTR.get(mode)
-        if not attr:
+        if id is None:
+            return 0
+        ids = list(self.get_by_mode(mode).keys())
+        try:
+            return ids.index(id)
+        except ValueError:
             return 0
-        i = 0
-        for key, item in self.items.items():
-            if getattr(item, attr, False):
-                if key == id:
-                    return i
-                i += 1
-        return 0

     def get_default(self, mode: str) -> Optional[str]:
         """
@@ -293,12 +297,9 @@
         :param mode: mode name
         :return: default prompt name
         """
-        attr = self._MODE_TO_ATTR.get(mode)
-        if not attr:
-            return None
-        for key, item in self.items.items():
-            if getattr(item, attr, False):
-                return key
+        data = self.get_by_mode(mode)
+        for key in data.keys():
+            return key
         return None

     def get_duplicate_name(self, id: str) -> Tuple[str, str]:
@@ -331,6 +332,7 @@
         self.items[id].filename = id
         self.items[id].uuid = str(uuid.uuid4())
         self.sort_by_name()
+        self._order_append_new_item(id)
         return id

     def remove(
@@ -345,7 +347,10 @@
         :param remove_file: also remove preset JSON config file
         """
         if id in self.items:
+            item = self.items[id]
+            rem_uuid = item.uuid
             self.items.pop(id)
+            self._order_remove_uuid(rem_uuid)

             if remove_file:
                 self.provider.remove(id)
@@ -366,6 +371,7 @@
         :param preset: preset item
         """
         self.items[preset.filename] = preset
+        self._order_append_new_item(preset.filename)

     def update_and_save(self, preset: PresetItem):
         """
@@ -375,6 +381,7 @@
         """
         self.items[preset.filename] = preset
         self.save(preset.filename)
+        self._order_append_new_item(preset.filename)

     def get_all(self) -> Dict[str, PresetItem]:
         """
@@ -411,6 +418,7 @@
         self.patch_duplicated()
         self.sort_by_name()
         self.append_current()
+        self._order_sync_all()

     def save(self, id: str):
         """
@@ -484,4 +492,183 @@
                 patched = True
             uuids.add(item.uuid)
         if patched:
-            self.save_all()
+            self.save_all()
+
+    # ----------------------------
+    # Ordering (drag & drop) logic
+    # ----------------------------
+
+    def _cfg_get(self, key, default=None):
+        try:
+            return self.window.core.config.get(key)
+        except Exception:
+            return default
+
+    def _cfg_set(self, key, value):
+        try:
+            self.window.core.config.set(key, value)
+        except Exception:
+            pass
+
+    def _dnd_enabled(self) -> bool:
+        """
+        Check global switch for DnD ordering.
+        """
+        v = self._cfg_get('presets.drag_and_drop.enabled', False)
+        return bool(v)
+
+    @staticmethod
+    def _is_special_id(pid: str) -> bool:
+        """
+        current.* presets are special and pinned at top; not movable.
+        """
+        return pid.startswith("current.")
+
+    def _uuid_to_id_map(self) -> Dict[str, str]:
+        return {item.uuid: pid for pid, item in self.items.items() if item.uuid}
+
+    def _visible_ids_for_mode(self, mode: str) -> List[str]:
+        attr = self._MODE_TO_ATTR.get(mode)
+        if not attr:
+            return []
+        return [pid for pid, it in self.items.items() if getattr(it, attr, False)]
+
+    def _visible_regular_ids_for_mode(self, mode: str) -> List[str]:
+        return [pid for pid in self._visible_ids_for_mode(mode) if not self._is_special_id(pid)]
+
+    def _visible_regular_uuids_for_mode(self, mode: str) -> List[str]:
+        ids = self._visible_regular_ids_for_mode(mode)
+        return [self.items[pid].uuid for pid in ids if pid in self.items and self.items[pid].uuid]
+
+    def _build_global_uuid_order(self) -> List[str]:
+        """
+        Rebuild 'global' order each time based on name-sorted presets (excluding current.*).
+        """
+        regs = [(pid, it) for pid, it in self.items.items() if not self._is_special_id(pid)]
+        regs.sort(key=lambda x: x[1].name)
+        return [it.uuid for pid, it in regs if it.uuid]
+
+    def _order_get_store(self) -> Dict[str, List[str]]:
+        store = self._cfg_get('presets_order', {}) or {}
+        fixed = {}
+        for k, v in store.items():
+            if isinstance(v, dict):
+                try:
+                    seq = [v[i] for i in sorted(v.keys(), key=lambda x: int(x))]
+                except Exception:
+                    seq = list(v.values())
+                fixed[k] = seq
+            elif isinstance(v, list):
+                fixed[k] = v
+        return fixed
+
+    def _order_set_store(self, store: Dict[str, List[str]]):
+        self._cfg_set('presets_order', store)
+
+    def _order_sync_mode(self, mode: str, store: Dict[str, List[str]]) -> List[str]:
+        """
+        Ensure mode order is valid:
+        - Start from mode order or fallback to global
+        - Drop unknown UUIDs
+        - Append missing visible UUIDs at the end
+        """
+        visible = self._visible_regular_uuids_for_mode(mode)
+        visible_set = set(visible)
+
+        base = list(store.get(mode) or [])
+        if not base:
+            base = [u for u in store.get('global', []) if u in visible_set]
+
+        base = [u for u in base if u in visible_set]
+
+        seen = set(base)
+        for u in visible:
+            if u not in seen:
+                base.append(u)
+                seen.add(u)
+
+        dedup = []
+        s = set()
+        for u in base:
+            if u not in s:
+                dedup.append(u)
+                s.add(u)
+
+        store[mode] = dedup
+        return dedup
+
+    def _order_sync_all(self):
+        """
+        Sync presets_order with current items and rebuild 'global' each time.
+        """
+        store = self._order_get_store()
+        store['global'] = self._build_global_uuid_order()
+
+        existing = set([it.uuid for it in self.items.values() if it.uuid])
+        for k, lst in list(store.items()):
+            if isinstance(lst, list):
+                store[k] = [u for u in lst if u in existing]
+
+        for mode in self._MODE_TO_ATTR.keys():
+            self._order_sync_mode(mode, store)
+
+        self._order_set_store(store)
+
+    def _ordered_ids_for_mode(self, mode: str) -> List[str]:
+        """
+        Produce ordered preset IDs for given mode:
+        - current.<mode> first (if exists)
+        - then remaining items by order stored as UUIDs
+        """
+        attr = self._MODE_TO_ATTR.get(mode)
+        if not attr:
+            return []
+        store = self._order_get_store()
+        ordered_uuids = self._order_sync_mode(mode, store)
+        self._order_set_store(store)
+
+        uuid_to_id = self._uuid_to_id_map()
+        head_id = f"current.{mode}"
+        out: List[str] = []
+        if head_id in self.items and getattr(self.items[head_id], attr, False):
+            out.append(head_id)
+        for u in ordered_uuids:
+            pid = uuid_to_id.get(u)
+            if pid and getattr(self.items.get(pid, PresetItem()), attr, False):
+                out.append(pid)
+        return out
+
+    def _order_append_new_item(self, pid: str):
+        """
+        Append new preset (by ID) to the end of all applicable mode orders.
+        """
+        if pid not in self.items:
+            return
+        if self._is_special_id(pid):
+            return
+        item = self.items[pid]
+        if not item.uuid:
+            return
+        store = self._order_get_store()
+        modes = [m for m, attr in self._MODE_TO_ATTR.items() if getattr(item, attr, False)]
+        for m in modes:
+            seq = list(store.get(m) or [])
+            if item.uuid not in seq:
+                seq.append(item.uuid)
+                store[m] = seq
+        self._order_set_store(store)
+
+    def _order_remove_uuid(self, rem_uuid: Optional[str]):
+        """
+        Remove a UUID from all order lists (including global).
+        """
+        if not rem_uuid:
+            return
+        store = self._order_get_store()
+        changed = False
+        for k, lst in list(store.items()):
+            if isinstance(lst, list) and rem_uuid in lst:
+                store[k] = [u for u in lst if u != rem_uuid]
+                changed = True
+        if changed:
+            self._order_set_store(store)
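The ordering store read by _order_get_store() is keyed by mode plus a rebuilt 'global' list and holds preset UUIDs; values may come back from config as index-keyed dicts after JSON round-trips, which the method normalizes back into plain lists. A sketch of that normalization with invented UUIDs and mode keys:

store = {
    "global": {"0": "uuid-b", "1": "uuid-a"},  # dict form, e.g. after a JSON round-trip
    "chat": ["uuid-a", "uuid-b"],              # list form is kept as-is
}

fixed = {}
for k, v in store.items():
    if isinstance(v, dict):
        try:
            fixed[k] = [v[i] for i in sorted(v.keys(), key=lambda x: int(x))]
        except Exception:
            fixed[k] = list(v.values())
    elif isinstance(v, list):
        fixed[k] = v

print(fixed)  # {'global': ['uuid-b', 'uuid-a'], 'chat': ['uuid-a', 'uuid-b']}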
pygpt_net/core/render/markdown/parser.py

@@ -63,8 +63,6 @@ class Parser:
         html = self.md.convert(text.strip())
         soup = BeautifulSoup(html, 'html.parser')
         self.strip_whitespace_lists(soup) # strip whitespace from codeblocks
-        if self.window.core.config.get("ctx.convert_lists"):
-            self.convert_lists_to_paragraphs(soup) # convert lists to paragraphs
         self.strip_whitespace_codeblocks(soup) # strip whitespace from codeblocks
         self.parse_code_blocks(soup) # parse code blocks
         self.format_images(soup) # add width to img tags