zrb 1.11.0__py3-none-any.whl → 1.12.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zrb/builtin/llm/chat_session.py CHANGED
@@ -25,6 +25,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
     is_tty = ctx.is_tty
     reader = await _setup_input_reader(is_tty)
     multiline_mode = False
+    current_modes = ctx.input.modes
     user_inputs = []
     while True:
         await asyncio.sleep(0.01)
@@ -38,7 +39,9 @@ async def read_user_prompt(ctx: AnyContext) -> str:
        if user_input.strip().lower() in ("/bye", "/quit", "/q", "/exit"):
            user_prompt = "\n".join(user_inputs)
            user_inputs = []
-           result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+           result = await _trigger_ask_and_wait_for_result(
+               ctx, user_prompt, current_modes
+           )
            if result is not None:
                final_result = result
            break
@@ -49,9 +52,18 @@ async def read_user_prompt(ctx: AnyContext) -> str:
            multiline_mode = False
            user_prompt = "\n".join(user_inputs)
            user_inputs = []
-           result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+           result = await _trigger_ask_and_wait_for_result(
+               ctx, user_prompt, current_modes
+           )
            if result is not None:
                final_result = result
+       elif user_input.strip().lower().startswith("/mode"):
+           mode_parts = user_input.split(" ", maxsplit=2)
+           if len(mode_parts) > 1:
+               current_modes = mode_parts[1]
+           ctx.print(f"Current mode: {current_modes}", plain=True)
+           ctx.print("", plain=True)
+           continue
        elif user_input.strip().lower() in ("/help", "/info"):
            _show_info(ctx)
            continue
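The new `/mode` branch keeps whatever follows the first space as the comma-separated mode string used for subsequent prompts; entered without an argument, it simply reports the current value. A minimal sketch of that parsing, using only standard `str.split` semantics:

```python
# Sketch of the /mode parsing above; plain Python, no zrb required.
current_modes = "coding"  # whatever was active before (ctx.input.modes initially)
user_input = "/mode coding,researching"
mode_parts = user_input.split(" ", maxsplit=2)  # ["/mode", "coding,researching"]
if len(mode_parts) > 1:
    current_modes = mode_parts[1]
print(f"Current mode: {current_modes}")  # Current mode: coding,researching
```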
@@ -61,7 +73,9 @@ async def read_user_prompt(ctx: AnyContext) -> str:
            continue
        user_prompt = "\n".join(user_inputs)
        user_inputs = []
-       result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+       result = await _trigger_ask_and_wait_for_result(
+           ctx, user_prompt, current_modes
+       )
        if result is not None:
            final_result = result
    return final_result
@@ -74,16 +88,26 @@ def _show_info(ctx: AnyContext):
        ctx: The context object for the task.
    """
    ctx.print(
-       (
-           f"  {stylize_bold_yellow('/bye')} {stylize_faint('Quit from chat session')}\n"
-           f"  {stylize_bold_yellow('/multi')} {stylize_faint('Start multiline input')}\n"
-           f"  {stylize_bold_yellow('/end')} {stylize_faint('End multiline input')}\n"
-           f"  {stylize_bold_yellow('/help')} {stylize_faint('Show this message')}\n"
+       "\n".join(
+           [
+               _format_info_line("/bye", "Quit from chat session"),
+               _format_info_line("/multi", "Start multiline input"),
+               _format_info_line("/end", "End multiline input"),
+               _format_info_line("/modes", "Show current modes"),
+               _format_info_line("/modes <mode1,mode2,..>", "Set current modes"),
+               _format_info_line("/help", "Show this message"),
+           ]
        ),
        plain=True,
    )


+def _format_info_line(command: str, description: str) -> str:
+    styled_command = stylize_bold_yellow(command.ljust(25))
+    styled_description = stylize_faint(description)
+    return f"  {styled_command} {styled_description}"
+
+
 async def _handle_initial_message(ctx: AnyContext) -> str:
     """Processes the initial message from the command line."""
     if not ctx.input.message or ctx.input.message.strip() == "":
@@ -94,6 +118,7 @@ async def _handle_initial_message(ctx: AnyContext) -> str:
    result = await _trigger_ask_and_wait_for_result(
        ctx,
        user_prompt=ctx.input.message,
+       modes=ctx.input.modes,
        previous_session_name=ctx.input.previous_session,
        start_new=ctx.input.start_new,
    )
@@ -131,6 +156,7 @@ async def _read_next_line(is_interactive: bool, reader, ctx: AnyContext) -> str:
 async def _trigger_ask_and_wait_for_result(
     ctx: AnyContext,
     user_prompt: str,
+    modes: str,
     previous_session_name: str | None = None,
     start_new: bool = False,
 ) -> str | None:
@@ -148,7 +174,7 @@ async def _trigger_ask_and_wait_for_result(
    """
    if user_prompt.strip() == "":
        return None
-   await _trigger_ask(ctx, user_prompt, previous_session_name, start_new)
+   await _trigger_ask(ctx, user_prompt, modes, previous_session_name, start_new)
    result = await _wait_ask_result(ctx)
    md_result = _render_markdown(result) if result is not None else ""
    ctx.print("\n🤖 >>", plain=True)
@@ -193,12 +219,14 @@ def get_llm_ask_input_mapping(callback_ctx: AnyContext):
        "start-new": data.get("start_new"),
        "previous-session": data.get("previous_session_name"),
        "message": data.get("message"),
+       "modes": data.get("modes"),
    }


 async def _trigger_ask(
     ctx: AnyContext,
     user_prompt: str,
+    modes: str,
     previous_session_name: str | None = None,
     start_new: bool = False,
 ):
@@ -218,6 +246,7 @@ async def _trigger_ask(
            "previous_session_name": previous_session_name,
            "start_new": start_new,
            "message": user_prompt,
+           "modes": modes,
        }
    )
zrb/builtin/llm/llm_ask.py CHANGED
@@ -65,6 +65,14 @@ _llm_ask_inputs = [
        allow_positional_parsing=False,
        always_prompt=False,
    ),
+   TextInput(
+       "modes",
+       description="Modes",
+       prompt="Modes",
+       default="coding",
+       allow_positional_parsing=False,
+       always_prompt=False,
+   ),
    BoolInput(
        "start-new",
        description="Start new conversation (LLM will forget everything)",
@@ -101,6 +109,9 @@ llm_ask: LLMTask = llm_group.add_task(
        system_prompt=lambda ctx: (
            None if ctx.input.system_prompt.strip() == "" else ctx.input.system_prompt
        ),
+       modes=lambda ctx: (
+           None if ctx.input.modes.strip() == "" else ctx.input.modes.split(",")
+       ),
        message="{ctx.input.message}",
        retries=0,
    ),
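The `modes` input arrives as a single comma-separated string, and the lambda above turns it into a list (or `None` when blank, which defers to the configured defaults). A minimal sketch of that conversion, independent of zrb:

```python
# Sketch of the modes lambda above: blank input means "use defaults",
# otherwise the comma-separated string becomes a list of mode names.
def parse_modes(raw: str) -> list[str] | None:
    return None if raw.strip() == "" else raw.split(",")

assert parse_modes("") is None
assert parse_modes("coding,researching") == ["coding", "researching"]
```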
zrb/config/config.py CHANGED
@@ -287,6 +287,10 @@ class Config:
        """Number of seconds to sleep when throttling is required."""
        return float(os.getenv("ZRB_LLM_THROTTLE_SLEEP", "1.0"))

+   @property
+   def LLM_YOLO_MODE(self) -> bool:
+       return to_boolean(os.getenv("ZRB_LLM_YOLO_MODE", "false"))
+
    @property
    def LLM_SUMMARIZE_HISTORY(self) -> bool:
        return to_boolean(os.getenv("ZRB_LLM_SUMMARIZE_HISTORY", "true"))
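`LLM_YOLO_MODE` is read from the `ZRB_LLM_YOLO_MODE` environment variable and, as the `tool_wrapper.py` hunks below show, skips the per-tool confirmation prompt when true. A sketch of how such a flag behaves; the stand-in `to_boolean` below is hypothetical, and the exact set of truthy strings accepted by zrb's real helper is an assumption:

```python
import os

# Hypothetical stand-in for zrb.util.string.conversion.to_boolean;
# the accepted truthy strings here are an assumption, not zrb's exact list.
def to_boolean(value: str) -> bool:
    return value.strip().lower() in ("true", "1", "yes", "y")

os.environ["ZRB_LLM_YOLO_MODE"] = "true"
print(to_boolean(os.getenv("ZRB_LLM_YOLO_MODE", "false")))  # True -> confirmation prompt skipped
```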
zrb/config/llm_config.py CHANGED
@@ -22,11 +22,9 @@ class LLMConfig:
        default_interactive_system_prompt: str | None = None,
        default_special_instruction_prompt: str | None = None,
        default_summarization_prompt: str | None = None,
-       default_context_enrichment_prompt: str | None = None,
        default_summarize_history: bool | None = None,
        default_history_summarization_token_threshold: int | None = None,
-       default_enrich_context: bool | None = None,
-       default_context_enrichment_token_threshold: int | None = None,
+       default_modes: list[str] | None = None,
        default_model: "Model | None" = None,
        default_model_settings: "ModelSettings | None" = None,
        default_model_provider: "Provider | None" = None,
@@ -40,18 +38,14 @@
        self._default_interactive_system_prompt = default_interactive_system_prompt
        self._default_special_instruction_prompt = default_special_instruction_prompt
        self._default_summarization_prompt = default_summarization_prompt
-       self._default_context_enrichment_prompt = default_context_enrichment_prompt
        self._default_summarize_history = default_summarize_history
        self._default_history_summarization_token_threshold = (
            default_history_summarization_token_threshold
        )
-       self._default_enrich_context = default_enrich_context
-       self._default_context_enrichment_token_threshold = (
-           default_context_enrichment_token_threshold
-       )
+       self._default_modes = default_modes
+       self._default_model = default_model
        self._default_model_settings = default_model_settings
        self._default_model_provider = default_model_provider
-       self._default_model = default_model

    def _get_internal_default_prompt(self, name: str) -> str:
        if name not in self.__internal_default_prompt:
@@ -130,32 +124,18 @@
            lambda: self._get_internal_default_prompt("persona"),
        )

+   @property
+   def default_modes(self) -> list[str]:
+       return self._get_property(
+           self._default_modes, CFG.LLM_MODES, lambda: ["coding"]
+       )
+
    @property
    def default_special_instruction_prompt(self) -> str:
        return self._get_property(
            self._default_special_instruction_prompt,
            CFG.LLM_SPECIAL_INSTRUCTION_PROMPT,
-           lambda: self._get_workflow_prompt(CFG.LLM_MODES),
-       )
-
-   def _get_workflow_prompt(self, modes: list[str]) -> str:
-       workflows = llm_context_config.get_workflows()
-       dir_path = os.path.dirname(__file__)
-       default_workflow_names = ("code", "content", "research")
-       for workflow_name in default_workflow_names:
-           if workflow_name in workflows:
-               continue
-           workflow_file_path = os.path.join(
-               dir_path, "default_workflow", f"{workflow_name}.md"
-           )
-           with open(workflow_file_path, "r") as f:
-               workflows[workflow_name] = f.read()
-       return "\n".join(
-           [
-               make_prompt_section(header, content)
-               for header, content in workflows.items()
-               if header.lower() in modes
-           ]
+           lambda: "",
        )

    @property
@@ -206,6 +186,19 @@
    def set_default_special_instruction_prompt(self, special_instruction_prompt: str):
        self._default_special_instruction_prompt = special_instruction_prompt

+   def set_default_modes(self, modes: list[str]):
+       self._default_modes = modes
+
+   def add_default_mode(self, mode: str):
+       if self._default_modes is None:
+           self._default_modes = []
+       self._default_modes.append(mode)
+
+   def remove_default_mode(self, mode: str):
+       if self._default_modes is None:
+           self._default_modes = []
+       self._default_modes.remove(mode)
+
    def set_default_summarization_prompt(self, summarization_prompt: str):
        self._default_summarization_prompt = summarization_prompt
zrb/config/default_workflow/code.md → zrb/task/llm/default_workflow/coding.md RENAMED
@@ -1,5 +1,3 @@
-# Special Instructions for Software Engineering
-
 When the user's request involves writing or modifying code, you MUST follow these domain-specific rules in addition to your core workflow.

 ## 1. Critical Prohibitions
zrb/config/default_workflow/content.md → zrb/task/llm/default_workflow/copywriting.md RENAMED
@@ -1,5 +1,3 @@
-# Special Instructions for Content Creation & Management
-
 When the user's request involves creating, refining, or organizing textual content, you MUST follow these domain-specific rules in addition to your core workflow.

 ## 1. Core Principles
zrb/config/default_workflow/research.md → zrb/task/llm/default_workflow/researching.md RENAMED
@@ -1,5 +1,3 @@
-# Special Instructions for Research, Analysis, and Summarization
-
 When the user's request involves finding, synthesizing, or analyzing information, you MUST follow these domain-specific rules in addition to your core workflow.

 ## 1. Core Principles
zrb/task/llm/prompt.py CHANGED
@@ -3,11 +3,12 @@ import platform
 import re
 from datetime import datetime, timezone

-from zrb.attr.type import StrAttr
+from zrb.attr.type import StrAttr, StrListAttr
 from zrb.config.llm_config import llm_config as llm_config
+from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
-from zrb.util.attr import get_attr, get_str_attr
+from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
 from zrb.util.file import read_dir, read_file_with_line_numbers
 from zrb.util.llm.prompt import make_prompt_section
@@ -15,13 +16,14 @@ from zrb.util.llm.prompt import make_prompt_section
 def get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
+    render_persona: bool,
 ) -> str:
     """Gets the persona, prioritizing task-specific, then default."""
     persona = get_attr(
         ctx,
         persona_attr,
         None,
-        auto_render=False,
+        auto_render=render_persona,
     )
     if persona is not None:
         return persona
@@ -31,13 +33,14 @@
 def get_base_system_prompt(
     ctx: AnyContext,
     system_prompt_attr: StrAttr | None,
+    render_system_prompt: bool,
 ) -> str:
     """Gets the base system prompt, prioritizing task-specific, then default."""
     system_prompt = get_attr(
         ctx,
         system_prompt_attr,
         None,
-        auto_render=False,
+        auto_render=render_system_prompt,
     )
     if system_prompt is not None:
         return system_prompt
@@ -47,33 +50,95 @@
 def get_special_instruction_prompt(
     ctx: AnyContext,
     special_instruction_prompt_attr: StrAttr | None,
+    render_spcecial_instruction_prompt: bool,
 ) -> str:
     """Gets the special instruction prompt, prioritizing task-specific, then default."""
     special_instruction = get_attr(
         ctx,
         special_instruction_prompt_attr,
         None,
-        auto_render=False,
+        auto_render=render_spcecial_instruction_prompt,
     )
     if special_instruction is not None:
         return special_instruction
     return llm_config.default_special_instruction_prompt


+def get_modes(
+    ctx: AnyContext,
+    modes_attr: StrAttr | None,
+    render_modes: bool,
+) -> str:
+    """Gets the modes, prioritizing task-specific, then default."""
+    raw_modes = get_str_list_attr(
+        ctx,
+        modes_attr,
+        auto_render=render_modes,
+    )
+    modes = [mode.strip() for mode in raw_modes if mode.strip() != ""]
+    if len(modes) > 0:
+        return modes
+    return llm_config.default_modes or []
+
+
+def get_workflow_prompt(
+    ctx: AnyContext,
+    modes_attr: StrAttr | None,
+    render_modes: bool,
+) -> str:
+    modes = get_modes(ctx, modes_attr, render_modes)
+    # Get user-defined workflows
+    workflows = {
+        workflow_name: content
+        for workflow_name, content in llm_context_config.get_workflows().items()
+        if workflow_name in modes
+    }
+    # Get requested builtin-workflow names
+    requested_builtin_workflow_names = [
+        workflow_name
+        for workflow_name in ("coding", "copywriting", "researching")
+        if workflow_name in modes and workflow_name not in workflows
+    ]
+    # add builtin-workflows if requested
+    if len(requested_builtin_workflow_names) > 0:
+        dir_path = os.path.dirname(__file__)
+        for workflow_name in requested_builtin_workflow_names:
+            workflow_file_path = os.path.join(
+                dir_path, "default_workflow", f"{workflow_name}.md"
+            )
+            with open(workflow_file_path, "r") as f:
+                workflows[workflow_name] = f.read()
+    return "\n".join(
+        [
+            make_prompt_section(header.capitalize(), content)
+            for header, content in workflows.items()
+            if header.lower() in modes
+        ]
+    )
+
+
 def get_system_and_user_prompt(
     ctx: AnyContext,
     user_message: str,
     persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
     system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
     special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    modes_attr: StrListAttr | None = None,
+    render_modes: bool = False,
     conversation_history: ConversationHistory | None = None,
 ) -> tuple[str, str]:
     """Combines persona, base system prompt, and special instructions."""
-    persona = get_persona(ctx, persona_attr)
-    base_system_prompt = get_base_system_prompt(ctx, system_prompt_attr)
-    special_instruction = get_special_instruction_prompt(
-        ctx, special_instruction_prompt_attr
+    persona = get_persona(ctx, persona_attr, render_persona)
+    base_system_prompt = get_base_system_prompt(
+        ctx, system_prompt_attr, render_system_prompt
     )
+    special_instruction_prompt = get_special_instruction_prompt(
+        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
+    )
+    workflow_prompt = get_workflow_prompt(ctx, modes_attr, render_modes)
     if conversation_history is None:
         conversation_history = ConversationHistory()
     conversation_context, new_user_message = extract_conversation_context(user_message)
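`get_workflow_prompt` gives user-defined workflows from `llm_context_config` precedence over the three builtins, which are only read from disk when requested and not already overridden. A simplified sketch of that resolution, with the context lookup stubbed out (`resolve_workflows` and `user_workflows` are illustrative names, not part of the diff):

```python
# Simplified sketch of the resolution inside get_workflow_prompt.
# user_workflows stands in for llm_context_config.get_workflows().
def resolve_workflows(modes: list[str], user_workflows: dict[str, str]) -> dict[str, str]:
    workflows = {name: body for name, body in user_workflows.items() if name in modes}
    for name in ("coding", "copywriting", "researching"):
        if name in modes and name not in workflows:
            workflows[name] = f"<builtin {name}.md contents>"  # read from default_workflow/ on disk
    return workflows

print(resolve_workflows(["coding"], {"coding": "my custom coding rules"}))
# {'coding': 'my custom coding rules'} -- a user-defined workflow wins over the builtin file
```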
@@ -81,7 +146,8 @@ def get_system_and_user_prompt(
        [
            make_prompt_section("Persona", persona),
            make_prompt_section("System Prompt", base_system_prompt),
-           make_prompt_section("Special Instruction", special_instruction),
+           make_prompt_section("Special Instruction", special_instruction_prompt),
+           make_prompt_section("Special Workflows", workflow_prompt),
            make_prompt_section(
                "Past Conversation",
                "\n".join(
@@ -194,30 +260,15 @@ def get_user_message(
 def get_summarization_system_prompt(
     ctx: AnyContext,
     summarization_prompt_attr: StrAttr | None,
+    render_summarization_prompt: bool,
 ) -> str:
     """Gets the summarization prompt, rendering if configured and handling defaults."""
     summarization_prompt = get_attr(
         ctx,
         summarization_prompt_attr,
         None,
-        auto_render=False,
+        auto_render=render_summarization_prompt,
     )
     if summarization_prompt is not None:
         return summarization_prompt
     return llm_config.default_summarization_prompt
-
-
-def get_context_enrichment_prompt(
-    ctx: AnyContext,
-    context_enrichment_prompt_attr: StrAttr | None,
-) -> str:
-    """Gets the context enrichment prompt, rendering if configured and handling defaults."""
-    context_enrichment_prompt = get_attr(
-        ctx,
-        context_enrichment_prompt_attr,
-        None,
-        auto_render=False,
-    )
-    if context_enrichment_prompt is not None:
-        return context_enrichment_prompt
-    return llm_config.default_context_enrichment_prompt
zrb/task/llm/tool_wrapper.py CHANGED
@@ -5,9 +5,12 @@ import typing
 from collections.abc import Callable
 from typing import TYPE_CHECKING

+from zrb.config.config import CFG
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.error import ToolExecutionError
+from zrb.util.callable import get_callable_name
 from zrb.util.run import run_async
+from zrb.util.string.conversion import to_boolean

 if TYPE_CHECKING:
     from pydantic_ai import Tool
@@ -71,13 +74,11 @@ def _create_wrapper(
    async def wrapper(*args, **kwargs):
        # Identify AnyContext parameter name from the original signature if needed
        any_context_param_name = None
-
        if needs_any_context_for_injection:
            for param in original_sig.parameters.values():
                if _is_annotated_with_context(param.annotation, AnyContext):
                    any_context_param_name = param.name
                    break  # Found it, no need to continue
-
        if any_context_param_name is None:
            # This should not happen if needs_any_context_for_injection is True,
            # but check for safety
@@ -87,24 +88,22 @@
        # Inject the captured ctx into kwargs. This will overwrite if the LLM
        # somehow provided it.
        kwargs[any_context_param_name] = ctx
-
        # If the dummy argument was added for schema generation and is present in kwargs,
        # remove it before calling the original function, unless the original function
        # actually expects a parameter named '_dummy'.
        if "_dummy" in kwargs and "_dummy" not in original_sig.parameters:
            del kwargs["_dummy"]
-
        try:
-           # Call the original function.
-           # pydantic-ai is responsible for injecting RunContext if takes_ctx is True.
-           # Our wrapper injects AnyContext if needed.
-           # The arguments received by the wrapper (*args, **kwargs) are those
-           # provided by the LLM, potentially with RunContext already injected by
-           # pydantic-ai if takes_ctx is True. We just need to ensure AnyContext
-           # is injected if required by the original function.
-           # The dummy argument handling is moved to _adjust_signature's logic
-           # for schema generation, it's not needed here before calling the actual
-           # function.
+           if not CFG.LLM_YOLO_MODE and not ctx.is_web_mode and ctx.is_tty:
+               func_name = get_callable_name(func)
+               ctx.print(f"✅ >> Allow to run tool: {func_name} (Y/n)", plain=True)
+               user_confirmation_str = await _read_line()
+               user_confirmation = to_boolean(user_confirmation_str)
+               if not user_confirmation:
+                   ctx.print("❌ >> Why?", plain=True)
+                   reason = await _read_line()
+                   ctx.print("", plain=True)
+                   raise ValueError(f"User disapproval: {reason}")
            return await run_async(func(*args, **kwargs))
        except Exception as e:
            error_model = ToolExecutionError(
@@ -118,6 +117,13 @@
    return wrapper


+async def _read_line():
+    from prompt_toolkit import PromptSession
+
+    reader = PromptSession()
+    return await reader.prompt_async()
+
+
 def _adjust_signature(
     wrapper: Callable, original_sig: inspect.Signature, takes_no_args: bool
 ):
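Taken together, these hunks add a human-in-the-loop gate: unless `ZRB_LLM_YOLO_MODE` is enabled, each tool call in an interactive TTY session must be approved before it runs, and a rejection is surfaced to the agent as a `ValueError` carrying the user's stated reason. A condensed, hypothetical sketch of that flow (`confirm_and_run` is an illustrative name; the real code also checks `ctx.is_web_mode`/`ctx.is_tty` and parses the answer with zrb's `to_boolean`):

```python
from prompt_toolkit import PromptSession

async def confirm_and_run(func, *args, yolo_mode: bool = False, **kwargs):
    # Hypothetical condensed version of the gate added in _create_wrapper.
    if not yolo_mode:
        session = PromptSession()
        print(f"✅ >> Allow to run tool: {func.__name__} (Y/n)")
        answer = await session.prompt_async()
        if answer.strip().lower() not in ("y", "yes", "true", "1"):  # assumed truthy set
            print("❌ >> Why?")
            reason = await session.prompt_async()
            raise ValueError(f"User disapproval: {reason}")
    return func(*args, **kwargs)
```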
zrb/task/llm_task.py CHANGED
@@ -2,7 +2,7 @@ import json
 from collections.abc import Callable
 from typing import TYPE_CHECKING, Any

-from zrb.attr.type import BoolAttr, IntAttr, StrAttr, fstring
+from zrb.attr.type import BoolAttr, IntAttr, StrAttr, StrListAttr, fstring
 from zrb.config.llm_rate_limitter import LLMRateLimiter
 from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
@@ -16,14 +16,12 @@ from zrb.task.llm.config import (
     get_model_settings,
 )
 from zrb.task.llm.conversation_history import (
-    ListOfDict,
     read_conversation_history,
     write_conversation_history,
 )
 from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.history_summarization import maybe_summarize_history
 from zrb.task.llm.prompt import (
-    get_context_enrichment_prompt,
     get_summarization_system_prompt,
     get_system_and_user_prompt,
     get_user_message,
@@ -65,16 +63,15 @@ class LLMTask(BaseTask):
        ) = None,
        agent: "Agent | Callable[[AnySharedContext], Agent] | None" = None,
        persona: StrAttr | None = None,
+       render_persona: bool = False,
        system_prompt: StrAttr | None = None,
+       render_system_prompt: bool = False,
        special_instruction_prompt: StrAttr | None = None,
+       render_special_instruction_prompt: bool = False,
+       modes: StrListAttr | None = None,
+       render_modes: bool = True,
        message: StrAttr | None = None,
        render_message: bool = True,
-       enrich_context: BoolAttr | None = None,
-       render_enrich_context: bool = True,
-       context_enrichment_prompt: StrAttr | None = None,
-       render_context_enrichment_prompt: bool = True,
-       context_enrichment_token_threshold: IntAttr | None = None,
-       render_context_enrichment_token_threshold: bool = True,
        tools: (
            list["ToolOrCallable"]
            | Callable[[AnySharedContext], list["ToolOrCallable"]]
@@ -100,6 +97,7 @@ class LLMTask(BaseTask):
        summarize_history: BoolAttr | None = None,
        render_summarize_history: bool = True,
        summarization_prompt: StrAttr | None = None,
+       render_summarization_prompt: bool = False,
        history_summarization_token_threshold: IntAttr | None = None,
        render_history_summarization_token_threshold: bool = True,
        rate_limitter: LLMRateLimiter | None = None,
@@ -150,19 +148,17 @@
        self._model_settings = model_settings
        self._agent = agent
        self._persona = persona
+       self._render_persona = render_persona
        self._system_prompt = system_prompt
+       self._render_system_prompt = render_system_prompt
        self._special_instruction_prompt = special_instruction_prompt
+       self._render_special_instruction_prompt = render_special_instruction_prompt
+       self._modes = modes
+       self._render_modes = render_modes
        self._message = message
        self._render_message = render_message
        self._summarization_prompt = summarization_prompt
-       self._should_enrich_context = enrich_context
-       self._render_enrich_context = render_enrich_context
-       self._context_enrichment_prompt = context_enrichment_prompt
-       self._render_context_enrichment_prompt = render_context_enrichment_prompt
-       self._context_enrichment_token_threshold = context_enrichment_token_threshold
-       self._render_context_enrichment_token_threshold = (
-           render_context_enrichment_token_threshold
-       )
+       self._render_summarization_prompt = render_summarization_prompt
        self._tools = tools
        self._rate_limitter = rate_limitter
        self._additional_tools: list["ToolOrCallable"] = []
@@ -198,12 +194,6 @@
        for single_mcp_server in mcp_server:
            self._additional_mcp_servers.append(single_mcp_server)

-   def set_should_enrich_context(self, enrich_context: bool):
-       self._should_enrich_context = enrich_context
-
-   def set_context_enrichment_token_threshold(self, enrichment_token_threshold: int):
-       self._context_enrichment_token_threshold = enrichment_token_threshold
-
    def set_should_summarize_history(self, summarize_history: bool):
        self._should_summarize_history = summarize_history
@@ -227,6 +217,7 @@
        summarization_prompt = get_summarization_system_prompt(
            ctx=ctx,
            summarization_prompt_attr=self._summarization_prompt,
+           render_summarization_prompt=self._render_summarization_prompt,
        )
        user_message = get_user_message(ctx, self._message, self._render_message)
        # 1. Prepare initial state (read history from previous session)
@@ -243,8 +234,13 @@
            ctx=ctx,
            user_message=user_message,
            persona_attr=self._persona,
+           render_persona=self._render_persona,
            system_prompt_attr=self._system_prompt,
+           render_system_prompt=self._render_system_prompt,
            special_instruction_prompt_attr=self._special_instruction_prompt,
+           render_special_instruction_prompt=self._render_special_instruction_prompt,
+           modes_attr=self._modes,
+           render_modes=self._render_modes,
            conversation_history=conversation_history,
        )
        # 3. Get the agent instance
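For task authors, the net effect is that `LLMTask` now accepts `modes` plus per-prompt `render_*` toggles in place of the removed `enrich_context` family. A hedged construction sketch; the `name`, `message`, and mode values below are illustrative placeholders, not taken from this diff:

```python
from zrb.task.llm_task import LLMTask

# Sketch only: parameter names follow the new signature in this diff.
task = LLMTask(
    name="ask",
    message="Refactor this module",
    modes=["coding", "researching"],  # replaces the removed enrich_context options
    render_modes=True,
)
```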
zrb/util/callable.py ADDED
@@ -0,0 +1,23 @@
+from types import BuiltinMethodType, MethodType
+
+
+def get_callable_name(obj):
+    import functools
+    import inspect
+
+    # 1. Unwrap decorated functions
+    obj = inspect.unwrap(obj, stop=lambda f: not hasattr(f, "__wrapped__"))
+    # 2. functools.partial – delegate to the wrapped function
+    if isinstance(obj, functools.partial):
+        return get_callable_name(obj.func)
+    # 3. Plain functions, built‑ins, methods
+    if hasattr(obj, "__name__"):
+        return obj.__name__
+    # 4. Bound or unbound methods of a class
+    if isinstance(obj, (MethodType, BuiltinMethodType)):
+        return obj.__func__.__name__
+    # 5. Instances of classes defining __call__
+    if callable(obj):
+        return type(obj).__name__
+    # 6. Fallback
+    return repr(obj)
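A quick usage sketch of the new helper, exercising each branch it handles:

```python
import functools
from zrb.util.callable import get_callable_name

def greet():
    ...

class Greeter:
    def __call__(self):
        ...

print(get_callable_name(greet))                     # greet
print(get_callable_name(functools.partial(greet)))  # greet (delegates through the partial)
print(get_callable_name(Greeter()))                 # Greeter (falls back to the class name)
print(get_callable_name("not callable"))            # 'not callable' (repr fallback)
```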
zrb-1.11.0.dist-info/METADATA → zrb-1.12.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.11.0
+Version: 1.12.0
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
zrb-1.11.0.dist-info/RECORD → zrb-1.12.0.dist-info/RECORD RENAMED
@@ -9,10 +9,10 @@ zrb/builtin/git_subtree.py,sha256=7BKwOkVTWDrR0DXXQ4iJyHqeR6sV5VYRt8y_rEB0EHg,35
 zrb/builtin/group.py,sha256=t008xLM4_fgbjfZrPoi_fQAnSHIo6MOiQSCHBO4GDYU,2379
 zrb/builtin/http.py,sha256=sLqEczuSxGYXWzyJR6frGOHkPTviu4BeyroUr3-ZuAI,4322
 zrb/builtin/jwt.py,sha256=3M5uaQhJZbKQLjTUft1OwPz_JxtmK-xtkjxWjciOQho,2859
-zrb/builtin/llm/chat_session.py,sha256=8Ux5xfgNsmTVFS9XVfSjz_AVkl3jn5gYY-P2m5ZAB5c,8466
+zrb/builtin/llm/chat_session.py,sha256=u8bW67uKCq22hVv4ZkOsKIZxBeOdKtJh4Bjyy552RM4,9424
 zrb/builtin/llm/history.py,sha256=LDOrL0p7r_AHLa5L8Dp7bHNsOALugmJd7OguXRWGnm4,3087
 zrb/builtin/llm/input.py,sha256=Nw-26uTWp2QhUgKJcP_IMHmtk-b542CCSQ_vCOjhvhM,877
-zrb/builtin/llm/llm_ask.py,sha256=oozfQwa1i2PnXV4qWbn60Pmd3fS0kgmhYCbfKlhr25o,4549
+zrb/builtin/llm/llm_ask.py,sha256=18XAxyPWF7daE0TZkRkRt8opmqLUjhpM3oMVdOP-qWY,4857
 zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/builtin/llm/tool/api.py,sha256=OhmfLc2TwWKQYIMweGelqb5s4JF4nB-YynbSO4yb_Jk,2342
@@ -217,7 +217,7 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config/config.py,sha256=srXMnSQ9rjIYnX4lG1QmvIM0cM-oa92n4zqMnD6E_FM,12187
+zrb/config/config.py,sha256=d_F-hdPLADjeVRHtnpOxtOkfUBu5huSLclyD53uxO4U,12306
 zrb/config/default_prompt/file_extractor_system_prompt.md,sha256=tmeZMPzF9MGExsZZw7M2PZN6V0oFVRp1nIjiqUPvQ9M,1013
 zrb/config/default_prompt/interactive_system_prompt.md,sha256=NlG5cQ4imEGF9CIRwqH03UZ5XRtqLu1gIin3nBDtQlI,2795
 zrb/config/default_prompt/persona.md,sha256=WU4JKp-p7qJePDA6NZ_CYdBggo2B3PEq8IEnNVblIHU,41
@@ -225,10 +225,7 @@ zrb/config/default_prompt/repo_extractor_system_prompt.md,sha256=EGZ-zj78RlMEg2j
 zrb/config/default_prompt/repo_summarizer_system_prompt.md,sha256=fpG5B416OK3oE41bWPrh1M6pdH5SSadCPte_NJ_79z0,858
 zrb/config/default_prompt/summarization_prompt.md,sha256=3-swyZ2m9DQFkaN68kn-AxnFHTcQYqrPSzV3qwT-vw4,2122
 zrb/config/default_prompt/system_prompt.md,sha256=uRRiVSTs_4s2DYBO-1cPuOGPVkaelA_UuGClLawfw3o,2283
-zrb/config/default_workflow/code.md,sha256=aFVBjPwVcpRkWGHKdwDEYvRxfIQyLWaVYfleO68Aj8s,2533
-zrb/config/default_workflow/content.md,sha256=Kz_ufOApkPXhhr2R8eyt06b8gVegLc5LDkBlsN6BYV4,2057
-zrb/config/default_workflow/research.md,sha256=CSWP-uh3a-pI9DF9sJkzODWESD2wLCGhr1nrMfUjAlw,2205
-zrb/config/llm_config.py,sha256=ET2ehVdQQv-dOTXIVx81sdvybF9rprL6VUhAxE6K-Ho,9215
+zrb/config/llm_config.py,sha256=bNLxorctwtVW1F9hA-hEYpDBe7FLSZHC25Nx8NlR4-M,8597
 zrb/config/llm_context/config.py,sha256=swc3hUaEIoL2MjKtbati13iP0MxveNG_y_6K3nszRAw,2571
 zrb/config/llm_context/config_handler.py,sha256=oQesfigIM0qMw_A3jUCN0UDJujRjuJ3jr5mXHBiLgB0,8866
 zrb/config/llm_rate_limitter.py,sha256=P4vR7qxwiGwjlKx2kHcfdIxwGbJB98vdN-UQEH-Q2WU,4894
@@ -353,13 +350,16 @@ zrb/task/llm/agent.py,sha256=A5UoHY-l8WqyptKrf42eHVW_VhMhuYsygs2Z8XNnCzk,6681
 zrb/task/llm/config.py,sha256=TlyH925_fboIlK2Ixf34tynmenqs9s9rfsnPs4jff78,3490
 zrb/task/llm/conversation_history.py,sha256=B_PDWYL_q66s0xwWBzMSomqPN6u3gkXlIeXBD5A0Apg,4416
 zrb/task/llm/conversation_history_model.py,sha256=AU5-M4Ky3X4wII1PMT75VU5OUEG0FjqdHrrpCSl-u6M,10771
+zrb/task/llm/default_workflow/coding.md,sha256=2uythvPsnBpYfIhiIH1cCinQXX0i0yUqsL474Zpemw0,2484
+zrb/task/llm/default_workflow/copywriting.md,sha256=xSO7GeDolwGxiuz6kXsK2GKGpwp8UgtG0yRqTmill_s,1999
+zrb/task/llm/default_workflow/researching.md,sha256=KD-aYHFHir6Ti-4FsBBtGwiI0seSVgleYbKJZi_POXA,2139
 zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
 zrb/task/llm/history_summarization.py,sha256=BUwBOS51Jzp4psliD_h1jWq-5oHezNbjF1fkn7vbh7o,8109
 zrb/task/llm/print_node.py,sha256=zocTKi9gZDxl2I6KNu095TmMc13Yip6SNuWYnswS680,4060
-zrb/task/llm/prompt.py,sha256=OXoN3ttXmQR8DxoJnKh3vgjaVzl532i3Y567xaXdQs8,7708
-zrb/task/llm/tool_wrapper.py,sha256=8_bL8m_WpRf-pVKSrvQIVqT-m2sUA87a1RBQG13lhp4,6457
+zrb/task/llm/prompt.py,sha256=sMipP-NJmq4ZmCtQYEG2mcHWUD79yJRwH7nH-iw-7Z4,9661
+zrb/task/llm/tool_wrapper.py,sha256=N6IuWJXFDcGUJyMJnnWmpJLsqas1QNCEj0MNL3T2nXI,6647
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
-zrb/task/llm_task.py,sha256=TTYb9FYqZX_OIgDE6q5Z9IVuM6NcsKFeCVIi6ovQDE8,13712
+zrb/task/llm_task.py,sha256=Zxmp7c7XOz5_jAX1kzwwNfD9GJ1Tok-C4e_MfqhliNk,13532
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=WfqNSaicJgYWpunNU34eYxXDqHDHOftuDHyWJKjqwg0,6365
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -370,6 +370,7 @@ zrb/task_status/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/task_status/task_status.py,sha256=blZ8dxg9g_8MuViq-t7yJRLoE7yGUf5srgHf-PCsXNc,3069
 zrb/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/util/attr.py,sha256=5GlYSmVAzbcSFjNDXiqqHqNMR6NWjJ6bUHZXdE35mj8,5359
+zrb/util/callable.py,sha256=b6OFXbCXp2twow3wh2E_h5hNHLs2pXaLfGQz4iVyiQc,771
 zrb/util/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/util/cli/style.py,sha256=D_548KG1gXEirQGdkAVTc81vBdCeInXtnG1gV1yabBA,6655
 zrb/util/cli/subcommand.py,sha256=umTZIlrL-9g-qc_eRRgdaQgK-whvXK1roFfvnbuY7NQ,1753
@@ -405,7 +406,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
 zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.11.0.dist-info/METADATA,sha256=pcdwF1Z7DIfcE3qZxe7vnYY1gbNRKjiKaLdw9oSbkuA,9778
-zrb-1.11.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-zrb-1.11.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
-zrb-1.11.0.dist-info/RECORD,,
+zrb-1.12.0.dist-info/METADATA,sha256=ZegE-xKhBfEIGj-PXDaNKUmoQsJgWYR6_4E0V4-2Awk,9778
+zrb-1.12.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.12.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.12.0.dist-info/RECORD,,