zrb 1.11.0__py3-none-any.whl → 1.13.0__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
@@ -25,6 +25,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
     is_tty = ctx.is_tty
     reader = await _setup_input_reader(is_tty)
     multiline_mode = False
+    current_modes = ctx.input.modes
     user_inputs = []
     while True:
         await asyncio.sleep(0.01)
@@ -38,7 +39,9 @@ async def read_user_prompt(ctx: AnyContext) -> str:
         if user_input.strip().lower() in ("/bye", "/quit", "/q", "/exit"):
             user_prompt = "\n".join(user_inputs)
             user_inputs = []
-            result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+            result = await _trigger_ask_and_wait_for_result(
+                ctx, user_prompt, current_modes
+            )
             if result is not None:
                 final_result = result
                 break
@@ -49,9 +52,18 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             multiline_mode = False
             user_prompt = "\n".join(user_inputs)
             user_inputs = []
-            result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+            result = await _trigger_ask_and_wait_for_result(
+                ctx, user_prompt, current_modes
+            )
             if result is not None:
                 final_result = result
+        elif user_input.strip().lower().startswith("/mode"):
+            mode_parts = user_input.split(" ", maxsplit=2)
+            if len(mode_parts) > 1:
+                current_modes = mode_parts[1]
+            ctx.print(f"Current mode: {current_modes}", plain=True)
+            ctx.print("", plain=True)
+            continue
         elif user_input.strip().lower() in ("/help", "/info"):
             _show_info(ctx)
             continue
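Taken in isolation, the new `/modes` handling reduces to a small parse step. A minimal standalone sketch (with `ctx.print` replaced by plain `print`) behaves like this:

```python
# Minimal sketch of the /modes command parsing added above, outside zrb.
# `current_modes` mirrors the loop variable in read_user_prompt.
def handle_mode_command(user_input: str, current_modes: str) -> str:
    # startswith("/mode") matches both "/mode" and "/modes"
    if user_input.strip().lower().startswith("/mode"):
        mode_parts = user_input.split(" ", maxsplit=2)
        if len(mode_parts) > 1:
            current_modes = mode_parts[1]
        print(f"Current mode: {current_modes}")
    return current_modes

modes = "coding"
modes = handle_mode_command("/modes research,writing", modes)  # sets new modes
modes = handle_mode_command("/modes", modes)  # no argument: just echoes current value
```

Note that with no argument the command simply reports the current value, which is why the help text below lists both a bare `/modes` and a `/modes <mode1,mode2,..>` form.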
@@ -61,7 +73,9 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             continue
         user_prompt = "\n".join(user_inputs)
         user_inputs = []
-        result = await _trigger_ask_and_wait_for_result(ctx, user_prompt)
+        result = await _trigger_ask_and_wait_for_result(
+            ctx, user_prompt, current_modes
+        )
         if result is not None:
             final_result = result
     return final_result
@@ -74,16 +88,26 @@ def _show_info(ctx: AnyContext):
         ctx: The context object for the task.
     """
     ctx.print(
-        (
-            f" {stylize_bold_yellow('/bye')} {stylize_faint('Quit from chat session')}\n"
-            f" {stylize_bold_yellow('/multi')} {stylize_faint('Start multiline input')}\n"
-            f" {stylize_bold_yellow('/end')} {stylize_faint('End multiline input')}\n"
-            f" {stylize_bold_yellow('/help')} {stylize_faint('Show this message')}\n"
+        "\n".join(
+            [
+                _format_info_line("/bye", "Quit from chat session"),
+                _format_info_line("/multi", "Start multiline input"),
+                _format_info_line("/end", "End multiline input"),
+                _format_info_line("/modes", "Show current modes"),
+                _format_info_line("/modes <mode1,mode2,..>", "Set current modes"),
+                _format_info_line("/help", "Show this message"),
+            ]
         ),
         plain=True,
    )


+def _format_info_line(command: str, description: str) -> str:
+    styled_command = stylize_bold_yellow(command.ljust(25))
+    styled_description = stylize_faint(description)
+    return f" {styled_command} {styled_description}"
+
+
 async def _handle_initial_message(ctx: AnyContext) -> str:
     """Processes the initial message from the command line."""
     if not ctx.input.message or ctx.input.message.strip() == "":
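The new helper pads each command to a fixed width before styling, so descriptions line up in a column. Stripped of zrb's styling helpers, the alignment works like this (plain `str.ljust` standing in for the styled version):

```python
# Alignment sketch: ljust pads the command column to 25 characters,
# so every description starts at the same offset.
def format_info_line(command: str, description: str) -> str:
    return f" {command.ljust(25)} {description}"

print(format_info_line("/bye", "Quit from chat session"))
print(format_info_line("/modes <mode1,mode2,..>", "Set current modes"))
```

The width of 25 comfortably covers the longest command, `/modes <mode1,mode2,..>`, at 23 characters.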
@@ -94,6 +118,7 @@ async def _handle_initial_message(ctx: AnyContext) -> str:
     result = await _trigger_ask_and_wait_for_result(
         ctx,
         user_prompt=ctx.input.message,
+        modes=ctx.input.modes,
         previous_session_name=ctx.input.previous_session,
         start_new=ctx.input.start_new,
     )
@@ -131,6 +156,7 @@ async def _read_next_line(is_interactive: bool, reader, ctx: AnyContext) -> str:
 async def _trigger_ask_and_wait_for_result(
     ctx: AnyContext,
     user_prompt: str,
+    modes: str,
     previous_session_name: str | None = None,
     start_new: bool = False,
 ) -> str | None:
@@ -148,7 +174,7 @@ async def _trigger_ask_and_wait_for_result(
     """
     if user_prompt.strip() == "":
         return None
-    await _trigger_ask(ctx, user_prompt, previous_session_name, start_new)
+    await _trigger_ask(ctx, user_prompt, modes, previous_session_name, start_new)
     result = await _wait_ask_result(ctx)
     md_result = _render_markdown(result) if result is not None else ""
     ctx.print("\n🤖 >>", plain=True)
@@ -193,12 +219,14 @@ def get_llm_ask_input_mapping(callback_ctx: AnyContext):
         "start-new": data.get("start_new"),
         "previous-session": data.get("previous_session_name"),
         "message": data.get("message"),
+        "modes": data.get("modes"),
     }


 async def _trigger_ask(
     ctx: AnyContext,
     user_prompt: str,
+    modes: str,
     previous_session_name: str | None = None,
     start_new: bool = False,
 ):
@@ -218,6 +246,7 @@ async def _trigger_ask(
             "previous_session_name": previous_session_name,
             "start_new": start_new,
             "message": user_prompt,
+            "modes": modes,
         }
     )

@@ -65,6 +65,14 @@ _llm_ask_inputs = [
         allow_positional_parsing=False,
         always_prompt=False,
     ),
+    TextInput(
+        "modes",
+        description="Modes",
+        prompt="Modes",
+        default="coding",
+        allow_positional_parsing=False,
+        always_prompt=False,
+    ),
    BoolInput(
        "start-new",
        description="Start new conversation (LLM will forget everything)",
@@ -101,6 +109,9 @@ llm_ask: LLMTask = llm_group.add_task(
         system_prompt=lambda ctx: (
             None if ctx.input.system_prompt.strip() == "" else ctx.input.system_prompt
         ),
+        modes=lambda ctx: (
+            None if ctx.input.modes.strip() == "" else ctx.input.modes.split(",")
+        ),
         message="{ctx.input.message}",
         retries=0,
     ),
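Throughout the CLI the modes travel as a single comma-separated string and are only split into a list at the task boundary; the lambda above reduces to:

```python
# The modes input is a comma-separated string; empty input means
# "fall back to the default modes" (None), anything else is split.
# Note the values are not stripped, mirroring the code in the diff.
def resolve_modes(raw: str) -> list[str] | None:
    return None if raw.strip() == "" else raw.split(",")

assert resolve_modes("") is None
assert resolve_modes("coding") == ["coding"]
assert resolve_modes("coding,research") == ["coding", "research"]
```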
@@ -9,10 +9,10 @@ from zrb.task.llm.config import get_model, get_model_settings
 from zrb.task.llm.prompt import get_system_and_user_prompt

 if TYPE_CHECKING:
-    from pydantic_ai import Tool
-    from pydantic_ai.mcp import MCPServer
+    from pydantic_ai import Agent, Tool
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
+    from pydantic_ai.toolsets import AbstractToolset

     ToolOrCallable = Tool | Callable
 else:
@@ -26,7 +26,7 @@ def create_sub_agent_tool(
     model: "str | Model | None" = None,
     model_settings: "ModelSettings | None" = None,
     tools: list[ToolOrCallable] = [],
-    mcp_servers: list["MCPServer"] = [],
+    toolsets: list["AbstractToolset[Agent]"] = [],
 ) -> Callable[[AnyContext, str], Coroutine[Any, Any, str]]:
     """
     Creates a "tool that is another AI agent," capable of handling complex, multi-step sub-tasks.
@@ -42,7 +42,7 @@ def create_sub_agent_tool(
         model (str | Model, optional): The language model the sub-agent will use.
         model_settings (ModelSettings, optional): Specific settings for the sub-agent's model.
         tools (list, optional): A list of tools that will be exclusively available to the sub-agent.
-        mcp_servers (list, optional): A list of MCP servers for the sub-agent.
+        toolsets (list, optional): A list of Toolset for the sub-agent.

     Returns:
         Callable: An asynchronous function that serves as the sub-agent tool. When called, it runs the sub-agent with a given query and returns its final result.
@@ -85,7 +85,7 @@ def create_sub_agent_tool(
             system_prompt=resolved_system_prompt,
             model_settings=resolved_model_settings,
             tools=tools,
-            mcp_servers=mcp_servers,
+            toolsets=toolsets,
         )

         sub_agent_run = None
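The rename tracks pydantic_ai's move from a dedicated MCP-server list to generic toolsets. A hedged sketch of the consuming side, under the assumption of a recent pydantic_ai release that ships `pydantic_ai.toolsets` (the model name and tool are illustrative, not from this diff):

```python
from pydantic_ai import Agent
from pydantic_ai.toolsets import FunctionToolset

def word_count(text: str) -> int:
    """Count whitespace-separated words."""
    return len(text.split())

# Any AbstractToolset can be passed where mcp_servers was accepted before;
# FunctionToolset is the simplest concrete implementation, wrapping callables.
agent = Agent("openai:gpt-4o", toolsets=[FunctionToolset(tools=[word_count])])
```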
zrb/config/config.py CHANGED
@@ -287,6 +287,10 @@ class Config:
         """Number of seconds to sleep when throttling is required."""
         return float(os.getenv("ZRB_LLM_THROTTLE_SLEEP", "1.0"))

+    @property
+    def LLM_YOLO_MODE(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_YOLO_MODE", "false"))
+
     @property
     def LLM_SUMMARIZE_HISTORY(self) -> bool:
         return to_boolean(os.getenv("ZRB_LLM_SUMMARIZE_HISTORY", "true"))
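The new flag follows the same getenv-at-access-time pattern as the neighboring properties, so it can be toggled per process; a quick sketch:

```python
import os

# ZRB_LLM_YOLO_MODE is read on every property access, defaulting to "false".
os.environ["ZRB_LLM_YOLO_MODE"] = "true"

from zrb.config.config import CFG  # import path as used elsewhere in this diff

print(CFG.LLM_YOLO_MODE)  # True
```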
@@ -21,10 +21,14 @@ You are an expert AI agent in a CLI. You MUST follow this workflow for this inte
     * **CRITICAL:** Immediately after execution, you MUST use a tool to verify the outcome (e.g., after `write_file`, use `read_file`; after `rm`, use `ls` to confirm absence).

 4. **Handle Errors (The Debugging Loop):**
-    * If a tool call fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
-        1. **Analyze:** Scrutinize the complete error message (`stdout` and `stderr`).
-        2. **Hypothesize:** State a clear, specific hypothesis about the root cause.
-        3. **Act:** Propose and execute a concrete, single next step to fix the issue.
+    * If an action fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
+        1. **Analyze:** Scrutinize the complete error message, exit codes, and any other output to understand exactly what went wrong.
+        2. **Hypothesize:** State a clear, specific hypothesis about the root cause. For example, "The operation failed because the file path was incorrect," "The command failed because a required argument was missing," or "The test failed because the code has a logical error."
+        3. **Strategize and Correct:** Formulate a new action that directly addresses the hypothesis. Do not simply repeat the failed action. Your correction strategy MUST be logical and informed by the analysis. For example:
+            * If a path is wrong, take action to discover the correct path.
+            * If a command is malformed, correct its syntax or arguments.
+            * If an operation failed due to invalid state (e.g., unexpected file content, a logical bug in code), take action to inspect the current state and then formulate a targeted fix.
+        4. **Execute** the corrected action.
     * **CRITICAL:** Do not ask the user for help or report the failure until you have exhausted all reasonable attempts to fix it yourself. If the user provides a vague follow-up like "try again," you MUST use the context of the previous failure to inform your next action, not just repeat the failed command.

 5. **Report Results:**
@@ -1,42 +1,16 @@
-You are a silent AI tool. Your ONLY job is to call tools to update the conversation memory based on the `Recent Conversation (JSON)`. Your response MUST be only tool calls.
-
----
-
-### **1. Factual Notes**
-
-**Goal:** Extract permanent facts. Do NOT log activities.
-* **Good Fact:** `User prefers Python.`
-* **Bad Activity:** `User ran tests.`
-* **Action:** Use `add_long_term_info` for global facts and `add_contextual_info` for project facts. **Only add *new* facts from the `Recent Conversation` that are not already present in the `Factual Notes`.**
-
----
-
-### **2. Transcript**
-
-**Goal:** Create a verbatim log of the last ~4 turns.
-* **Format:** `[YYYY-MM-DD HH:MM:SS UTC+Z] Role: Message` or `[YYYY-MM-DD UTC+Z] Role: (calling ToolName)`
-* **Example:**
-    ```
-    [2025-07-19 10:00:01 UTC+7] User: Please create a file named todo.py.
-    [2025-07-19 10:00:15 UTC+7] Assistant: (calling `write_to_file`)
-    [2025-07-19 10:01:13 UTC+7] Assistant: Okay, I have created the file.
-    ```
-* **Action:** Use `write_past_conversation_transcript`.
-* **CRITICAL:** You MUST remove all headers (e.g., `# User Message`, `# Context`).
-* **CRITICAL:** DO NOT truncate or alter user/assistant respond for whatever reason.
----
-
-### **3. Narrative Summary**
-
-**Goal:** Combine the condensed past summary with a new summary of the recent conversation.
-* **Logic:** Timestamps MUST become less granular over time.
-* **Format & Examples:**
-    * **For today:** Summarize recent key events by the hour.
-      `[2025-07-20 14:00 UTC+7] Continued work on the 'Todo' app, fixing unit tests.`
-    * **For previous days:** Condense the entire day's activity into a single entry.
-      `[2025-07-19] Started project 'Bluebird' and set up the initial file structure.`
-    * **For previous months:** Condense the entire month's activity.
-      `[2025-06] Worked on performance optimizations for the main API.`
-* **Action:** Use `write_past_conversation_summary` to save the new, combined summary.
-* **CRITICAL:** Condense past conversation summary before combining with the more recent conversation summary.
-
+You are a silent memory management AI. Your ONLY output is tool calls.
+
+**Primary Directive:** Update the conversation memory based on the `Recent Conversation`.
+
+**Actions:**
+1. **Update Conversation:**
+    - Call `write_past_conversation_summary` ONCE. The summary must be a narrative condensing the old summary and recent conversation.
+    - Call `write_past_conversation_transcript` ONCE. The transcript MUST contain at most the last 4 (four) conversation turns. The content of these turns must not be altered or truncated, furthermore the timezone has to be included. Use the format: `[YYYY-MM-DD HH:MM:SS UTC+Z] Role: Message/Tool name being called`.
+2. **Update Factual Notes:**
+    - Read existing notes first.
+    - Call `write_long_term_note` AT MOST ONCE with new or updated global facts (e.g., user preferences).
+    - Call `write_contextual_note` AT MOST ONCE with new or updated project-specific facts.
+    - **CRITICAL - Path Specificity:** Project-specific facts are tied to the directory where they were established. You MUST analyze the `Recent Conversation` to determine the correct `context_path` for the facts you are writing. For example, if a user sets a project name while the working directory is `/tmp/a`, the `context_path` for that fact MUST be `/tmp/a`.
+    - **CRITICAL - Note Content:** Note content MUST be raw, unformatted text. Do NOT include markdown headers. Notes must be timeless facts about the current state, not a chronological log. Only write if the content has changed.
+
+**Final Step:** After all tool calls, you MUST output the word "DONE" on a new line. Do not output anything else.
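For reference, a line in the transcript format the rewritten prompt demands can be produced mechanically; a minimal sketch (the timezone offset is illustrative):

```python
from datetime import datetime, timedelta, timezone

# Build one transcript line in the prompt's required format:
# [YYYY-MM-DD HH:MM:SS UTC+Z] Role: Message/Tool name being called
tz = timezone(timedelta(hours=7))  # illustrative offset
now = datetime.now(tz)
offset_hours = int(now.utcoffset().total_seconds() // 3600)
line = f"[{now:%Y-%m-%d %H:%M:%S} UTC+{offset_hours}] User: Please create todo.py."
print(line)  # e.g. [2025-07-19 10:00:01 UTC+7] User: Please create todo.py.
```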
@@ -18,10 +18,14 @@ You are an expert AI agent fulfilling a single request. You must provide a compl
     * **CRITICAL:** After each step, you MUST use a tool to verify the outcome (e.g., check command exit codes, read back file contents, list files).

 4. **Handle Errors (The Debugging Loop):**
-    * If a tool call fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
-        1. **Analyze:** Scrutinize the complete error message (`stdout` and `stderr`).
-        2. **Hypothesize:** State a clear, specific hypothesis about the root cause.
-        3. **Act:** Propose and execute a concrete, single next step to fix the issue.
+    * If an action fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
+        1. **Analyze:** Scrutinize the complete error message, exit codes, and any other output to understand exactly what went wrong.
+        2. **Hypothesize:** State a clear, specific hypothesis about the root cause. For example, "The operation failed because the file path was incorrect," "The command failed because a required argument was missing," or "The test failed because the code has a logical error."
+        3. **Strategize and Correct:** Formulate a new action that directly addresses the hypothesis. Do not simply repeat the failed action. Your correction strategy MUST be logical and informed by the analysis. For example:
+            * If a path is wrong, take action to discover the correct path.
+            * If a command is malformed, correct its syntax or arguments.
+            * If an operation failed due to invalid state (e.g., unexpected file content, a logical bug in code), take action to inspect the current state and then formulate a targeted fix.
+        4. **Execute** the corrected action.
     * **CRITICAL:** You must exhaust all reasonable attempts to fix the issue yourself before reporting failure.

 5. **Report Final Outcome:**
zrb/config/llm_config.py CHANGED
@@ -22,11 +22,9 @@ class LLMConfig:
         default_interactive_system_prompt: str | None = None,
         default_special_instruction_prompt: str | None = None,
         default_summarization_prompt: str | None = None,
-        default_context_enrichment_prompt: str | None = None,
         default_summarize_history: bool | None = None,
         default_history_summarization_token_threshold: int | None = None,
-        default_enrich_context: bool | None = None,
-        default_context_enrichment_token_threshold: int | None = None,
+        default_modes: list[str] | None = None,
         default_model: "Model | None" = None,
         default_model_settings: "ModelSettings | None" = None,
         default_model_provider: "Provider | None" = None,
@@ -40,18 +38,14 @@ class LLMConfig:
         self._default_interactive_system_prompt = default_interactive_system_prompt
         self._default_special_instruction_prompt = default_special_instruction_prompt
         self._default_summarization_prompt = default_summarization_prompt
-        self._default_context_enrichment_prompt = default_context_enrichment_prompt
         self._default_summarize_history = default_summarize_history
         self._default_history_summarization_token_threshold = (
             default_history_summarization_token_threshold
         )
-        self._default_enrich_context = default_enrich_context
-        self._default_context_enrichment_token_threshold = (
-            default_context_enrichment_token_threshold
-        )
+        self._default_modes = default_modes
+        self._default_model = default_model
         self._default_model_settings = default_model_settings
         self._default_model_provider = default_model_provider
-        self._default_model = default_model

     def _get_internal_default_prompt(self, name: str) -> str:
         if name not in self.__internal_default_prompt:
@@ -130,32 +124,18 @@ class LLMConfig:
             lambda: self._get_internal_default_prompt("persona"),
         )

+    @property
+    def default_modes(self) -> list[str]:
+        return self._get_property(
+            self._default_modes, CFG.LLM_MODES, lambda: ["coding"]
+        )
+
     @property
     def default_special_instruction_prompt(self) -> str:
         return self._get_property(
             self._default_special_instruction_prompt,
             CFG.LLM_SPECIAL_INSTRUCTION_PROMPT,
-            lambda: self._get_workflow_prompt(CFG.LLM_MODES),
-        )
-
-    def _get_workflow_prompt(self, modes: list[str]) -> str:
-        workflows = llm_context_config.get_workflows()
-        dir_path = os.path.dirname(__file__)
-        default_workflow_names = ("code", "content", "research")
-        for workflow_name in default_workflow_names:
-            if workflow_name in workflows:
-                continue
-            workflow_file_path = os.path.join(
-                dir_path, "default_workflow", f"{workflow_name}.md"
-            )
-            with open(workflow_file_path, "r") as f:
-                workflows[workflow_name] = f.read()
-        return "\n".join(
-            [
-                make_prompt_section(header, content)
-                for header, content in workflows.items()
-                if header.lower() in modes
-            ]
+            lambda: "",
         )

     @property
@@ -206,6 +186,19 @@ class LLMConfig:
     def set_default_special_instruction_prompt(self, special_instruction_prompt: str):
         self._default_special_instruction_prompt = special_instruction_prompt

+    def set_default_modes(self, modes: list[str]):
+        self._default_modes = modes
+
+    def add_default_mode(self, mode: str):
+        if self._default_modes is None:
+            self._default_modes = []
+        self._default_modes.append(mode)
+
+    def remove_default_mode(self, mode: str):
+        if self._default_modes is None:
+            self._default_modes = []
+        self._default_modes.remove(mode)
+
     def set_default_summarization_prompt(self, summarization_prompt: str):
         self._default_summarization_prompt = summarization_prompt
204
 
@@ -1,74 +1,129 @@
 import os

 from zrb.config.config import CFG
-from zrb.config.llm_context.config_handler import LLMContextConfigHandler
-
-
-def cascading_path_filter(section_path: str, base_path: str) -> bool:
-    """
-    Returns True if the section path is an ancestor of, the same as the base path,
-    or if the section path is an absolute path.
-    """
-    return os.path.isabs(section_path) or base_path.startswith(section_path)
+from zrb.config.llm_context.config_parser import markdown_to_dict
+from zrb.util.llm.prompt import demote_markdown_headers


 class LLMContextConfig:
     """High-level API for interacting with cascaded configurations."""

-    @property
-    def _context_handler(self):
-        return LLMContextConfigHandler(
-            "Context",
-            config_file_name=CFG.LLM_CONTEXT_FILE,
-            filter_section_func=cascading_path_filter,
-            resolve_section_path=True,
-        )
-
-    @property
-    def _workflow_handler(self):
-        return LLMContextConfigHandler(
-            "Workflow",
-            config_file_name=CFG.LLM_CONTEXT_FILE,
-            resolve_section_path=False,
-        )
+    def _find_config_files(self, cwd: str) -> list[str]:
+        configs = []
+        current_dir = cwd
+        home_dir = os.path.expanduser("~")
+        while True:
+            config_path = os.path.join(current_dir, CFG.LLM_CONTEXT_FILE)
+            if os.path.exists(config_path):
+                configs.append(config_path)
+            if current_dir == home_dir or current_dir == "/":
+                break
+            current_dir = os.path.dirname(current_dir)
+        return configs
+
+    def _parse_config(self, file_path: str) -> dict[str, str]:
+        with open(file_path, "r") as f:
+            content = f.read()
+        return markdown_to_dict(content)
+
+    def _get_all_sections(self, cwd: str) -> list[tuple[str, dict[str, str]]]:
+        config_files = self._find_config_files(cwd)
+        all_sections = []
+        for config_file in config_files:
+            config_dir = os.path.dirname(config_file)
+            sections = self._parse_config(config_file)
+            all_sections.append((config_dir, sections))
+        return all_sections

     def get_contexts(self, cwd: str | None = None) -> dict[str, str]:
         """Gathers all relevant contexts for a given path."""
         if cwd is None:
             cwd = os.getcwd()
-        return self._context_handler.get_section(cwd)
+        all_sections = self._get_all_sections(cwd)
+        contexts: dict[str, str] = {}
+        for config_dir, sections in reversed(all_sections):
+            for key, value in sections.items():
+                if key.startswith("Context:"):
+                    context_path = key.replace("Context:", "").strip()
+                    if context_path == ".":
+                        context_path = config_dir
+                    elif not os.path.isabs(context_path):
+                        context_path = os.path.abspath(
+                            os.path.join(config_dir, context_path)
+                        )
+                    if os.path.isabs(context_path) or cwd.startswith(context_path):
+                        contexts[context_path] = value
+        return contexts

     def get_workflows(self, cwd: str | None = None) -> dict[str, str]:
         """Gathers all relevant workflows for a given path."""
         if cwd is None:
             cwd = os.getcwd()
-        return self._workflow_handler.get_section(cwd)
+        all_sections = self._get_all_sections(cwd)
+        workflows: dict[str, str] = {}
+        for _, sections in reversed(all_sections):
+            for key, value in sections.items():
+                if key.startswith("Workflow:"):
+                    workflow_name = key.replace("Workflow:", "").strip()
+                    if workflow_name not in workflows:
+                        workflows[workflow_name] = value
+        return workflows

-    def add_to_context(
+    def write_context(
         self, content: str, context_path: str | None = None, cwd: str | None = None
     ):
-        """Adds content to a context block in the nearest configuration file."""
+        """Writes content to a context block in the nearest configuration file."""
         if cwd is None:
             cwd = os.getcwd()
         if context_path is None:
             context_path = cwd
-        abs_path = os.path.abspath(context_path)
-        home_dir = os.path.expanduser("~")
-        search_dir = cwd
-        if not abs_path.startswith(home_dir):
-            search_dir = home_dir
-        self._context_handler.add_to_section(content, abs_path, cwd=search_dir)

-    def remove_from_context(
-        self, content: str, context_path: str | None = None, cwd: str | None = None
-    ) -> bool:
-        """Removes content from a context block in all relevant config files."""
-        if cwd is None:
-            cwd = os.getcwd()
-        if context_path is None:
-            context_path = cwd
-        abs_path = os.path.abspath(context_path)
-        return self._context_handler.remove_from_section(content, abs_path, cwd=cwd)
+        config_files = self._find_config_files(cwd)
+        if config_files:
+            config_file = config_files[0]  # Closest config file
+        else:
+            config_file = os.path.join(cwd, CFG.LLM_CONTEXT_FILE)
+
+        sections = {}
+        if os.path.exists(config_file):
+            sections = self._parse_config(config_file)
+
+        # Determine the section key
+        section_key_path = context_path
+        if not os.path.isabs(context_path):
+            config_dir = os.path.dirname(config_file)
+            section_key_path = os.path.abspath(os.path.join(config_dir, context_path))
+
+        # Find existing key
+        found_key = ""
+        for key in sections.keys():
+            if not key.startswith("Context:"):
+                continue
+            key_path = key.replace("Context:", "").strip()
+            if key_path == ".":
+                key_path = os.path.dirname(config_file)
+            elif not os.path.isabs(key_path):
+                key_path = os.path.abspath(
+                    os.path.join(os.path.dirname(config_file), key_path)
+                )
+            if key_path == section_key_path:
+                found_key = key
+                break
+
+        if found_key != "":
+            sections[found_key] = content
+        else:
+            # Add new entry
+            new_key = f"Context: {context_path}"
+            sections[new_key] = content
+
+        # Serialize back to markdown
+        new_file_content = ""
+        for key, value in sections.items():
+            new_file_content += f"# {key}\n{demote_markdown_headers(value)}\n\n"
+
+        with open(config_file, "w") as f:
+            f.write(new_file_content)


 llm_context_config = LLMContextConfig()
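The rewritten reader walks from the working directory up to the home directory (or filesystem root), parsing each file named by `CFG.LLM_CONTEXT_FILE` into H1 sections. A configuration file exercising both section kinds might look like this (the paths and contents are illustrative):

```markdown
# Context: .
Facts scoped to the directory containing this file ("." resolves to it).

# Context: /etc/shared
Facts under an absolute path; always included regardless of cwd.

# Workflow: coding
Instructions applied when the "coding" mode is active.
```

Relative context paths are resolved against the directory of the file that declares them, and nearer files win when the same workflow name appears at several levels.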
@@ -0,0 +1,46 @@
+import re
+
+from zrb.util.llm.prompt import promote_markdown_headers
+
+
+def markdown_to_dict(markdown: str) -> dict[str, str]:
+    sections: dict[str, str] = {}
+    current_title = ""
+    current_content: list[str] = []
+    fence_stack: list[str] = []
+
+    fence_pattern = re.compile(r"^([`~]{3,})(.*)$")
+    h1_pattern = re.compile(r"^# (.+)$")
+
+    for line in markdown.splitlines():
+        # Detect code fence open/close
+        fence_match = fence_pattern.match(line.strip())
+
+        if fence_match:
+            fence = fence_match.group(1)
+            if fence_stack and fence_stack[-1] == fence:
+                fence_stack.pop()  # close current fence
+            else:
+                fence_stack.append(fence)  # open new fence
+
+        # Only parse H1 when not inside a code fence
+        if not fence_stack:
+            h1_match = h1_pattern.match(line)
+            if h1_match:
+                # Save previous section
+                if current_title:
+                    sections[current_title] = "\n".join(current_content).strip()
+                # Start new section
+                current_title = h1_match.group(1).strip()
+                current_content = []
+                continue
+
+        current_content.append(line)
+
+    # Save final section
+    if current_title:
+        sections[current_title] = "\n".join(current_content).strip()
+    return {
+        header: promote_markdown_headers(content)
+        for header, content in sections.items()
+    }
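A quick usage sketch of the new parser; note the fence stack means an H1-looking line inside a code fence does not start a new section (the sample input is illustrative):

```python
from zrb.config.llm_context.config_parser import markdown_to_dict

sample = "\n".join([
    "# Context: .",
    "Some project fact.",
    "# Workflow: coding",
    "~~~",
    "# not a header: this line sits inside a code fence",
    "~~~",
    "Run the tests before committing.",
])
sections = markdown_to_dict(sample)
print(sorted(sections))  # ['Context: .', 'Workflow: coding']
```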
@@ -50,7 +50,10 @@ class SharedContext(AnySharedContext):

     @property
     def is_tty(self) -> bool:
-        return sys.stdin.isatty()
+        try:
+            return sys.stdin.isatty()
+        except Exception:
+            return False

     @property
     def input(self) -> DotDict:
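The guard matters when sys.stdin has been closed or swapped for a file-like object whose isatty() raises instead of returning False, as can happen under some runners; the failure mode and fallback are easy to reproduce:

```python
import io
import sys

# A closed text stream raises ValueError from isatty(), which the
# bare `return sys.stdin.isatty()` would have propagated.
closed = io.StringIO()
closed.close()
sys.stdin = closed
try:
    tty = sys.stdin.isatty()
except Exception:
    tty = False  # mirrors the new SharedContext.is_tty fallback
print(tty)  # False
```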