zrb 1.12.0__py3-none-any.whl → 1.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,10 +9,10 @@ from zrb.task.llm.config import get_model, get_model_settings
  from zrb.task.llm.prompt import get_system_and_user_prompt

  if TYPE_CHECKING:
- from pydantic_ai import Tool
- from pydantic_ai.mcp import MCPServer
+ from pydantic_ai import Agent, Tool
  from pydantic_ai.models import Model
  from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.toolsets import AbstractToolset

  ToolOrCallable = Tool | Callable
  else:
@@ -26,7 +26,7 @@ def create_sub_agent_tool(
  model: "str | Model | None" = None,
  model_settings: "ModelSettings | None" = None,
  tools: list[ToolOrCallable] = [],
- mcp_servers: list["MCPServer"] = [],
+ toolsets: list["AbstractToolset[Agent]"] = [],
  ) -> Callable[[AnyContext, str], Coroutine[Any, Any, str]]:
  """
  Creates a "tool that is another AI agent," capable of handling complex, multi-step sub-tasks.
@@ -42,7 +42,7 @@ def create_sub_agent_tool(
  model (str | Model, optional): The language model the sub-agent will use.
  model_settings (ModelSettings, optional): Specific settings for the sub-agent's model.
  tools (list, optional): A list of tools that will be exclusively available to the sub-agent.
- mcp_servers (list, optional): A list of MCP servers for the sub-agent.
+ toolsets (list, optional): A list of Toolset for the sub-agent.

  Returns:
  Callable: An asynchronous function that serves as the sub-agent tool. When called, it runs the sub-agent with a given query and returns its final result.
@@ -85,7 +85,7 @@ def create_sub_agent_tool(
  system_prompt=resolved_system_prompt,
  model_settings=resolved_model_settings,
  tools=tools,
- mcp_servers=mcp_servers,
+ toolsets=toolsets,
  )

  sub_agent_run = None
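Note on the change above: `create_sub_agent_tool` now accepts `toolsets` instead of `mcp_servers`. A minimal migration sketch, assuming pydantic-ai's `MCPServerStdio` class (MCP servers act as toolsets) and illustrative values for the leading parameters that this hunk does not show:

```python
# Hedged sketch: only the `toolsets` keyword comes from this diff; the MCP server
# class, its arguments, and the leading tool parameters are illustrative assumptions.
from pydantic_ai.mcp import MCPServerStdio

from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

fs_server = MCPServerStdio(  # hypothetical MCP server definition
    "npx", args=["-y", "@modelcontextprotocol/server-filesystem", "."]
)

# zrb 1.12.0: create_sub_agent_tool(..., mcp_servers=[fs_server])
# zrb 1.13.0: the same objects are passed through `toolsets`.
explore_files = create_sub_agent_tool(
    tool_name="explore_files",             # assumed parameter, not shown in this hunk
    tool_description="Explore the repo.",  # assumed parameter, not shown in this hunk
    toolsets=[fs_server],
)
```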
@@ -21,10 +21,14 @@ You are an expert AI agent in a CLI. You MUST follow this workflow for this inte
  * **CRITICAL:** Immediately after execution, you MUST use a tool to verify the outcome (e.g., after `write_file`, use `read_file`; after `rm`, use `ls` to confirm absence).

  4. **Handle Errors (The Debugging Loop):**
- * If a tool call fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
- 1. **Analyze:** Scrutinize the complete error message (`stdout` and `stderr`).
- 2. **Hypothesize:** State a clear, specific hypothesis about the root cause.
- 3. **Act:** Propose and execute a concrete, single next step to fix the issue.
+ * If an action fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
+ 1. **Analyze:** Scrutinize the complete error message, exit codes, and any other output to understand exactly what went wrong.
+ 2. **Hypothesize:** State a clear, specific hypothesis about the root cause. For example, "The operation failed because the file path was incorrect," "The command failed because a required argument was missing," or "The test failed because the code has a logical error."
+ 3. **Strategize and Correct:** Formulate a new action that directly addresses the hypothesis. Do not simply repeat the failed action. Your correction strategy MUST be logical and informed by the analysis. For example:
+ * If a path is wrong, take action to discover the correct path.
+ * If a command is malformed, correct its syntax or arguments.
+ * If an operation failed due to invalid state (e.g., unexpected file content, a logical bug in code), take action to inspect the current state and then formulate a targeted fix.
+ 4. **Execute** the corrected action.
  * **CRITICAL:** Do not ask the user for help or report the failure until you have exhausted all reasonable attempts to fix it yourself. If the user provides a vague follow-up like "try again," you MUST use the context of the previous failure to inform your next action, not just repeat the failed command.

  5. **Report Results:**
@@ -1,42 +1,16 @@
- You are a silent AI tool. Your ONLY job is to call tools to update the conversation memory based on the `Recent Conversation (JSON)`. Your response MUST be only tool calls.
-
- ---
-
- ### **1. Factual Notes**
-
- **Goal:** Extract permanent facts. Do NOT log activities.
- * **Good Fact:** `User prefers Python.`
- * **Bad Activity:** `User ran tests.`
- * **Action:** Use `add_long_term_info` for global facts and `add_contextual_info` for project facts. **Only add *new* facts from the `Recent Conversation` that are not already present in the `Factual Notes`.**
-
- ---
-
- ### **2. Transcript**
-
- **Goal:** Create a verbatim log of the last ~4 turns.
- * **Format:** `[YYYY-MM-DD HH:MM:SS UTC+Z] Role: Message` or `[YYYY-MM-DD UTC+Z] Role: (calling ToolName)`
- * **Example:**
- ```
- [2025-07-19 10:00:01 UTC+7] User: Please create a file named todo.py.
- [2025-07-19 10:00:15 UTC+7] Assistant: (calling `write_to_file`)
- [2025-07-19 10:01:13 UTC+7] Assistant: Okay, I have created the file.
- ```
- * **Action:** Use `write_past_conversation_transcript`.
- * **CRITICAL:** You MUST remove all headers (e.g., `# User Message`, `# Context`).
- * **CRITICAL:** DO NOT truncate or alter user/assistant respond for whatever reason.
- ---
-
- ### **3. Narrative Summary**
-
- **Goal:** Combine the condensed past summary with a new summary of the recent conversation.
- * **Logic:** Timestamps MUST become less granular over time.
- * **Format & Examples:**
- * **For today:** Summarize recent key events by the hour.
- `[2025-07-20 14:00 UTC+7] Continued work on the 'Todo' app, fixing unit tests.`
- * **For previous days:** Condense the entire day's activity into a single entry.
- `[2025-07-19] Started project 'Bluebird' and set up the initial file structure.`
- * **For previous months:** Condense the entire month's activity.
- `[2025-06] Worked on performance optimizations for the main API.`
- * **Action:** Use `write_past_conversation_summary` to save the new, combined summary.
- * **CRITICAL:** Condense past conversation summary before combining with the more recent conversation summary.
-
+ You are a silent memory management AI. Your ONLY output is tool calls.
+
+ **Primary Directive:** Update the conversation memory based on the `Recent Conversation`.
+
+ **Actions:**
+ 1. **Update Conversation:**
+ - Call `write_past_conversation_summary` ONCE. The summary must be a narrative condensing the old summary and recent conversation.
+ - Call `write_past_conversation_transcript` ONCE. The transcript MUST contain at most the last 4 (four) conversation turns. The content of these turns must not be altered or truncated, furthermore the timezone has to be included. Use the format: `[YYYY-MM-DD HH:MM:SS UTC+Z] Role: Message/Tool name being called`.
+ 2. **Update Factual Notes:**
+ - Read existing notes first.
+ - Call `write_long_term_note` AT MOST ONCE with new or updated global facts (e.g., user preferences).
+ - Call `write_contextual_note` AT MOST ONCE with new or updated project-specific facts.
+ - **CRITICAL - Path Specificity:** Project-specific facts are tied to the directory where they were established. You MUST analyze the `Recent Conversation` to determine the correct `context_path` for the facts you are writing. For example, if a user sets a project name while the working directory is `/tmp/a`, the `context_path` for that fact MUST be `/tmp/a`.
+ - **CRITICAL - Note Content:** Note content MUST be raw, unformatted text. Do NOT include markdown headers. Notes must be timeless facts about the current state, not a chronological log. Only write if the content has changed.
+
+ **Final Step:** After all tool calls, you MUST output the word "DONE" on a new line. Do not output anything else.
@@ -18,10 +18,14 @@ You are an expert AI agent fulfilling a single request. You must provide a compl
  * **CRITICAL:** After each step, you MUST use a tool to verify the outcome (e.g., check command exit codes, read back file contents, list files).

  4. **Handle Errors (The Debugging Loop):**
- * If a tool call fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
- 1. **Analyze:** Scrutinize the complete error message (`stdout` and `stderr`).
- 2. **Hypothesize:** State a clear, specific hypothesis about the root cause.
- 3. **Act:** Propose and execute a concrete, single next step to fix the issue.
+ * If an action fails, you MUST NOT give up. You MUST enter a persistent debugging loop until the error is resolved.
+ 1. **Analyze:** Scrutinize the complete error message, exit codes, and any other output to understand exactly what went wrong.
+ 2. **Hypothesize:** State a clear, specific hypothesis about the root cause. For example, "The operation failed because the file path was incorrect," "The command failed because a required argument was missing," or "The test failed because the code has a logical error."
+ 3. **Strategize and Correct:** Formulate a new action that directly addresses the hypothesis. Do not simply repeat the failed action. Your correction strategy MUST be logical and informed by the analysis. For example:
+ * If a path is wrong, take action to discover the correct path.
+ * If a command is malformed, correct its syntax or arguments.
+ * If an operation failed due to invalid state (e.g., unexpected file content, a logical bug in code), take action to inspect the current state and then formulate a targeted fix.
+ 4. **Execute** the corrected action.
  * **CRITICAL:** You must exhaust all reasonable attempts to fix the issue yourself before reporting failure.

  5. **Report Final Outcome:**
@@ -1,74 +1,129 @@
  import os

  from zrb.config.config import CFG
- from zrb.config.llm_context.config_handler import LLMContextConfigHandler
-
-
- def cascading_path_filter(section_path: str, base_path: str) -> bool:
- """
- Returns True if the section path is an ancestor of, the same as the base path,
- or if the section path is an absolute path.
- """
- return os.path.isabs(section_path) or base_path.startswith(section_path)
+ from zrb.config.llm_context.config_parser import markdown_to_dict
+ from zrb.util.llm.prompt import demote_markdown_headers


  class LLMContextConfig:
  """High-level API for interacting with cascaded configurations."""

- @property
- def _context_handler(self):
- return LLMContextConfigHandler(
- "Context",
- config_file_name=CFG.LLM_CONTEXT_FILE,
- filter_section_func=cascading_path_filter,
- resolve_section_path=True,
- )
-
- @property
- def _workflow_handler(self):
- return LLMContextConfigHandler(
- "Workflow",
- config_file_name=CFG.LLM_CONTEXT_FILE,
- resolve_section_path=False,
- )
+ def _find_config_files(self, cwd: str) -> list[str]:
+ configs = []
+ current_dir = cwd
+ home_dir = os.path.expanduser("~")
+ while True:
+ config_path = os.path.join(current_dir, CFG.LLM_CONTEXT_FILE)
+ if os.path.exists(config_path):
+ configs.append(config_path)
+ if current_dir == home_dir or current_dir == "/":
+ break
+ current_dir = os.path.dirname(current_dir)
+ return configs
+
+ def _parse_config(self, file_path: str) -> dict[str, str]:
+ with open(file_path, "r") as f:
+ content = f.read()
+ return markdown_to_dict(content)
+
+ def _get_all_sections(self, cwd: str) -> list[tuple[str, dict[str, str]]]:
+ config_files = self._find_config_files(cwd)
+ all_sections = []
+ for config_file in config_files:
+ config_dir = os.path.dirname(config_file)
+ sections = self._parse_config(config_file)
+ all_sections.append((config_dir, sections))
+ return all_sections

  def get_contexts(self, cwd: str | None = None) -> dict[str, str]:
  """Gathers all relevant contexts for a given path."""
  if cwd is None:
  cwd = os.getcwd()
- return self._context_handler.get_section(cwd)
+ all_sections = self._get_all_sections(cwd)
+ contexts: dict[str, str] = {}
+ for config_dir, sections in reversed(all_sections):
+ for key, value in sections.items():
+ if key.startswith("Context:"):
+ context_path = key.replace("Context:", "").strip()
+ if context_path == ".":
+ context_path = config_dir
+ elif not os.path.isabs(context_path):
+ context_path = os.path.abspath(
+ os.path.join(config_dir, context_path)
+ )
+ if os.path.isabs(context_path) or cwd.startswith(context_path):
+ contexts[context_path] = value
+ return contexts

  def get_workflows(self, cwd: str | None = None) -> dict[str, str]:
  """Gathers all relevant workflows for a given path."""
  if cwd is None:
  cwd = os.getcwd()
- return self._workflow_handler.get_section(cwd)
+ all_sections = self._get_all_sections(cwd)
+ workflows: dict[str, str] = {}
+ for _, sections in reversed(all_sections):
+ for key, value in sections.items():
+ if key.startswith("Workflow:"):
+ workflow_name = key.replace("Workflow:", "").strip()
+ if workflow_name not in workflows:
+ workflows[workflow_name] = value
+ return workflows

- def add_to_context(
+ def write_context(
  self, content: str, context_path: str | None = None, cwd: str | None = None
  ):
- """Adds content to a context block in the nearest configuration file."""
+ """Writes content to a context block in the nearest configuration file."""
  if cwd is None:
  cwd = os.getcwd()
  if context_path is None:
  context_path = cwd
- abs_path = os.path.abspath(context_path)
- home_dir = os.path.expanduser("~")
- search_dir = cwd
- if not abs_path.startswith(home_dir):
- search_dir = home_dir
- self._context_handler.add_to_section(content, abs_path, cwd=search_dir)

- def remove_from_context(
- self, content: str, context_path: str | None = None, cwd: str | None = None
- ) -> bool:
- """Removes content from a context block in all relevant config files."""
- if cwd is None:
- cwd = os.getcwd()
- if context_path is None:
- context_path = cwd
- abs_path = os.path.abspath(context_path)
- return self._context_handler.remove_from_section(content, abs_path, cwd=cwd)
+ config_files = self._find_config_files(cwd)
+ if config_files:
+ config_file = config_files[0] # Closest config file
+ else:
+ config_file = os.path.join(cwd, CFG.LLM_CONTEXT_FILE)
+
+ sections = {}
+ if os.path.exists(config_file):
+ sections = self._parse_config(config_file)
+
+ # Determine the section key
+ section_key_path = context_path
+ if not os.path.isabs(context_path):
+ config_dir = os.path.dirname(config_file)
+ section_key_path = os.path.abspath(os.path.join(config_dir, context_path))
+
+ # Find existing key
+ found_key = ""
+ for key in sections.keys():
+ if not key.startswith("Context:"):
+ continue
+ key_path = key.replace("Context:", "").strip()
+ if key_path == ".":
+ key_path = os.path.dirname(config_file)
+ elif not os.path.isabs(key_path):
+ key_path = os.path.abspath(
+ os.path.join(os.path.dirname(config_file), key_path)
+ )
+ if key_path == section_key_path:
+ found_key = key
+ break
+
+ if found_key != "":
+ sections[found_key] = content
+ else:
+ # Add new entry
+ new_key = f"Context: {context_path}"
+ sections[new_key] = content
+
+ # Serialize back to markdown
+ new_file_content = ""
+ for key, value in sections.items():
+ new_file_content += f"# {key}\n{demote_markdown_headers(value)}\n\n"
+
+ with open(config_file, "w") as f:
+ f.write(new_file_content)


  llm_context_config = LLMContextConfig()
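The rewritten `LLMContextConfig` walks from `cwd` up to the home directory (or `/`), parses each configuration file it finds into `# Context: <path>` and `# Workflow: <name>` sections, and overwrites whole sections on write. A hedged usage sketch, assuming the default config file name `ZRB.md` and hypothetical paths:

```python
# Hedged sketch of the cascading lookup implemented above.
from zrb.config.llm_context.config import llm_context_config

# /home/user/projects/app/ZRB.md might contain:
#   # Context: .
#   The app is a FastAPI service.
#
#   # Workflow: deploy
#   Run the test suite before deploying.
#
# "Context: ." resolves to the directory holding the file, so a deeper cwd
# still inherits its ancestors' contexts.
contexts = llm_context_config.get_contexts(cwd="/home/user/projects/app/src")
workflows = llm_context_config.get_workflows(cwd="/home/user/projects/app/src")

# write_context replaces the matching "# Context: ..." section in the nearest
# config file, or creates a new file in cwd when none exists up the tree.
llm_context_config.write_context(
    "The app is a FastAPI service.\nUse Python 3.12.",
    context_path="/home/user/projects/app",
    cwd="/home/user/projects/app",
)
```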
@@ -0,0 +1,46 @@
+ import re
+
+ from zrb.util.llm.prompt import promote_markdown_headers
+
+
+ def markdown_to_dict(markdown: str) -> dict[str, str]:
+ sections: dict[str, str] = {}
+ current_title = ""
+ current_content: list[str] = []
+ fence_stack: list[str] = []
+
+ fence_pattern = re.compile(r"^([`~]{3,})(.*)$")
+ h1_pattern = re.compile(r"^# (.+)$")
+
+ for line in markdown.splitlines():
+ # Detect code fence open/close
+ fence_match = fence_pattern.match(line.strip())
+
+ if fence_match:
+ fence = fence_match.group(1)
+ if fence_stack and fence_stack[-1] == fence:
+ fence_stack.pop() # close current fence
+ else:
+ fence_stack.append(fence) # open new fence
+
+ # Only parse H1 when not inside a code fence
+ if not fence_stack:
+ h1_match = h1_pattern.match(line)
+ if h1_match:
+ # Save previous section
+ if current_title:
+ sections[current_title] = "\n".join(current_content).strip()
+ # Start new section
+ current_title = h1_match.group(1).strip()
+ current_content = []
+ continue
+
+ current_content.append(line)
+
+ # Save final section
+ if current_title:
+ sections[current_title] = "\n".join(current_content).strip()
+ return {
+ header: promote_markdown_headers(content)
+ for header, content in sections.items()
+ }
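A small usage sketch for the parser above: each H1 title becomes a key, body headers are promoted back one level, and H1 lines inside code fences are treated as content rather than section titles.

```python
# Usage sketch for markdown_to_dict, based only on the behaviour shown above.
from zrb.config.llm_context.config_parser import markdown_to_dict

sample = "\n".join(
    [
        "# Context: /home/user",
        "User prefers Python.",
        "",
        "# Workflow: deploy",
        "## Steps",
        "Run the pipeline.",
    ]
)
sections = markdown_to_dict(sample)
# {'Context: /home/user': 'User prefers Python.',
#  'Workflow: deploy': '# Steps\nRun the pipeline.'}
```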
@@ -50,7 +50,10 @@ class SharedContext(AnySharedContext):

  @property
  def is_tty(self) -> bool:
- return sys.stdin.isatty()
+ try:
+ return sys.stdin.isatty()
+ except Exception:
+ return False

  @property
  def input(self) -> DotDict:
zrb/task/llm/agent.py CHANGED
@@ -7,15 +7,15 @@ from zrb.context.any_context import AnyContext
  from zrb.context.any_shared_context import AnySharedContext
  from zrb.task.llm.error import extract_api_error_details
  from zrb.task.llm.print_node import print_node
- from zrb.task.llm.tool_wrapper import wrap_tool
+ from zrb.task.llm.tool_wrapper import wrap_func, wrap_tool
  from zrb.task.llm.typing import ListOfDict

  if TYPE_CHECKING:
  from pydantic_ai import Agent, Tool
  from pydantic_ai.agent import AgentRun
- from pydantic_ai.mcp import MCPServer
  from pydantic_ai.models import Model
  from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.toolsets import AbstractToolset

  ToolOrCallable = Tool | Callable
  else:
@@ -28,26 +28,43 @@ def create_agent_instance(
  system_prompt: str = "",
  model_settings: "ModelSettings | None" = None,
  tools: list[ToolOrCallable] = [],
- mcp_servers: list["MCPServer"] = [],
+ toolsets: list["AbstractToolset[Agent]"] = [],
  retries: int = 3,
  ) -> "Agent":
  """Creates a new Agent instance with configured tools and servers."""
  from pydantic_ai import Agent, Tool
+ from pydantic_ai.tools import GenerateToolJsonSchema

  # Normalize tools
  tool_list = []
  for tool_or_callable in tools:
  if isinstance(tool_or_callable, Tool):
  tool_list.append(tool_or_callable)
+ # Update tool's function
+ tool = tool_or_callable
+ tool_list.append(
+ Tool(
+ function=wrap_func(tool.function),
+ takes_ctx=tool.takes_ctx,
+ max_retries=tool.max_retries,
+ name=tool.name,
+ description=tool.description,
+ prepare=tool.prepare,
+ docstring_format=tool.docstring_format,
+ require_parameter_descriptions=tool.require_parameter_descriptions,
+ schema_generator=GenerateToolJsonSchema,
+ strict=tool.strict,
+ )
+ )
  else:
- # Pass ctx to wrap_tool
+ # Turn function into tool
  tool_list.append(wrap_tool(tool_or_callable, ctx))
  # Return Agent
  return Agent(
  model=model,
  system_prompt=system_prompt,
  tools=tool_list,
- toolsets=mcp_servers,
+ toolsets=toolsets,
  model_settings=model_settings,
  retries=retries,
  )
@@ -63,8 +80,8 @@ def get_agent(
  list[ToolOrCallable] | Callable[[AnySharedContext], list[ToolOrCallable]]
  ),
  additional_tools: list[ToolOrCallable],
- mcp_servers_attr: "list[MCPServer] | Callable[[AnySharedContext], list[MCPServer]]",
- additional_mcp_servers: "list[MCPServer]",
+ toolsets_attr: "list[AbstractToolset[Agent]] | Callable[[AnySharedContext], list[AbstractToolset[Agent]]]", # noqa
+ additional_toolsets: "list[AbstractToolset[Agent]]",
  retries: int = 3,
  ) -> "Agent":
  """Retrieves the configured Agent instance or creates one if necessary."""
@@ -85,18 +102,16 @@ def get_agent(
  # Get tools for agent
  tools = list(tools_attr(ctx) if callable(tools_attr) else tools_attr)
  tools.extend(additional_tools)
- # Get MCP Servers for agent
- mcp_servers = list(
- mcp_servers_attr(ctx) if callable(mcp_servers_attr) else mcp_servers_attr
- )
- mcp_servers.extend(additional_mcp_servers)
+ # Get Toolsets for agent
+ tool_sets = list(toolsets_attr(ctx) if callable(toolsets_attr) else toolsets_attr)
+ tool_sets.extend(additional_toolsets)
  # If no agent provided, create one using the configuration
  return create_agent_instance(
  ctx=ctx,
  model=model,
  system_prompt=system_prompt,
  tools=tools,
- mcp_servers=mcp_servers,
+ toolsets=tool_sets,
  model_settings=model_settings,
  retries=retries,
  )
@@ -176,46 +176,23 @@ class ConversationHistory:
  """
  return json.dumps({"content": self._fetch_long_term_note()})

- def add_long_term_info(self, new_info: str) -> str:
+ def write_long_term_note(self, content: str) -> str:
  """
- Add new info for long-term reference.
+ Write the entire content of the long-term references.
+ This will overwrite any existing long-term notes.

  Args:
- new_info (str): New info to be added into long-term references.
+ content (str): The full content of the long-term notes.

  Returns:
- str: JSON with new content of the notes.
-
- Raises:
- Exception: If the note cannot be read.
- """
- llm_context_config.add_to_context(new_info, cwd="/")
- return json.dumps({"success": True, "content": self._fetch_long_term_note()})
-
- def remove_long_term_info(self, irrelevant_info: str) -> str:
+ str: JSON indicating success.
  """
- Remove irrelevant info from long-term reference.
-
- Args:
- irrelevant_info (str): Irrelevant info to be removed from long-term references.
-
- Returns:
- str: JSON with new content of the notes and deletion status.
-
- Raises:
- Exception: If the note cannot be read.
- """
- was_removed = llm_context_config.remove_from_context(irrelevant_info, cwd="/")
- return json.dumps(
- {
- "success": was_removed,
- "content": self._fetch_long_term_note(),
- }
- )
+ llm_context_config.write_context(content, context_path="/")
+ return json.dumps({"success": True})

  def read_contextual_note(self) -> str:
  """
- Read the content of the contextual references.
+ Read the content of the contextual references for the current project.

  This tool helps you retrieve knowledge or notes stored for contextual reference.
  If the note does not exist, you may want to create it using the write tool.
@@ -228,52 +205,25 @@ class ConversationHistory:
  """
  return json.dumps({"content": self._fetch_contextual_note()})

- def add_contextual_info(self, new_info: str, context_path: str | None) -> str:
- """
- Add new info for contextual reference.
-
- Args:
- new_info (str): New info to be added into contextual references.
- context_path (str, optional): contextual directory path for new info
-
- Returns:
- str: JSON with new content of the notes.
-
- Raises:
- Exception: If the note cannot be read.
- """
- if context_path is None:
- context_path = self.project_path
- llm_context_config.add_to_context(new_info, context_path=context_path)
- return json.dumps({"success": True, "content": self._fetch_contextual_note()})
-
- def remove_contextual_info(
- self, irrelevant_info: str, context_path: str | None
+ def write_contextual_note(
+ self, content: str, context_path: str | None = None
  ) -> str:
  """
- Remove irrelevant info from contextual reference.
+ Write the entire content of the contextual references for a specific path.
+ This will overwrite any existing contextual notes for that path.

  Args:
- irrelevant_info (str): Irrelevant info to be removed from contextual references.
- context_path (str, optional): contextual directory path of the irrelevant info
+ content (str): The full content of the contextual notes.
+ context_path (str, optional): The directory path for the context.
+ Defaults to the current project path.

  Returns:
- str: JSON with new content of the notes and deletion status.
-
- Raises:
- Exception: If the note cannot be read.
+ str: JSON indicating success.
  """
  if context_path is None:
  context_path = self.project_path
- was_removed = llm_context_config.remove_from_context(
- irrelevant_info, context_path=context_path
- )
- return json.dumps(
- {
- "success": was_removed,
- "content": self._fetch_contextual_note(),
- }
- )
+ llm_context_config.write_context(content, context_path=context_path)
+ return json.dumps({"success": True})

  def _fetch_long_term_note(self):
  contexts = llm_context_config.get_contexts(cwd=self.project_path)
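The note API above replaces the add/remove pairs with idempotent overwrites. A hedged sketch (how a `ConversationHistory` instance and its `project_path` are set up is not shown in this diff):

```python
# Hedged sketch: method names and arguments come from this diff; obtaining the
# ConversationHistory instance is assumed (e.g. provided by the running LLM task).
history = conversation_history  # assumed to exist

# Overwrites the global note, which is stored under context_path="/".
history.write_long_term_note("User prefers Python and concise answers.")

# Overwrites the project note; defaults to history.project_path when
# context_path is omitted.
history.write_contextual_note(
    "Project uses Poetry; run tests with pytest.",
    context_path="/home/user/projects/app",  # hypothetical path
)

print(history.read_long_term_note())   # JSON string: {"content": "..."}
print(history.read_contextual_note())  # JSON string: {"content": "..."}
```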
@@ -146,11 +146,9 @@ async def summarize_history(
  conversation_history.write_past_conversation_summary,
  conversation_history.write_past_conversation_transcript,
  conversation_history.read_long_term_note,
- conversation_history.add_long_term_info,
- conversation_history.remove_long_term_info,
+ conversation_history.write_long_term_note,
  conversation_history.read_contextual_note,
- conversation_history.add_contextual_info,
- conversation_history.remove_contextual_info,
+ conversation_history.write_contextual_note,
  ],
  )
  try:
@@ -14,6 +14,7 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
  PartDeltaEvent,
  PartStartEvent,
  TextPartDelta,
+ ThinkingPartDelta,
  ToolCallPartDelta,
  )

@@ -33,7 +34,9 @@ async def print_node(print_func: Callable, agent_run: Any, node: Any):
  )
  is_streaming = False
  elif isinstance(event, PartDeltaEvent):
- if isinstance(event.delta, TextPartDelta):
+ if isinstance(event.delta, TextPartDelta) or isinstance(
+ event.delta, ThinkingPartDelta
+ ):
  print_func(
  stylize_faint(f"{event.delta.content_delta}"),
  end="",
@@ -21,16 +21,19 @@ def wrap_tool(func: Callable, ctx: AnyContext) -> "Tool":
  from pydantic_ai import RunContext, Tool

  original_sig = inspect.signature(func)
- # Use helper function for clarity
  needs_run_context_for_pydantic = _has_context_parameter(original_sig, RunContext)
+ wrapper = wrap_func(func, ctx)
+ return Tool(wrapper, takes_ctx=needs_run_context_for_pydantic)
+
+
+ def wrap_func(func: Callable, ctx: AnyContext) -> Callable:
+ original_sig = inspect.signature(func)
  needs_any_context_for_injection = _has_context_parameter(original_sig, AnyContext)
  takes_no_args = len(original_sig.parameters) == 0
  # Pass individual flags to the wrapper creator
  wrapper = _create_wrapper(func, original_sig, ctx, needs_any_context_for_injection)
- # Adjust signature - _adjust_signature determines exclusions based on type
  _adjust_signature(wrapper, original_sig, takes_no_args)
- # takes_ctx in pydantic-ai Tool is specifically for RunContext
- return Tool(wrapper, takes_ctx=needs_run_context_for_pydantic)
+ return wrapper


  def _has_context_parameter(original_sig: inspect.Signature, context_type: type) -> bool:
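With the split above, `wrap_func` carries the confirmation/context-injection wrapper on its own, while `wrap_tool` still returns a pydantic-ai `Tool`. A rough sketch, assuming `ctx` is the `AnyContext` of a running task:

```python
# Hedged sketch: wrap_tool/wrap_func signatures come from this diff; `ctx` is
# assumed to be the AnyContext available inside a running zrb task.
from zrb.task.llm.tool_wrapper import wrap_func, wrap_tool


def read_version(path: str) -> str:
    """Read a version file."""
    with open(path, "r") as f:
        return f.read().strip()


tool = wrap_tool(read_version, ctx)     # pydantic_ai.Tool, takes_ctx resolved as before
wrapper = wrap_func(read_version, ctx)  # bare wrapper, reused in agent.py above to
                                        # re-wrap the .function of pre-built Tool objects
```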
@@ -98,9 +101,12 @@ def _create_wrapper(
  func_name = get_callable_name(func)
  ctx.print(f"✅ >> Allow to run tool: {func_name} (Y/n)", plain=True)
  user_confirmation_str = await _read_line()
- user_confirmation = to_boolean(user_confirmation_str)
+ try:
+ user_confirmation = to_boolean(user_confirmation_str)
+ except Exception:
+ user_confirmation = False
  if not user_confirmation:
- ctx.print("❌ >> Why?", plain=True)
+ ctx.print(f"❌ >> Rejecting {func_name} call. Why?", plain=True)
  reason = await _read_line()
  ctx.print("", plain=True)
  raise ValueError(f"User disapproval: {reason}")
zrb/task/llm_task.py CHANGED
@@ -31,9 +31,9 @@ from zrb.xcom.xcom import Xcom

  if TYPE_CHECKING:
  from pydantic_ai import Agent, Tool
- from pydantic_ai.mcp import MCPServer
  from pydantic_ai.models import Model
  from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.toolsets import AbstractToolset

  ToolOrCallable = Tool | Callable
  else:
@@ -76,8 +76,8 @@ class LLMTask(BaseTask):
  list["ToolOrCallable"]
  | Callable[[AnySharedContext], list["ToolOrCallable"]]
  ) = [],
- mcp_servers: (
- list["MCPServer"] | Callable[[AnySharedContext], list["MCPServer"]]
+ toolsets: (
+ list["AbstractToolset[Agent]"] | Callable[[AnySharedContext], list["Tool"]]
  ) = [],
  conversation_history: (
  ConversationHistory
@@ -162,8 +162,8 @@ class LLMTask(BaseTask):
  self._tools = tools
  self._rate_limitter = rate_limitter
  self._additional_tools: list["ToolOrCallable"] = []
- self._mcp_servers = mcp_servers
- self._additional_mcp_servers: list["MCPServer"] = []
+ self._toolsets = toolsets
+ self._additional_toolsets: list["AbstractToolset[Agent]"] = []
  self._conversation_history = conversation_history
  self._conversation_history_reader = conversation_history_reader
  self._conversation_history_writer = conversation_history_writer
@@ -187,12 +187,12 @@ class LLMTask(BaseTask):
  for single_tool in tool:
  self._additional_tools.append(single_tool)

- def add_mcp_server(self, *mcp_server: "MCPServer"):
- self.append_mcp_server(*mcp_server)
+ def add_toolset(self, *toolset: "AbstractToolset[Agent]"):
+ self.append_toolset(*toolset)

- def append_mcp_server(self, *mcp_server: "MCPServer"):
- for single_mcp_server in mcp_server:
- self._additional_mcp_servers.append(single_mcp_server)
+ def append_toolset(self, *toolset: "AbstractToolset[Agent]"):
+ for single_toolset in toolset:
+ self._additional_toolsets.append(single_toolset)

  def set_should_summarize_history(self, summarize_history: bool):
  self._should_summarize_history = summarize_history
@@ -252,8 +252,8 @@ class LLMTask(BaseTask):
  model_settings=model_settings,
  tools_attr=self._tools,
  additional_tools=self._additional_tools,
- mcp_servers_attr=self._mcp_servers,
- additional_mcp_servers=self._additional_mcp_servers,
+ toolsets_attr=self._toolsets,
+ additional_toolsets=self._additional_toolsets,
  )
  # 4. Run the agent iteration and save the results/history
  result = await self._execute_agent(
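At the task level the same rename appears in the constructor and helper methods, so `mcp_servers=` / `add_mcp_server()` call sites become `toolsets=` / `add_toolset()`. A hedged sketch (constructor arguments other than `toolsets` are illustrative):

```python
# Hedged sketch: `toolsets=` and add_toolset()/append_toolset() come from this
# diff; the MCP server class and the other constructor arguments are assumptions.
from pydantic_ai.mcp import MCPServerStdio

from zrb.task.llm_task import LLMTask

chat = LLMTask(
    name="chat",                    # illustrative
    message="{ctx.input.message}",  # illustrative
    toolsets=[                      # was mcp_servers=[...] in 1.12.0
        MCPServerStdio("npx", args=["-y", "@modelcontextprotocol/server-git"])
    ],
)

# Additional toolsets can still be attached after construction.
chat.add_toolset(
    MCPServerStdio("npx", args=["-y", "@modelcontextprotocol/server-fetch"])
)
```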
zrb/util/llm/prompt.py CHANGED
@@ -1,7 +1,7 @@
  import re


- def _demote_markdown_headers(md: str) -> str:
+ def _adjust_markdown_headers(md: str, level_change: int) -> str:
  lines = md.split("\n")
  new_lines = []
  fence_stack = []
@@ -11,7 +11,6 @@ def _demote_markdown_headers(md: str) -> str:

  if fence_match:
  current_fence = fence_match.group(1)
- # If stack is not empty and we found a closing fence
  if (
  fence_stack
  and fence_stack[-1][0] == current_fence[0]
@@ -21,18 +20,28 @@ def _demote_markdown_headers(md: str) -> str:
  else:
  fence_stack.append(current_fence)
  new_lines.append(line)
+ elif fence_stack:
+ new_lines.append(line)
  else:
- if fence_stack: # If we are inside a code block
- new_lines.append(line)
+ match = re.match(r"^(#{1,6})(\s)", line)
+ if match:
+ current_level = len(match.group(1))
+ new_level = max(1, current_level + level_change)
+ new_header = "#" * new_level + line[current_level:]
+ new_lines.append(new_header)
  else:
- match = re.match(r"^(#{1,6})(\s)", line)
- if match:
- new_lines.append("#" + line)
- else:
- new_lines.append(line)
+ new_lines.append(line)
  return "\n".join(new_lines)


+ def demote_markdown_headers(md: str) -> str:
+ return _adjust_markdown_headers(md, level_change=1)
+
+
+ def promote_markdown_headers(md: str) -> str:
+ return _adjust_markdown_headers(md, level_change=-1)
+
+
  def make_prompt_section(header: str, content: str, as_code: bool = False) -> str:
  if content.strip() == "":
  return ""
@@ -51,4 +60,4 @@ def make_prompt_section(header: str, content: str, as_code: bool = False) -> str
  fence_len = longest_backtick_sequence + 1
  fence = "`" * fence_len
  return f"# {header}\n{fence}\n{content.strip()}\n{fence}\n"
- return f"# {header}\n{_demote_markdown_headers(content.strip())}\n"
+ return f"# {header}\n{demote_markdown_headers(content.strip())}\n"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: zrb
- Version: 1.12.0
+ Version: 1.13.0
  Summary: Your Automation Powerhouse
  Home-page: https://github.com/state-alchemists/zrb
  License: AGPL-3.0-or-later
@@ -19,7 +19,7 @@ Provides-Extra: rag
  Requires-Dist: beautifulsoup4 (>=4.13.3,<5.0.0)
  Requires-Dist: black (>=25.1.0,<25.2.0)
  Requires-Dist: chromadb (>=0.6.3,<0.7.0) ; extra == "rag" or extra == "all"
- Requires-Dist: fastapi[standard] (>=0.115.14,<0.116.0)
+ Requires-Dist: fastapi[standard] (>=0.116.1,<0.117.0)
  Requires-Dist: isort (>=6.0.1,<6.1.0)
  Requires-Dist: libcst (>=1.7.0,<2.0.0)
  Requires-Dist: openai (>=1.86.0,<2.0.0) ; extra == "rag" or extra == "all"
@@ -27,7 +27,7 @@ Requires-Dist: pdfplumber (>=0.11.6,<0.12.0) ; extra == "rag" or extra == "all"
  Requires-Dist: playwright (>=1.53.0,<2.0.0) ; extra == "playwright" or extra == "all"
  Requires-Dist: prompt-toolkit (>=3.0.51,<4.0.0)
  Requires-Dist: psutil (>=7.0.0,<8.0.0)
- Requires-Dist: pydantic-ai (>=0.4.4,<0.5.0)
+ Requires-Dist: pydantic-ai (>=0.4.5,<0.5.0)
  Requires-Dist: pyjwt (>=2.10.1,<3.0.0)
  Requires-Dist: python-dotenv (>=1.1.1,<2.0.0)
  Requires-Dist: python-jose[cryptography] (>=3.4.0,<4.0.0)
@@ -20,7 +20,7 @@ zrb/builtin/llm/tool/cli.py,sha256=dUWZrW2X5J_lONuzR__6-SbewSdi28E3RRuksjd4mWo,1
  zrb/builtin/llm/tool/code.py,sha256=GRP_IZAkeL6RIlUm407BQRF992ES57pdzPaQdC5UsJU,8218
  zrb/builtin/llm/tool/file.py,sha256=XfTuoQOHmgiAYkfi_1ew2voxOwad5vWTe_3Ww8IeVQY,22274
  zrb/builtin/llm/tool/rag.py,sha256=wB74JV7bxs0ec77b_09Z2lPjoR1WzPUvZbuXOdb9Q9g,9675
- zrb/builtin/llm/tool/sub_agent.py,sha256=UWBLiuCK6FT8Ku0yPfSxd_k67h_Pme1K7d2VSABacjQ,4855
+ zrb/builtin/llm/tool/sub_agent.py,sha256=9Su64FpNTVeE6O2qgNzo-eo4pcmv8qi_sd_QWLQBXYw,4870
  zrb/builtin/llm/tool/web.py,sha256=gQlUsmYCJOFJtNjwpjK-xk13LMvrMSpSaFHXUTnIayQ,7090
  zrb/builtin/md5.py,sha256=690RV2LbW7wQeTFxY-lmmqTSVEEZv3XZbjEUW1Q3XpE,1480
  zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -219,15 +219,15 @@ zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
  zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
  zrb/config/config.py,sha256=d_F-hdPLADjeVRHtnpOxtOkfUBu5huSLclyD53uxO4U,12306
  zrb/config/default_prompt/file_extractor_system_prompt.md,sha256=tmeZMPzF9MGExsZZw7M2PZN6V0oFVRp1nIjiqUPvQ9M,1013
- zrb/config/default_prompt/interactive_system_prompt.md,sha256=NlG5cQ4imEGF9CIRwqH03UZ5XRtqLu1gIin3nBDtQlI,2795
+ zrb/config/default_prompt/interactive_system_prompt.md,sha256=ZFPeDEV2vlcksHiVG2o-TCehmqkFolDjtH0_Fzo1gGI,3566
  zrb/config/default_prompt/persona.md,sha256=WU4JKp-p7qJePDA6NZ_CYdBggo2B3PEq8IEnNVblIHU,41
  zrb/config/default_prompt/repo_extractor_system_prompt.md,sha256=EGZ-zj78RlMEg2jduRBs8WzO4VJTkXHR96IpBepZMsY,3881
  zrb/config/default_prompt/repo_summarizer_system_prompt.md,sha256=fpG5B416OK3oE41bWPrh1M6pdH5SSadCPte_NJ_79z0,858
- zrb/config/default_prompt/summarization_prompt.md,sha256=3-swyZ2m9DQFkaN68kn-AxnFHTcQYqrPSzV3qwT-vw4,2122
- zrb/config/default_prompt/system_prompt.md,sha256=uRRiVSTs_4s2DYBO-1cPuOGPVkaelA_UuGClLawfw3o,2283
+ zrb/config/default_prompt/summarization_prompt.md,sha256=hRXH5E78TugSze_Hgp-KTbIhCeyrMcJg-pSXvXH3C9E,1629
+ zrb/config/default_prompt/system_prompt.md,sha256=Jkne5n9HJcBCgfeENwxvqH-kbDO2CaiUzqR4VoWMRHY,3054
  zrb/config/llm_config.py,sha256=bNLxorctwtVW1F9hA-hEYpDBe7FLSZHC25Nx8NlR4-M,8597
- zrb/config/llm_context/config.py,sha256=swc3hUaEIoL2MjKtbati13iP0MxveNG_y_6K3nszRAw,2571
- zrb/config/llm_context/config_handler.py,sha256=oQesfigIM0qMw_A3jUCN0UDJujRjuJ3jr5mXHBiLgB0,8866
+ zrb/config/llm_context/config.py,sha256=zeqSVOKK5yyApvqTbcO3ayGxtyoag22qlWWaXp1nINs,4950
+ zrb/config/llm_context/config_parser.py,sha256=h95FbOjvVobhrsfGtG_BY3hxS-OLzQj-9F5vGZuehkY,1473
  zrb/config/llm_rate_limitter.py,sha256=P4vR7qxwiGwjlKx2kHcfdIxwGbJB98vdN-UQEH-Q2WU,4894
  zrb/config/web_auth_config.py,sha256=_PXatQTYh2mX9H3HSYSQKp13zm1RlLyVIoeIr6KYMQ8,6279
  zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -237,7 +237,7 @@ zrb/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  zrb/context/any_context.py,sha256=2hgVKbbDwmwrEl1h1L1FaTUjuUYaDd_b7YRGkaorW6Q,6362
  zrb/context/any_shared_context.py,sha256=wJawL1jGgApcKPRcpw3js7W4-MhJRA3GMbR5zTsJmt0,1929
  zrb/context/context.py,sha256=ErGhXJgjgNaAqi6iPMejWxFZ3YvWnysC6mHEU-wodKk,6884
- zrb/context/shared_context.py,sha256=Pn0LHEYikiB3LLGnfpJVzOFgxyosQ_NYvFtKFMK_X8w,3008
+ zrb/context/shared_context.py,sha256=Jaa7AYCeCksOiEAwOnY3xD6Y2Yy2wJAkpehAkbKQ-Wc,3076
  zrb/dot_dict/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  zrb/dot_dict/dot_dict.py,sha256=ubw_x8I7AOJ59xxtFVJ00VGmq_IYdZP3mUhNlO4nEK0,556
  zrb/env/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -346,20 +346,20 @@ zrb/task/base_trigger.py,sha256=WSGcmBcGAZw8EzUXfmCjqJQkz8GEmi1RzogpF6A1V4s,6902
  zrb/task/cmd_task.py,sha256=myM8WZm6NrUD-Wv0Vb5sTOrutrAVZLt5LVsSBKwX6SM,10860
  zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
  zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- zrb/task/llm/agent.py,sha256=A5UoHY-l8WqyptKrf42eHVW_VhMhuYsygs2Z8XNnCzk,6681
+ zrb/task/llm/agent.py,sha256=aZvtcL5HmZQvD3c79R9sDIOMawO0rUMcRiq2wZ1FNas,7457
  zrb/task/llm/config.py,sha256=TlyH925_fboIlK2Ixf34tynmenqs9s9rfsnPs4jff78,3490
  zrb/task/llm/conversation_history.py,sha256=B_PDWYL_q66s0xwWBzMSomqPN6u3gkXlIeXBD5A0Apg,4416
- zrb/task/llm/conversation_history_model.py,sha256=AU5-M4Ky3X4wII1PMT75VU5OUEG0FjqdHrrpCSl-u6M,10771
+ zrb/task/llm/conversation_history_model.py,sha256=DJ0KDBB0BriQuE5ugC_q0aSHhjNIBcfjUk1f0S_3I9U,9245
  zrb/task/llm/default_workflow/coding.md,sha256=2uythvPsnBpYfIhiIH1cCinQXX0i0yUqsL474Zpemw0,2484
  zrb/task/llm/default_workflow/copywriting.md,sha256=xSO7GeDolwGxiuz6kXsK2GKGpwp8UgtG0yRqTmill_s,1999
  zrb/task/llm/default_workflow/researching.md,sha256=KD-aYHFHir6Ti-4FsBBtGwiI0seSVgleYbKJZi_POXA,2139
  zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
- zrb/task/llm/history_summarization.py,sha256=BUwBOS51Jzp4psliD_h1jWq-5oHezNbjF1fkn7vbh7o,8109
- zrb/task/llm/print_node.py,sha256=zocTKi9gZDxl2I6KNu095TmMc13Yip6SNuWYnswS680,4060
+ zrb/task/llm/history_summarization.py,sha256=_0RmzIeJdJA3KvtdTdKnd2Ga7_7x8C1J2PM0oSn-IYw,8000
+ zrb/task/llm/print_node.py,sha256=mwdqsO2IVf5rDz-jdH9HXz6MFGCWrZ4Pv2xbUBtoNgc,4179
  zrb/task/llm/prompt.py,sha256=sMipP-NJmq4ZmCtQYEG2mcHWUD79yJRwH7nH-iw-7Z4,9661
- zrb/task/llm/tool_wrapper.py,sha256=N6IuWJXFDcGUJyMJnnWmpJLsqas1QNCEj0MNL3T2nXI,6647
+ zrb/task/llm/tool_wrapper.py,sha256=jfKMAtTzm--HnF6TppOrbkDVsuTOIFRpowQqgwqd-7s,6756
  zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
- zrb/task/llm_task.py,sha256=Zxmp7c7XOz5_jAX1kzwwNfD9GJ1Tok-C4e_MfqhliNk,13532
+ zrb/task/llm_task.py,sha256=jVuVeN2ylcPkycUiaTpavkkd1tBLbvcVsjMNN5FMHnk,13536
  zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
  zrb/task/rsync_task.py,sha256=WfqNSaicJgYWpunNU34eYxXDqHDHOftuDHyWJKjqwg0,6365
  zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -395,7 +395,7 @@ zrb/util/git_subtree.py,sha256=AyQWCWEi2EIzEpYXRnYN55157KMUql0WHj70QNw5PHU,4612
  zrb/util/git_subtree_model.py,sha256=P_gJ0zhOAc3gFM6sYcjc0Ack9dFBt75TI5fXdE0q320,871
  zrb/util/group.py,sha256=T82yr3qg9I5k10VPXkMyrIRIqyfzadSH813bqzwKEPI,4718
  zrb/util/init_path.py,sha256=9eN7CkWNGhDBpjTQs2j9YHVMzui7Y8DEb1WP4aTPzeo,659
- zrb/util/llm/prompt.py,sha256=AqDcBi2IkPISCVNZ_Ccz9Q2zFHjowPMReGHZtNndD_k,1921
+ zrb/util/llm/prompt.py,sha256=HMpKby27DE8lJWpytYKylp7Iw9ENwsYQI0nMMKCCi54,2190
  zrb/util/load.py,sha256=DK0KYSlu48HCoGPqnW1IxnE3pHrZSPCstfz8Fjyqqv8,2140
  zrb/util/run.py,sha256=vu-mcSWDP_WuuvIKqM_--Gk3WkABO1oTXiHmBRTvVQk,546
  zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -406,7 +406,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
  zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
  zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
- zrb-1.12.0.dist-info/METADATA,sha256=ZegE-xKhBfEIGj-PXDaNKUmoQsJgWYR6_4E0V4-2Awk,9778
- zrb-1.12.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- zrb-1.12.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
- zrb-1.12.0.dist-info/RECORD,,
+ zrb-1.13.0.dist-info/METADATA,sha256=L9X70AkHGlYTjK0tdgjlwA_Sj4U0Ao9FtPuk-g0UmJ0,9777
+ zrb-1.13.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ zrb-1.13.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+ zrb-1.13.0.dist-info/RECORD,,
@@ -1,238 +0,0 @@
- import os
- import re
- from typing import Callable, Generator, NamedTuple
-
-
- class Section(NamedTuple):
- name: str
- key: str
- content: str
- config_file: str
-
-
- def _parse_config_file(
- config_file: str, lines: list[str]
- ) -> Generator[Section, None, None]:
- """
- Parses a config file's lines, yielding sections.
- It correctly handles markdown code fences.
- """
- any_header_pattern = re.compile(r"^# (\w+):\s*(.*)")
- fence_pattern = re.compile(r"^([`~]{3,})")
- fence_stack = []
- active_section_name = None
- active_section_key = None
- active_section_content = []
-
- for line in lines:
- stripped_line = line.strip()
- fence_match = fence_pattern.match(stripped_line)
-
- if fence_match:
- current_fence = fence_match.group(1)
- if (
- fence_stack
- and fence_stack[-1][0] == current_fence[0]
- and len(current_fence) >= len(fence_stack[-1])
- ):
- fence_stack.pop()
- else:
- fence_stack.append(current_fence)
-
- if fence_stack:
- if active_section_key is not None:
- active_section_content.append(line)
- continue
-
- match = any_header_pattern.match(line)
- if match:
- if active_section_key is not None:
- content = "".join(active_section_content).strip()
- if content:
- yield Section(
- name=active_section_name,
- key=active_section_key,
- content=content,
- config_file=config_file,
- )
-
- active_section_name = match.group(1)
- active_section_key = match.group(2).strip()
- active_section_content = []
- elif active_section_key is not None:
- active_section_content.append(line)
-
- if active_section_key is not None:
- content = "".join(active_section_content).strip()
- if content:
- yield Section(
- name=active_section_name,
- key=active_section_key,
- content=content,
- config_file=config_file,
- )
-
-
- def _get_config_file_hierarchy(path: str, config_file_name: str) -> list[str]:
- """Finds all config files from a given path up to the home directory."""
- config_files = []
- home_dir = os.path.expanduser("~")
- current_path = os.path.abspath(path)
- while True:
- config_path = os.path.join(current_path, config_file_name)
- if os.path.exists(config_path):
- config_files.append(config_path)
- if current_path == home_dir:
- break
- parent = os.path.dirname(current_path)
- if parent == current_path: # Reached root
- break
- current_path = parent
- return config_files
-
-
- class LLMContextConfigHandler:
- """Handles the logic for a specific section of the config."""
-
- def __init__(
- self,
- section_name: str,
- config_file_name: str = "ZRB.md",
- filter_section_func: Callable[[str, str], bool] | None = None,
- resolve_section_path: bool = True,
- ):
- self._section_name = section_name
- self._config_file_name = config_file_name
- self._filter_func = filter_section_func
- self._resolve_section_path = resolve_section_path
-
- def _include_section(self, section_path: str, base_path: str) -> bool:
- if self._filter_func:
- return self._filter_func(section_path, base_path)
- return True
-
- def get_section(self, cwd: str) -> dict[str, str]:
- """Gathers all relevant sections for a given path."""
- abs_path = os.path.abspath(cwd)
- all_sections = {}
- config_files = _get_config_file_hierarchy(abs_path, self._config_file_name)
-
- for config_file in reversed(config_files):
- if not os.path.exists(config_file):
- continue
- with open(config_file, "r") as f:
- lines = f.readlines()
-
- for section in _parse_config_file(config_file, lines):
- if section.name != self._section_name:
- continue
-
- config_dir = os.path.dirname(section.config_file)
- key = (
- os.path.abspath(os.path.join(config_dir, section.key))
- if self._resolve_section_path
- else section.key
- )
-
- if self._include_section(key, abs_path):
- if key in all_sections:
- all_sections[key] = f"{all_sections[key]}\n{section.content}"
- else:
- all_sections[key] = section.content
-
- return all_sections
-
- def add_to_section(self, content: str, key: str, cwd: str):
- """Adds content to a section block in the nearest configuration file."""
- abs_search_path = os.path.abspath(cwd)
- config_files = _get_config_file_hierarchy(
- abs_search_path, self._config_file_name
- )
- closest_config_file = (
- config_files[0]
- if config_files
- else os.path.join(os.path.expanduser("~"), self._config_file_name)
- )
-
- config_dir = os.path.dirname(closest_config_file)
- header_key = key
- if self._resolve_section_path and os.path.isabs(key):
- if key == config_dir:
- header_key = "."
- elif key.startswith(config_dir):
- header_key = f"./{os.path.relpath(key, config_dir)}"
- header = f"# {self._section_name}: {header_key}"
- new_content = content.strip()
- lines = []
- if os.path.exists(closest_config_file):
- with open(closest_config_file, "r") as f:
- lines = f.readlines()
- header_index = next(
- (i for i, line in enumerate(lines) if line.strip() == header), -1
- )
- if header_index != -1:
- insert_index = len(lines)
- for i in range(header_index + 1, len(lines)):
- if re.match(r"^# \w+:", lines[i].strip()):
- insert_index = i
- break
- if insert_index > 0 and lines[insert_index - 1].strip():
- lines.insert(insert_index, f"\n{new_content}\n")
- else:
- lines.insert(insert_index, f"{new_content}\n")
- else:
- if lines and lines[-1].strip():
- lines.append("\n\n")
- lines.append(f"{header}\n")
- lines.append(f"{new_content}\n")
- with open(closest_config_file, "w") as f:
- f.writelines(lines)
-
- def remove_from_section(self, content: str, key: str, cwd: str) -> bool:
- """Removes content from a section block in all relevant config files."""
- abs_search_path = os.path.abspath(cwd)
- config_files = _get_config_file_hierarchy(
- abs_search_path, self._config_file_name
- )
- content_to_remove = content.strip()
- was_removed = False
- for config_file_path in config_files:
- if not os.path.exists(config_file_path):
- continue
- with open(config_file_path, "r") as f:
- file_content = f.read()
- config_dir = os.path.dirname(config_file_path)
- header_key = key
- if self._resolve_section_path and os.path.isabs(key):
- if key == config_dir:
- header_key = "."
- elif key.startswith(config_dir):
- header_key = f"./{os.path.relpath(key, config_dir)}"
- header = f"# {self._section_name}: {header_key}"
- # Use regex to find the section content
- section_pattern = re.compile(
- rf"^{re.escape(header)}\n(.*?)(?=\n# \w+:|\Z)",
- re.DOTALL | re.MULTILINE,
- )
- match = section_pattern.search(file_content)
- if not match:
- continue
-
- section_content = match.group(1)
- # Remove the target content and handle surrounding newlines
- new_section_content = section_content.replace(content_to_remove, "")
- new_section_content = "\n".join(
- line for line in new_section_content.splitlines() if line.strip()
- )
-
- if new_section_content != section_content.strip():
- was_removed = True
- # Reconstruct the file content
- start = match.start(1)
- end = match.end(1)
- new_file_content = (
- file_content[:start] + new_section_content + file_content[end:]
- )
- with open(config_file_path, "w") as f:
- f.write(new_file_content)
- return was_removed
File without changes