zrb 1.21.6__py3-none-any.whl → 1.21.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of zrb might be problematic.

Files changed (47)
  1. zrb/attr/type.py +10 -7
  2. zrb/builtin/git.py +12 -1
  3. zrb/builtin/llm/chat_completion.py +274 -0
  4. zrb/builtin/llm/chat_session_cmd.py +90 -28
  5. zrb/builtin/llm/chat_trigger.py +7 -1
  6. zrb/builtin/llm/history.py +4 -4
  7. zrb/builtin/llm/tool/api.py +3 -1
  8. zrb/builtin/llm/tool/cli.py +2 -1
  9. zrb/builtin/llm/tool/code.py +11 -3
  10. zrb/builtin/llm/tool/file.py +112 -142
  11. zrb/builtin/llm/tool/note.py +36 -16
  12. zrb/builtin/llm/tool/rag.py +17 -8
  13. zrb/builtin/llm/tool/sub_agent.py +41 -15
  14. zrb/config/config.py +108 -13
  15. zrb/config/default_prompt/file_extractor_system_prompt.md +16 -16
  16. zrb/config/default_prompt/interactive_system_prompt.md +11 -11
  17. zrb/config/default_prompt/repo_extractor_system_prompt.md +16 -16
  18. zrb/config/default_prompt/repo_summarizer_system_prompt.md +3 -3
  19. zrb/config/default_prompt/summarization_prompt.md +54 -8
  20. zrb/config/default_prompt/system_prompt.md +15 -15
  21. zrb/config/llm_rate_limitter.py +24 -5
  22. zrb/input/option_input.py +13 -1
  23. zrb/task/llm/agent.py +42 -144
  24. zrb/task/llm/agent_runner.py +152 -0
  25. zrb/task/llm/config.py +8 -7
  26. zrb/task/llm/conversation_history.py +35 -24
  27. zrb/task/llm/conversation_history_model.py +4 -11
  28. zrb/task/llm/default_workflow/coding/workflow.md +2 -3
  29. zrb/task/llm/file_replacement.py +206 -0
  30. zrb/task/llm/file_tool_model.py +57 -0
  31. zrb/task/llm/history_processor.py +206 -0
  32. zrb/task/llm/history_summarization.py +2 -179
  33. zrb/task/llm/print_node.py +14 -5
  34. zrb/task/llm/prompt.py +8 -19
  35. zrb/task/llm/subagent_conversation_history.py +41 -0
  36. zrb/task/llm/tool_wrapper.py +27 -12
  37. zrb/task/llm_task.py +55 -47
  38. zrb/util/attr.py +17 -10
  39. zrb/util/cli/text.py +6 -4
  40. zrb/util/git.py +2 -2
  41. zrb/util/yaml.py +1 -0
  42. zrb/xcom/xcom.py +10 -0
  43. {zrb-1.21.6.dist-info → zrb-1.21.28.dist-info}/METADATA +5 -5
  44. {zrb-1.21.6.dist-info → zrb-1.21.28.dist-info}/RECORD +46 -41
  45. zrb/task/llm/history_summarization_tool.py +0 -24
  46. {zrb-1.21.6.dist-info → zrb-1.21.28.dist-info}/WHEEL +0 -0
  47. {zrb-1.21.6.dist-info → zrb-1.21.28.dist-info}/entry_points.txt +0 -0
@@ -2,71 +2,15 @@ import fnmatch
  import json
  import os
  import re
- import sys
- from typing import Any, Literal, Optional
+ from typing import Any, Optional

  from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
  from zrb.config.config import CFG
  from zrb.config.llm_rate_limitter import llm_rate_limitter
  from zrb.context.any_context import AnyContext
+ from zrb.task.llm.file_tool_model import FileReplacement, FileToRead, FileToWrite
  from zrb.util.file import read_file, read_file_with_line_numbers, write_file

- if sys.version_info >= (3, 12):
-     from typing import NotRequired, TypedDict
- else:
-     from typing_extensions import NotRequired, TypedDict
-
-
- class FileToRead(TypedDict):
-     """
-     Configuration for reading a file or file section.
-
-     Attributes:
-         path (str): Absolute or relative path to the file
-         start_line (int | None): Starting line number (1-based, inclusive). If None, reads from beginning.
-         end_line (int | None): Ending line number (1-based, exclusive). If None, reads to end.
-     """
-
-     path: str
-     start_line: NotRequired[int | None]
-     end_line: NotRequired[int | None]
-
-
- class FileToWrite(TypedDict):
-     """
-     Configuration for writing content to a file.
-
-     Attributes:
-         path (str): Absolute or relative path where file will be written.
-         content (str): Content to write. CRITICAL: For JSON, ensure all special characters in this string are properly escaped.
-         mode (str): Mode for writing: 'w' (overwrite, default), 'a' (append), 'x' (create exclusively).
-     """
-
-     path: str
-     content: str
-     mode: NotRequired[Literal["w", "wt", "tw", "a", "at", "ta", "x", "xt", "tx"]]
-
-
- class Replacement(TypedDict):
-     """
-     Configuration for a single text replacement operation.
-
-     Attributes:
-         old_string (str): Exact text to find and replace (must match file content exactly)
-         new_string (str): New text to replace with
-     """
-
-     old_string: str
-     new_string: str
-
-
- class FileReplacement(TypedDict):
-     """Represents a file replacement operation with path and one or more replacements."""
-
-     path: str
-     replacements: list[Replacement]
-
-
  DEFAULT_EXCLUDED_PATTERNS = [
      # Common Python artifacts
      "__pycache__",
@@ -138,20 +82,21 @@ DEFAULT_EXCLUDED_PATTERNS = [

  def list_files(
      path: str = ".",
-     recursive: bool = True,
      include_hidden: bool = False,
+     depth: int = 3,
      excluded_patterns: Optional[list[str]] = None,
  ) -> dict[str, list[str]]:
      """
-     Lists files and directories recursively or non-recursively.
+     Lists files recursively up to a specified depth.

      Example:
-         list_files(path='src', recursive=True, include_hidden=False)
+         list_files(path='src', include_hidden=False, depth=2)

      Args:
          path (str): Directory path. Defaults to current directory.
-         recursive (bool): List recursively. Defaults to True.
          include_hidden (bool): Include hidden files. Defaults to False.
+         depth (int): Maximum depth to traverse. Defaults to 3.
+             Minimum depth is 1 (current directory only).
          excluded_patterns (list[str]): Glob patterns to exclude.

      Returns:
@@ -169,50 +114,30 @@ def list_files(
          if excluded_patterns is not None
          else DEFAULT_EXCLUDED_PATTERNS
      )
+     if depth <= 0:
+         depth = 1
      try:
-         if recursive:
-             for root, dirs, files in os.walk(abs_path, topdown=True):
-                 # Filter directories in-place
-                 dirs[:] = [
-                     d
-                     for d in dirs
-                     if (include_hidden or not _is_hidden(d))
-                     and not is_excluded(d, patterns_to_exclude)
-                 ]
-                 # Process files
-                 for filename in files:
-                     if (include_hidden or not _is_hidden(filename)) and not is_excluded(
-                         filename, patterns_to_exclude
-                     ):
-                         full_path = os.path.join(root, filename)
-                         # Check rel path for patterns like '**/node_modules/*'
-                         rel_full_path = os.path.relpath(full_path, abs_path)
-                         is_rel_path_excluded = is_excluded(
-                             rel_full_path, patterns_to_exclude
-                         )
-                         if not is_rel_path_excluded:
-                             all_files.append(full_path)
-         else:
-             # Non-recursive listing (top-level only)
-             for item in os.listdir(abs_path):
-                 full_path = os.path.join(abs_path, item)
-                 # Include both files and directories if not recursive
-                 if (include_hidden or not _is_hidden(item)) and not is_excluded(
-                     item, patterns_to_exclude
+         initial_depth = abs_path.rstrip(os.sep).count(os.sep)
+         for root, dirs, files in os.walk(abs_path, topdown=True):
+             current_depth = root.rstrip(os.sep).count(os.sep) - initial_depth
+             if current_depth >= depth - 1:
+                 del dirs[:]
+             dirs[:] = [
+                 d
+                 for d in dirs
+                 if (include_hidden or not _is_hidden(d))
+                 and not is_excluded(d, patterns_to_exclude)
+             ]
+             for filename in files:
+                 if (include_hidden or not _is_hidden(filename)) and not is_excluded(
+                     filename, patterns_to_exclude
                  ):
-                     all_files.append(full_path)
-         # Return paths relative to the original path requested
-         try:
-             rel_files = [os.path.relpath(f, abs_path) for f in all_files]
-             return {"files": sorted(rel_files)}
-         except (
-             ValueError
-         ) as e:  # Handle case where path is '.' and abs_path is CWD root
-             if "path is on mount '" in str(e) and "' which is not on mount '" in str(e):
-                 # If paths are on different mounts, just use absolute paths
-                 rel_files = all_files
-                 return {"files": sorted(rel_files)}
-             raise
+                     full_path = os.path.join(root, filename)
+                     rel_full_path = os.path.relpath(full_path, abs_path)
+                     if not is_excluded(rel_full_path, patterns_to_exclude):
+                         all_files.append(rel_full_path)
+         return {"files": sorted(all_files)}
+
      except (OSError, IOError) as e:
          raise OSError(f"Error listing files in {path}: {e}")
      except Exception as e:
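
The new listing logic caps recursion by comparing path-separator counts and clearing `dirs` in `topdown` mode so `os.walk` stops descending. A standalone sketch of the same depth-capping pattern, using only the standard library (independent of zrb's helpers such as `_is_hidden` and `is_excluded`):

```python
import os


def walk_with_depth(path: str, depth: int = 3) -> list[str]:
    """Collect relative file paths, descending at most `depth` directory levels."""
    abs_path = os.path.abspath(path)
    if depth <= 0:
        depth = 1
    initial_depth = abs_path.rstrip(os.sep).count(os.sep)
    found: list[str] = []
    for root, dirs, files in os.walk(abs_path, topdown=True):
        current_depth = root.rstrip(os.sep).count(os.sep) - initial_depth
        if current_depth >= depth - 1:
            # Clearing dirs in topdown mode stops os.walk from descending further.
            del dirs[:]
        for filename in files:
            found.append(os.path.relpath(os.path.join(root, filename), abs_path))
    return sorted(found)


if __name__ == "__main__":
    # depth=1 lists only files directly inside the directory.
    print(walk_with_depth(".", depth=1))
```
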
@@ -255,10 +180,12 @@ def read_from_file(
      Reads content from one or more files, optionally specifying line ranges.

      Examples:
+         ```
          # Read entire content of a single file
          read_from_file(file={'path': 'path/to/file.txt'})

          # Read specific lines from a file
+         # The content will be returned with line numbers in the format: "LINE_NUMBER | line content"
          read_from_file(file={'path': 'path/to/large_file.log', 'start_line': 100, 'end_line': 150})

          # Read multiple files
@@ -266,12 +193,14 @@ def read_from_file(
              {'path': 'path/to/file1.txt'},
              {'path': 'path/to/file2.txt', 'start_line': 1, 'end_line': 5}
          ])
+         ```

      Args:
          file (FileToRead | list[FileToRead]): A single file configuration or a list of them.

      Returns:
          dict: Content and metadata for a single file, or a dict of results for multiple files.
+             The `content` field in the returned dictionary will have line numbers in the format: "LINE_NUMBER | line content"
      """
      is_list = isinstance(file, list)
      files = file if is_list else [file]
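
The docstring now documents the `LINE_NUMBER | line content` output format (the module already imports `read_file_with_line_numbers`). A minimal sketch of that numbering convention, using a simple stand-in helper rather than zrb's actual implementation, whose exact spacing may differ:

```python
def number_lines(text: str, start_line: int = 1) -> str:
    """Prefix each line with its 1-based line number, e.g. '12 | some code'."""
    return "\n".join(
        f"{line_number} | {line}"
        for line_number, line in enumerate(text.splitlines(), start=start_line)
    )


print(number_lines("alpha\nbeta\ngamma"))
# 1 | alpha
# 2 | beta
# 3 | gamma
```
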
@@ -327,17 +256,23 @@ def write_to_file(
      file: FileToWrite | list[FileToWrite],
  ) -> str | dict[str, Any]:
      """
-     Writes content to one or more files, with options for overwrite, append, or exclusive creation.
+     Writes content to one or more files, with options for overwrite, append, or exclusive
+     creation.

-     CRITICAL: The content for each file MUST NOT exceed 4000 characters. If your content is larger,
-     you MUST split it into chunks and make multiple calls to this tool. The first call should use
-     'w' mode, and subsequent calls for the same file should use 'a' mode.
+     **CRITICAL - PREVENT JSON ERRORS:**
+     1. **ESCAPING:** Do NOT double-escape quotes.
+        - CORRECT: "content": "He said \"Hello\""
+        - WRONG: "content": "He said \\"Hello\\"" <-- This breaks JSON parsing!
+     2. **SIZE LIMIT:** Content MUST NOT exceed 4000 characters.
+        - Exceeding this causes truncation and EOF errors.
+        - Split larger content into multiple sequential calls (first 'w', then 'a').

      Examples:
+         ```
          # Overwrite 'file.txt' with initial content
          write_to_file(file={'path': 'path/to/file.txt', 'content': 'Initial content.'})

-         # Append a second chunk to 'file.txt' (note the newline for separate lines)
+         # Append a second chunk to 'file.txt' (note the newline at the beginning of the content)
          write_to_file(file={'path': 'path/to/file.txt', 'content': '\nSecond chunk.', 'mode': 'a'})

          # Write to multiple files
@@ -345,6 +280,7 @@ def write_to_file(
              {'path': 'path/to/file1.txt', 'content': 'Content for file 1'},
              {'path': 'path/to/file2.txt', 'content': 'Content for file 2', 'mode': 'w'}
          ])
+         ```

      Args:
          file (FileToWrite | list[FileToWrite]): A single file configuration or a list of them.
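
The reworded docstring keeps the 4000-character limit and the chunking convention (first call with mode 'w', subsequent calls with 'a'). A hedged caller-side sketch of how oversized content could be split into the corresponding sequence of `write_to_file` configs; `chunked_write_calls` is a hypothetical helper, not part of zrb:

```python
def chunked_write_calls(path: str, content: str, limit: int = 4000) -> list[dict]:
    """Split content into <= limit chunks and build write_to_file configs:
    the first chunk overwrites ('w'), every later chunk appends ('a')."""
    chunks = [content[i : i + limit] for i in range(0, len(content), limit)] or [""]
    return [
        {"path": path, "content": chunk, "mode": "w" if index == 0 else "a"}
        for index, chunk in enumerate(chunks)
    ]


# Each config would then be passed to write_to_file, one call at a time.
for config in chunked_write_calls("notes.txt", "x" * 9000):
    print(config["mode"], len(config["content"]))
# w 4000
# a 4000
# a 1000
```
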
@@ -490,47 +426,72 @@ def replace_in_file(
      file: FileReplacement | list[FileReplacement],
  ) -> str | dict[str, Any]:
      """
-     Replaces exact string occurrences in one or more files.
+     Replaces exact text in files.

-     CRITICAL: `old_string` must match file content exactly (including whitespace, newlines).
+     **CRITICAL INSTRUCTIONS:**
+     1. **READ FIRST:** Use `read_file` to get exact content. Do not guess.
+     2. **EXACT MATCH:** `old_text` must match file content EXACTLY (whitespace, newlines).
+     3. **ESCAPING:** Do NOT double-escape quotes in `new_text`. Use `\"`, not `\\"`.
+     4. **SIZE LIMIT:** `new_text` MUST NOT exceed 4000 chars to avoid truncation/EOF errors.
+     5. **MINIMAL CONTEXT:** Keep `old_text` small (target lines + 2-3 context lines).
+     6. **DEFAULT:** Replaces **ALL** occurrences. Set `count=1` for first occurrence only.

      Examples:
-         # Single file replacement
-         replace_in_file(file={'path': 'path/to/file.txt', 'replacements': [{'old_string': 'old', 'new_string': 'new'}]})
-
-         # Multiple file replacements
+         ```
+         # Replace ALL occurrences
          replace_in_file(file=[
-             {'path': 'path/to/file1.txt', 'replacements': [{'old_string': 'foo', 'new_string': 'bar'}]},
-             {'path': 'path/to/file2.txt', 'replacements': [{'old_string': 'old', 'new_string': 'new'}]}
+             {'path': 'file.txt', 'old_text': 'foo', 'new_text': 'bar'},
+             {'path': 'file.txt', 'old_text': 'baz', 'new_text': 'qux'}
          ])

+         # Replace ONLY the first occurrence
+         replace_in_file(
+             file={'path': 'file.txt', 'old_text': 'foo', 'new_text': 'bar', 'count': 1}
+         )
+
+         # Replace code block (include context for safety)
+         replace_in_file(
+             file={
+                 'path': 'app.py',
+                 'old_text': '    def old_fn():\n        pass',
+                 'new_text': '    def new_fn():\n        pass'
+             }
+         )
+         ```
+
      Args:
-         file (FileReplacement | list[FileReplacement]): A single file configuration or a list of them.
+         file: Single replacement config or list of them.

      Returns:
-         Success message for single file, or dict with success/errors for multiple files.
+         Success message or error dict.
      """
      # Normalize to list
      file_replacements = file if isinstance(file, list) else [file]
+     # Group replacements by file path to minimize file I/O
+     replacements_by_path: dict[str, list[FileReplacement]] = {}
+     for r in file_replacements:
+         path = r["path"]
+         if path not in replacements_by_path:
+             replacements_by_path[path] = []
+         replacements_by_path[path].append(r)
      success = []
      errors = {}
-     for file_replacement_config in file_replacements:
-         path = file_replacement_config["path"]
-         replacements = file_replacement_config["replacements"]
+     for path, replacements in replacements_by_path.items():
          try:
              abs_path = os.path.abspath(os.path.expanduser(path))
              if not os.path.exists(abs_path):
                  raise FileNotFoundError(f"File not found: {path}")
              content = read_file(abs_path)
              original_content = content
-             # Apply all replacements
+             # Apply all replacements for this file
              for replacement in replacements:
-                 old_string = replacement["old_string"]
-                 new_string = replacement["new_string"]
-                 if old_string not in content:
-                     raise ValueError(f"old_string not found in file: {path}")
-                 # Replace first occurrence only (maintains current behavior)
-                 content = content.replace(old_string, new_string, 1)
+                 old_text = replacement["old_text"]
+                 new_text = replacement["new_text"]
+                 count = replacement.get("count", -1)
+                 if old_text not in content:
+                     raise ValueError(f"old_text not found in file: {path}")
+                 # Replace occurrences
+                 content = content.replace(old_text, new_text, count)
              # Only write if content actually changed
              if content != original_content:
                  write_file(abs_path, content)
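
The new replacement semantics lean directly on Python's `str.replace(old, new, count)`: a negative count (the tool's new default of -1) replaces every occurrence, while `count=1` touches only the first. A quick standalone illustration:

```python
text = "foo bar foo baz foo"

# count=-1 (the tool's default) replaces ALL occurrences.
print(text.replace("foo", "qux", -1))  # qux bar qux baz qux

# count=1 replaces only the first occurrence.
print(text.replace("foo", "qux", 1))   # qux bar foo baz foo
```
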
@@ -542,16 +503,15 @@ def replace_in_file(
      # Return appropriate response based on input type
      if isinstance(file, list):
          return {"success": success, "errors": errors}
-     else:
-         if errors:
-             raise RuntimeError(
-                 f"Error applying replacement to {file['path']}: {errors[file['path']]}"
-             )
-         return f"Successfully applied replacement(s) to {file['path']}"
+     path = file["path"]
+     if errors:
+         error_message = errors[path]
+         raise RuntimeError(f"Error applying replacement to {path}: {error_message}")
+     return f"Successfully applied replacement(s) to {path}"


  async def analyze_file(
-     ctx: AnyContext, path: str, query: str, token_limit: int | None = None
+     ctx: AnyContext, path: str, query: str, token_threshold: int | None = None
  ) -> dict[str, Any]:
      """
      Analyzes a file using a sub-agent for complex questions.
@@ -562,29 +522,39 @@ async def analyze_file(
      Args:
          ctx (AnyContext): The execution context.
          path (str): The path to the file to analyze.
-         query (str): A specific analysis query with clear guidelines and necessary information.
-         token_limit (int | None): Max tokens.
+         query (str): A specific analysis query with clear guidelines and
+             necessary information.
+         token_threshold (int | None): Max tokens.

      Returns:
          Analysis results.
      """
-     if token_limit is None:
-         token_limit = CFG.LLM_FILE_ANALYSIS_TOKEN_LIMIT
+     if token_threshold is None:
+         token_threshold = CFG.LLM_FILE_ANALYSIS_TOKEN_THRESHOLD
      abs_path = os.path.abspath(os.path.expanduser(path))
      if not os.path.exists(abs_path):
          raise FileNotFoundError(f"File not found: {path}")
      file_content = read_file(abs_path)
      _analyze_file = create_sub_agent_tool(
          tool_name="analyze_file",
-         tool_description="Analyze file content using LLM sub-agent for complex questions about code structure, documentation quality, or file-specific analysis. Use for questions that require understanding beyond simple text reading.",
+         tool_description=(
+             "Analyze file content using LLM sub-agent "
+             "for complex questions about code structure, documentation "
+             "quality, or file-specific analysis. Use for questions that "
+             "require understanding beyond simple text reading."
+         ),
          system_prompt=CFG.LLM_FILE_EXTRACTOR_SYSTEM_PROMPT,
          tools=[read_from_file, search_files],
+         auto_summarize=False,
+         remember_history=False,
      )
      payload = json.dumps(
          {
              "instruction": query,
              "file_path": abs_path,
-             "file_content": llm_rate_limitter.clip_prompt(file_content, token_limit),
+             "file_content": llm_rate_limitter.clip_prompt(
+                 file_content, token_threshold
+             ),
          }
      )
      return await _analyze_file(ctx, payload)
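
`analyze_file` now clips the embedded file content with `llm_rate_limitter.clip_prompt(file_content, token_threshold)` before packing it into the JSON payload handed to the sub-agent. A hedged sketch of the same clip-then-pack pattern with a stand-in clipper; the real `clip_prompt` lives in `zrb/config/llm_rate_limitter.py` and its exact behavior is not shown in this hunk:

```python
import json


def clip_prompt(text: str, token_threshold: int, chars_per_token: int = 4) -> str:
    """Stand-in for llm_rate_limitter.clip_prompt: a crude character-based cap."""
    max_chars = token_threshold * chars_per_token
    return text if len(text) <= max_chars else text[:max_chars]


def build_payload(query: str, file_path: str, file_content: str, token_threshold: int) -> str:
    """Mirror the payload shape built by analyze_file above."""
    return json.dumps(
        {
            "instruction": query,
            "file_path": file_path,
            "file_content": clip_prompt(file_content, token_threshold),
        }
    )


print(build_payload("Summarize this module", "/tmp/example.py", "print('hi')\n" * 500, 64))
```
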
@@ -5,10 +5,12 @@ from zrb.config.llm_context.config import llm_context_config

  def read_long_term_note() -> str:
      """
-     Reads the global long-term note, shared across all projects and conversations.
+     Retrieves the GLOBAL long-term memory shared across ALL sessions and projects.
+
+     CRITICAL: Consult this first for user preferences, facts, and cross-project context.

      Returns:
-         str: The content of the long-term note.
+         str: The current global note content.
      """
      contexts = llm_context_config.get_notes()
      return contexts.get("/", "")
@@ -16,28 +18,39 @@ def read_long_term_note() -> str:

  def write_long_term_note(content: str) -> str:
      """
-     Writes or overwrites the global long-term note.
-     Use to remember key user preferences, goals, or facts relevant across all projects.
+     Persists CRITICAL facts to the GLOBAL long-term memory.
+
+     USE EAGERLY to save or update:
+     - User preferences (e.g., "I prefer Python", "No unit tests").
+     - User information (e.g., user name, user email address).
+     - Important facts (e.g., "My API key is in .env").
+     - Cross-project goals.
+     - Anything that will be useful for future interaction across projects.
+
+     WARNING: This OVERWRITES the entire global note.

      Args:
-         content (str): The information to save (overwrites entire note).
+         content (str): The text to strictly memorize.

      Returns:
-         str: A confirmation message.
+         str: Confirmation message.
      """
      llm_context_config.write_note(content, "/")
-     return "Long term note saved"
+     return "Global long-term note saved."


  def read_contextual_note(path: str | None = None) -> str:
      """
-     Reads a contextual note for a specific file or directory.
+     Retrieves LOCAL memory specific to a file or directory path.
+
+     Use to recall project-specific architecture, code summaries, or past decisions
+     relevant to the current working location.

      Args:
-         path (str | None): The file or directory path. Defaults to current working directory.
+         path (str | None): Target file/dir. Defaults to current working directory (CWD).

      Returns:
-         str: The content of the contextual note.
+         str: The local note content for the path.
      """
      if path is None:
          path = os.getcwd()
@@ -48,17 +61,24 @@ def read_contextual_note(path: str | None = None) -> str:

  def write_contextual_note(content: str, path: str | None = None) -> str:
      """
-     Writes or overwrites a note for a specific file or directory.
-     Use to save findings, summaries, or conclusions about a part of the project.
+     Persists LOCAL facts specific to a file or directory.
+
+     USE EAGERLY to save or update:
+     - Architectural patterns for this project/directory.
+     - Summaries of large files or directories.
+     - Specific guidelines for this project.
+     - Anything related to this directory that will be useful for future interaction.
+
+     WARNING: This OVERWRITES the note for the specific path.

      Args:
-         content (str): The information to save (overwrites any existing note).
-         path (str | None): The file or directory path. Defaults to current working directory.
+         content (str): The text to memorize for this location.
+         path (str | None): Target file/dir. Defaults to CWD.

      Returns:
-         str: A confirmation message.
+         str: Confirmation message.
      """
      if path is None:
          path = os.getcwd()
      llm_context_config.write_note(content, path)
-     return f"Contextual note saved to {path}"
+     return f"Contextual note saved for: {path}"
@@ -45,22 +45,31 @@ def create_rag_from_directory(
      openai_embedding_model: str | None = None,
  ):
      """
-     Create a powerful RAG (Retrieval-Augmented Generation) tool for querying a local knowledge base.
+     Create a powerful RAG (Retrieval-Augmented Generation) tool for querying a local
+     knowledge base.

-     This factory function generates a tool that performs semantic search over a directory of documents. It automatically indexes the documents into a vector database (ChromaDB) and keeps it updated as files change.
+     This factory function generates a tool that performs semantic search over a directory of
+     documents. It automatically indexes the documents into a vector database (ChromaDB) and
+     keeps it updated as files change.

-     The generated tool is ideal for answering questions based on a specific set of documents, such as project documentation or internal wikis.
+     The generated tool is ideal for answering questions based on a specific set of documents,
+     such as project documentation or internal wikis.

      Args:
          tool_name (str): The name for the generated RAG tool (e.g., "search_project_docs").
-         tool_description (str): A clear description of what the tool does and when to use it. This is what the LLM will see.
-         document_dir_path (str, optional): The path to the directory containing the documents to be indexed.
-         vector_db_path (str, optional): The path where the ChromaDB vector database will be stored.
-         vector_db_collection (str, optional): The name of the collection within the vector database.
+         tool_description (str): A clear description of what the tool does and when to use it.
+             This is what the LLM will see.
+         document_dir_path (str, optional): The path to the directory containing the documents
+             to be indexed.
+         vector_db_path (str, optional): The path where the ChromaDB vector database will be
+             stored.
+         vector_db_collection (str, optional): The name of the collection within the vector
+             database.
          chunk_size (int, optional): The size of text chunks for embedding.
          overlap (int, optional): The overlap between text chunks.
          max_result_count (int, optional): The maximum number of search results to return.
-         file_reader (list[RAGFileReader], optional): A list of custom file readers for specific file types.
+         file_reader (list[RAGFileReader], optional): A list of custom file readers for
+             specific file types.
          openai_api_key (str, optional): Your OpenAI API key for generating embeddings.
          openai_base_url (str, optional): An optional base URL for the OpenAI API.
          openai_embedding_model (str, optional): The embedding model to use.
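
A hedged usage sketch of the factory documented above, using only parameter names that appear in this docstring; the values and the omission of unspecified parameters are assumptions, since the function body and its defaults are not part of this hunk:

```python
from zrb.builtin.llm.tool.rag import create_rag_from_directory

# Build a semantic-search tool over local docs; parameters left out fall back
# to whatever defaults the factory defines.
search_project_docs = create_rag_from_directory(
    tool_name="search_project_docs",
    tool_description="Semantic search over ./docs for project documentation questions.",
    document_dir_path="./docs",
    vector_db_path="./.rag-db",
    vector_db_collection="project_docs",
    chunk_size=1024,
    overlap=128,
    max_result_count=5,
)
```
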
@@ -1,14 +1,21 @@
+ import json
  from collections.abc import Callable
  from textwrap import dedent
  from typing import TYPE_CHECKING, Any, Coroutine

  from zrb.context.any_context import AnyContext
- from zrb.task.llm.agent import create_agent_instance, run_agent_iteration
+ from zrb.task.llm.agent import create_agent_instance
+ from zrb.task.llm.agent_runner import run_agent_iteration
  from zrb.task.llm.config import get_model, get_model_settings
  from zrb.task.llm.prompt import get_system_and_user_prompt
+ from zrb.task.llm.subagent_conversation_history import (
+     get_ctx_subagent_history,
+     set_ctx_subagent_history,
+ )

  if TYPE_CHECKING:
      from pydantic_ai import Tool
+     from pydantic_ai._agent_graph import HistoryProcessor
      from pydantic_ai.models import Model
      from pydantic_ai.settings import ModelSettings
      from pydantic_ai.toolsets import AbstractToolset
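
The new `get_ctx_subagent_history` / `set_ctx_subagent_history` helpers come from `zrb/task/llm/subagent_conversation_history.py` (+41 lines), whose body is not shown in this diff. A minimal in-memory sketch of the keyed get/set pattern they presumably provide; the real helpers apparently hang the history off the zrb context, and everything below is a hypothetical stand-in:

```python
from typing import Any

# Hypothetical stand-ins for get_ctx_subagent_history / set_ctx_subagent_history.
_SUBAGENT_HISTORY: dict[str, list[Any]] = {}


def get_subagent_history(agent_name: str) -> list[Any]:
    """Return previously stored messages for this sub-agent, or an empty list."""
    return _SUBAGENT_HISTORY.get(agent_name, [])


def set_subagent_history(agent_name: str, history: list[Any]) -> None:
    """Overwrite the stored messages for this sub-agent."""
    _SUBAGENT_HISTORY[agent_name] = history


# Usage mirrors run_sub_agent further below: load history keyed by agent_name,
# run the agent, then store the resulting message list back.
history = get_subagent_history("analyze_file_agent")  # [] on the first call
set_subagent_history("analyze_file_agent", history + [{"role": "user", "content": "hi"}])
print(len(get_subagent_history("analyze_file_agent")))  # 1
```
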
@@ -25,29 +32,41 @@ def create_sub_agent_tool(
      tools: "list[ToolOrCallable]" = [],
      toolsets: list["AbstractToolset[None]"] = [],
      yolo_mode: bool | list[str] | None = None,
+     history_processors: list["HistoryProcessor"] | None = None,
      log_indent_level: int = 2,
-ated ) -> Callable[[AnyContext, str], Coroutine[Any, Any, dict[str, Any]]]:
+     agent_name: str | None = None,
+     auto_summarize: bool = True,
+     remember_history: bool = True,
+ ) -> Callable[[AnyContext, str], Coroutine[Any, Any, Any]]:
      """
      Create a tool that is another AI agent, capable of handling complex, multi-step sub-tasks.

-     This factory function generates a tool that, when used, spins up a temporary, specialized AI agent. This "sub-agent" has its own system prompt, tools, and context, allowing it to focus on accomplishing a specific task without being distracted by the main conversation.
+     This factory function generates a tool that, when used, spins up a temporary, specialized
+     AI agent. This "sub-agent" has its own system prompt, tools, and context, allowing it to
+     focus on accomplishing a specific task without being distracted by the main conversation.

      This is ideal for delegating complex tasks like analyzing a file or a repository.

      Args:
          tool_name (str): The name for the generated sub-agent tool.
-         tool_description (str): A clear description of the sub-agent's purpose and when to use it. This is what the LLM will see.
-         system_prompt (str, optional): The system prompt that will guide the sub-agent's behavior.
+         tool_description (str): A clear description of the sub-agent's purpose and when to
+             use it. This is what the LLM will see.
+         system_prompt (str, optional): The system prompt that will guide the sub-agent's
+             behavior.
          model (str | Model, optional): The language model the sub-agent will use.
          model_settings (ModelSettings, optional): Specific settings for the sub-agent's model.
-         tools (list, optional): A list of tools that will be exclusively available to the sub-agent.
+         tools (list, optional): A list of tools that will be exclusively available to the
+             sub-agent.
          toolsets (list, optional): A list of Toolsets for the sub-agent.

      Returns:
-         An asynchronous function that serves as the sub-agent tool. When called, it runs the sub-agent with a given query and returns its final result.
+         An asynchronous function that serves as the sub-agent tool. When called, it runs the
+         sub-agent with a given query and returns its final result.
      """
+     if agent_name is None:
+         agent_name = f"{tool_name}_agent"

-     async def run_sub_agent(ctx: AnyContext, query: str) -> dict[str, Any]:
+     async def run_sub_agent(ctx: AnyContext, query: str) -> Any:
          """
          Runs the sub-agent with the given query.
          """
@@ -66,7 +85,6 @@ def create_sub_agent_tool(
              ctx=ctx,
              model_settings_attr=model_settings,
          )
-
          if system_prompt is None:
              resolved_system_prompt, query = get_system_and_user_prompt(
                  ctx=ctx,
@@ -86,24 +104,32 @@ def create_sub_agent_tool(
              tools=tools,
              toolsets=toolsets,
              yolo_mode=yolo_mode,
+             history_processors=history_processors,
+             auto_summarize=auto_summarize,
          )
-
          sub_agent_run = None
          # Run the sub-agent iteration
-         # Start with an empty history for the sub-agent
+         history_list = (
+             get_ctx_subagent_history(ctx, agent_name) if remember_history else []
+         )
          sub_agent_run = await run_agent_iteration(
              ctx=ctx,
              agent=sub_agent_agent,
              user_prompt=query,
              attachments=[],
-             history_list=[],
+             history_list=history_list,
              log_indent_level=log_indent_level,
          )
-
          # Return the sub-agent's final message content
          if sub_agent_run and sub_agent_run.result:
              # Return the final message content
-             return {"result": sub_agent_run.result.output}
+             if remember_history:
+                 set_ctx_subagent_history(
+                     ctx,
+                     agent_name,
+                     json.loads(sub_agent_run.result.all_messages_json()),
+                 )
+             return sub_agent_run.result.output
          ctx.log_warning("Sub-agent run did not produce a result.")
          raise ValueError(f"{tool_name} not returning any result")

@@ -117,7 +143,7 @@ def create_sub_agent_tool(
              query (str): The query or task for the sub-agent.

          Returns:
-             dict[str, Any]: The final response or result from the sub-agent.
+             Any: The final response or result from the sub-agent.
          """
      ).strip()
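
Closing out the sub_agent.py changes, a hedged sketch of how a caller might use the new `agent_name`, `auto_summarize`, and `remember_history` parameters; only the parameter and import names come from this diff, the reviewer scenario and values are illustrative:

```python
from zrb.builtin.llm.tool.file import read_from_file, search_files
from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool

# A stateful reviewer sub-agent: with remember_history=True it reloads the
# message history stored under agent_name before each run and saves it back after.
review_code = create_sub_agent_tool(
    tool_name="review_code",
    tool_description="Review code and answer follow-up questions about earlier findings.",
    system_prompt="You are a meticulous code reviewer.",
    tools=[read_from_file, search_files],
    agent_name="code_review_agent",
    auto_summarize=True,
    remember_history=True,
)

# The returned coroutine function is awaited with a zrb context and a query,
# e.g. `await review_code(ctx, "Review zrb/util/yaml.py")` inside an LLM task.
```
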