zrb 1.13.3__py3-none-any.whl → 1.14.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zrb/builtin/llm/chat_session.py CHANGED
@@ -10,7 +10,8 @@ import sys
 
 from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
-from zrb.util.cli.style import stylize_bold_yellow, stylize_faint
+from zrb.util.cli.style import stylize_blue, stylize_bold_yellow, stylize_faint
+from zrb.util.string.conversion import to_boolean
 
 
 async def read_user_prompt(ctx: AnyContext) -> str:
@@ -26,6 +27,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
     reader = await _setup_input_reader(is_tty)
     multiline_mode = False
     current_modes = ctx.input.modes
+    current_yolo_mode = ctx.input.yolo
     user_inputs = []
     while True:
         await asyncio.sleep(0.01)
@@ -40,7 +42,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             user_prompt = "\n".join(user_inputs)
             user_inputs = []
             result = await _trigger_ask_and_wait_for_result(
-                ctx, user_prompt, current_modes
+                ctx, user_prompt, current_modes, current_yolo_mode
             )
             if result is not None:
                 final_result = result
@@ -53,7 +55,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             user_prompt = "\n".join(user_inputs)
             user_inputs = []
             result = await _trigger_ask_and_wait_for_result(
-                ctx, user_prompt, current_modes
+                ctx, user_prompt, current_modes, current_yolo_mode
            )
            if result is not None:
                final_result = result
@@ -64,6 +66,13 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             ctx.print(f"Current mode: {current_modes}", plain=True)
             ctx.print("", plain=True)
             continue
+        elif user_input.strip().lower().startswith("/yolo"):
+            yolo_mode_parts = user_input.split(" ", maxsplit=2)
+            if len(yolo_mode_parts) > 1:
+                current_yolo_mode = to_boolean(yolo_mode_parts[1])
+            ctx.print(f"Current_yolo mode: {current_yolo_mode}", plain=True)
+            ctx.print("", plain=True)
+            continue
         elif user_input.strip().lower() in ("/help", "/info"):
             _show_info(ctx)
             continue
@@ -74,7 +83,7 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             user_prompt = "\n".join(user_inputs)
             user_inputs = []
             result = await _trigger_ask_and_wait_for_result(
-                ctx, user_prompt, current_modes
+                ctx, user_prompt, current_modes, current_yolo_mode
             )
             if result is not None:
                 final_result = result
@@ -90,24 +99,32 @@ def _show_info(ctx: AnyContext):
     ctx.print(
         "\n".join(
             [
-                _format_info_line("/bye", "Quit from chat session"),
-                _format_info_line("/multi", "Start multiline input"),
-                _format_info_line("/end", "End multiline input"),
-                _format_info_line("/modes", "Show current modes"),
-                _format_info_line("/modes <mode1,mode2,..>", "Set current modes"),
-                _format_info_line("/help", "Show this message"),
+                _show_command("/bye", "Quit from chat session"),
+                _show_command("/multi", "Start multiline input"),
+                _show_command("/end", "End multiline input"),
+                _show_command("/modes", "Show current modes"),
+                _show_subcommand("<mode1,mode2,..>", "Set current modes"),
+                _show_command("/yolo", "Get current YOLO mode"),
+                _show_subcommand("<true|false>", "Set YOLO mode to true/false"),
+                _show_command("/help", "Show this message"),
             ]
         ),
         plain=True,
     )
 
 
-def _format_info_line(command: str, description: str) -> str:
+def _show_command(command: str, description: str) -> str:
     styled_command = stylize_bold_yellow(command.ljust(25))
     styled_description = stylize_faint(description)
     return f" {styled_command} {styled_description}"
 
 
+def _show_subcommand(command: str, description: str) -> str:
+    styled_command = stylize_blue(f" {command}".ljust(25))
+    styled_description = stylize_faint(description)
+    return f" {styled_command} {styled_description}"
+
+
 async def _handle_initial_message(ctx: AnyContext) -> str:
     """Processes the initial message from the command line."""
     if not ctx.input.message or ctx.input.message.strip() == "":
@@ -119,6 +136,7 @@ async def _handle_initial_message(ctx: AnyContext) -> str:
         ctx,
         user_prompt=ctx.input.message,
         modes=ctx.input.modes,
+        yolo_mode=ctx.input.yolo,
         previous_session_name=ctx.input.previous_session,
         start_new=ctx.input.start_new,
     )
@@ -157,6 +175,7 @@ async def _trigger_ask_and_wait_for_result(
     ctx: AnyContext,
     user_prompt: str,
     modes: str,
+    yolo_mode: bool,
     previous_session_name: str | None = None,
     start_new: bool = False,
 ) -> str | None:
@@ -174,7 +193,9 @@ async def _trigger_ask_and_wait_for_result(
     """
     if user_prompt.strip() == "":
         return None
-    await _trigger_ask(ctx, user_prompt, modes, previous_session_name, start_new)
+    await _trigger_ask(
+        ctx, user_prompt, modes, yolo_mode, previous_session_name, start_new
+    )
     result = await _wait_ask_result(ctx)
     md_result = _render_markdown(result) if result is not None else ""
     ctx.print("\n🤖 >>", plain=True)
@@ -220,6 +241,7 @@ def get_llm_ask_input_mapping(callback_ctx: AnyContext):
         "previous-session": data.get("previous_session_name"),
         "message": data.get("message"),
         "modes": data.get("modes"),
+        "yolo": data.get("yolo"),
     }
 
 
@@ -227,6 +249,7 @@ async def _trigger_ask(
     ctx: AnyContext,
     user_prompt: str,
     modes: str,
+    yolo_mode: bool,
    previous_session_name: str | None = None,
    start_new: bool = False,
 ):
@@ -247,6 +270,7 @@ async def _trigger_ask(
             "start_new": start_new,
             "message": user_prompt,
             "modes": modes,
+            "yolo": yolo_mode,
         }
     )
 
zrb/builtin/llm/llm_ask.py CHANGED
@@ -23,6 +23,7 @@ from zrb.builtin.llm.tool.web import (
 )
 from zrb.callback.callback import Callback
 from zrb.config.config import CFG
+from zrb.config.llm_config import llm_config
 from zrb.input.bool_input import BoolInput
 from zrb.input.str_input import StrInput
 from zrb.input.text_input import TextInput
@@ -75,12 +76,20 @@ _llm_ask_inputs = [
     ),
     BoolInput(
         "start-new",
-        description="Start new conversation (LLM will forget everything)",
-        prompt="Start new conversation (LLM will forget everything)",
+        description="Start new session (LLM Agent will forget past conversation)",
+        prompt="Start new session (LLM Agent will forget past conversation)",
         default=False,
         allow_positional_parsing=False,
         always_prompt=False,
     ),
+    BoolInput(
+        "yolo",
+        description="YOLO mode (LLM Agent will start in YOLO Mode)",
+        prompt="YOLO mode (LLM Agent will start in YOLO Mode)",
+        default=lambda ctx: llm_config.default_yolo_mode,
+        allow_positional_parsing=False,
+        always_prompt=False,
+    ),
     TextInput("message", description="User message", prompt="Your message"),
     PreviousSessionInput(
         "previous-session",
@@ -154,8 +163,20 @@ if CFG.LLM_ALLOW_ACCESS_LOCAL_FILE:
 if CFG.LLM_ALLOW_ACCESS_SHELL:
     llm_ask.append_tool(run_shell_command)
 
-if CFG.LLM_ALLOW_ACCESS_INTERNET:
-    llm_ask.append_tool(open_web_page, search_wikipedia, search_arxiv)
-    if CFG.SERPAPI_KEY != "":
-        llm_ask.append_tool(create_search_internet_tool(CFG.SERPAPI_KEY))
-    llm_ask.append_tool(get_current_location, get_current_weather)
+if CFG.LLM_ALLOW_OPEN_WEB_PAGE:
+    llm_ask.append_tool(open_web_page)
+
+if CFG.LLM_ALLOW_SEARCH_WIKIPEDIA:
+    llm_ask.append_tool(search_wikipedia)
+
+if CFG.LLM_ALLOW_SEARCH_ARXIV:
+    llm_ask.append_tool(search_arxiv)
+
+if CFG.LLM_ALLOW_GET_CURRENT_LOCATION:
+    llm_ask.append_tool(get_current_location)
+
+if CFG.LLM_ALLOW_GET_CURRENT_WEATHER:
+    llm_ask.append_tool(get_current_weather)
+
+if CFG.SERPAPI_KEY != "" and CFG.LLM_ALLOW_SEARCH_INTERNET:
+    llm_ask.append_tool(create_search_internet_tool(CFG.SERPAPI_KEY))
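Because the builtin llm_ask task now registers each web-related tool behind its own CFG flag, individual tools can be switched off without losing the rest. A minimal sketch, assuming the environment variables are exported before zrb.builtin.llm.llm_ask is imported (every flag defaults to "1", i.e. enabled):

```python
import os

# Hypothetical selection: keep web page opening and Wikipedia, drop the rest.
os.environ["ZRB_LLM_ALLOW_SEARCH_ARXIV"] = "0"
os.environ["ZRB_LLM_ALLOW_GET_CURRENT_LOCATION"] = "0"
os.environ["ZRB_LLM_ALLOW_GET_CURRENT_WEATHER"] = "0"

# Tools are appended at import time by the module-level "if CFG..." blocks above.
from zrb.builtin.llm.llm_ask import llm_ask
```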
zrb/builtin/llm/tool/file.py CHANGED
@@ -2,7 +2,8 @@ import fnmatch
 import json
 import os
 import re
-from typing import Any, Dict, List, Optional, Tuple
+import sys
+from typing import Any, Optional
 
 from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
 from zrb.config.config import CFG
@@ -10,7 +11,17 @@ from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
 from zrb.util.file import read_file, read_file_with_line_numbers, write_file
 
-_EXTRACT_INFO_FROM_FILE_SYSTEM_PROMPT = CFG.LLM_FILE_EXTRACTOR_SYSTEM_PROMPT
+if sys.version_info >= (3, 12):
+    from typing import TypedDict
+else:
+    from typing_extensions import TypedDict
+
+
+class FileToWrite(TypedDict):
+    """Represents a file to be written, with a 'path' and 'content'."""
+
+    path: str
+    content: str
 
 
 DEFAULT_EXCLUDED_PATTERNS = [
@@ -481,14 +492,14 @@ async def analyze_file(
     return await _analyze_file(ctx, clipped_payload)
 
 
-def read_many_files(paths: List[str]) -> str:
+def read_many_files(paths: list[str]) -> str:
     """
     Reads and returns the full content of multiple files at once.
 
     This tool is highly efficient for gathering context from several files simultaneously. Use it when you need to understand how different files in a project relate to each other, or when you need to inspect a set of related configuration or source code files.
 
     Args:
-        paths (List[str]): A list of paths to the files you want to read. It is crucial to provide accurate paths. Use the `list_files` tool first if you are unsure about the exact file locations.
+        paths (list[str]): A list of paths to the files you want to read. It is crucial to provide accurate paths. Use the `list_files` tool first if you are unsure about the exact file locations.
 
     Returns:
         str: A JSON object where keys are the file paths and values are their corresponding contents, prefixed with line numbers. If a file cannot be read, its value will be an error message.
@@ -507,7 +518,7 @@ def read_many_files(paths: List[str]) -> str:
     return json.dumps({"results": results})
 
 
-def write_many_files(files: List[Tuple[str, str]]) -> str:
+def write_many_files(files: list[FileToWrite]) -> str:
     """
     Writes content to multiple files in a single, atomic operation.
 
@@ -516,7 +527,7 @@ def write_many_files(files: List[Tuple[str, str]]) -> str:
     Each file's content is completely replaced. If a file does not exist, it will be created. If it exists, its current content will be entirely overwritten. Therefore, you must provide the full, intended content for each file.
 
     Args:
-        files (List[Tuple[str, str]]): A list of tuples, where each tuple contains the file path and the complete content to be written to that file.
+        files: A list of file objects, where each object is a dictionary containing a 'path' and the complete 'content'.
 
     Returns:
         str: A JSON object summarizing the operation, listing successfully written files and any files that failed, along with corresponding error messages.
@@ -524,8 +535,13 @@ def write_many_files(files: List[Tuple[str, str]]) -> str:
     """
     success = []
     errors = {}
-    for path, content in files:
+    # 4. Access the data using dictionary key-lookup syntax.
+    for file in files:
         try:
+            # Use file['path'] and file['content'] instead of file.path
+            path = file["path"]
+            content = file["content"]
+
             abs_path = os.path.abspath(os.path.expanduser(path))
             directory = os.path.dirname(abs_path)
             if directory and not os.path.exists(directory):
@@ -535,6 +551,3 @@ def write_many_files(files: List[Tuple[str, str]]) -> str:
         except Exception as e:
             errors[path] = f"Error writing file: {e}"
     return json.dumps({"success": success, "errors": errors})
-
-
-apply_diff = replace_in_file
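write_many_files now takes a list of FileToWrite dicts instead of (path, content) tuples. A minimal sketch of the new call shape (the file paths are just examples):

```python
from zrb.builtin.llm.tool.file import write_many_files

summary_json = write_many_files(
    files=[
        {"path": "notes/todo.md", "content": "- review the 1.14.1 release\n"},
        {"path": "notes/done.md", "content": "- nothing yet\n"},
    ]
)
# Returns a JSON string like {"success": [...], "errors": {...}}.
# The old 1.13.x list-of-tuples form no longer matches the file["path"] lookups.
print(summary_json)
```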
zrb/builtin/llm/tool/sub_agent.py CHANGED
@@ -27,6 +27,7 @@ def create_sub_agent_tool(
     model_settings: "ModelSettings | None" = None,
     tools: list[ToolOrCallable] = [],
     toolsets: list["AbstractToolset[Agent]"] = [],
+    is_yolo_mode: bool | None = None,
 ) -> Callable[[AnyContext, str], Coroutine[Any, Any, str]]:
     """
     Creates a "tool that is another AI agent," capable of handling complex, multi-step sub-tasks.
@@ -86,6 +87,7 @@ def create_sub_agent_tool(
             model_settings=resolved_model_settings,
             tools=tools,
             toolsets=toolsets,
+            is_yolo_mode=is_yolo_mode,
         )
 
         sub_agent_run = None
zrb/config/config.py CHANGED
@@ -350,8 +350,28 @@ class Config:
         return to_boolean(os.getenv("ZRB_LLM_ACCESS_SHELL", "1"))
 
     @property
-    def LLM_ALLOW_ACCESS_INTERNET(self) -> bool:
-        return to_boolean(os.getenv("ZRB_LLM_ACCESS_INTERNET", "1"))
+    def LLM_ALLOW_OPEN_WEB_PAGE(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_OPEN_WEB_PAGE", "1"))
+
+    @property
+    def LLM_ALLOW_SEARCH_INTERNET(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_SEARCH_INTERNET", "1"))
+
+    @property
+    def LLM_ALLOW_SEARCH_ARXIV(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_SEARCH_ARXIV", "1"))
+
+    @property
+    def LLM_ALLOW_SEARCH_WIKIPEDIA(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_SEARCH_WIKIPEDIA", "1"))
+
+    @property
+    def LLM_ALLOW_GET_CURRENT_LOCATION(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_GET_CURRENT_LOCATION", "1"))
+
+    @property
+    def LLM_ALLOW_GET_CURRENT_WEATHER(self) -> bool:
+        return to_boolean(os.getenv("ZRB_LLM_ALLOW_GET_CURRENT_WEATHER", "1"))
 
     @property
     def RAG_EMBEDDING_API_KEY(self) -> str:
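Each new switch is a plain property that re-reads its environment variable on every access, so a flag can be flipped at runtime. A small sketch, assuming to_boolean maps "1" to True and "0" to False as it does elsewhere in the codebase:

```python
import os

from zrb.config.config import CFG

print(CFG.LLM_ALLOW_SEARCH_WIKIPEDIA)  # True: unset variables fall back to "1"
os.environ["ZRB_LLM_ALLOW_SEARCH_WIKIPEDIA"] = "0"
print(CFG.LLM_ALLOW_SEARCH_WIKIPEDIA)  # False: the property calls os.getenv again
```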
zrb/config/llm_config.py CHANGED
@@ -2,8 +2,6 @@ import os
 from typing import TYPE_CHECKING, Any, Callable
 
 from zrb.config.config import CFG
-from zrb.config.llm_context.config import llm_context_config
-from zrb.util.llm.prompt import make_prompt_section
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
@@ -28,6 +26,7 @@ class LLMConfig:
         default_model: "Model | None" = None,
         default_model_settings: "ModelSettings | None" = None,
         default_model_provider: "Provider | None" = None,
+        default_yolo_mode: bool | None = None,
     ):
         self.__internal_default_prompt: dict[str, str] = {}
         self._default_model_name = default_model_name
@@ -46,6 +45,7 @@ class LLMConfig:
         self._default_model = default_model
         self._default_model_settings = default_model_settings
         self._default_model_provider = default_model_provider
+        self._default_yolo_mode = default_yolo_mode
 
     def _get_internal_default_prompt(self, name: str) -> str:
         if name not in self.__internal_default_prompt:
@@ -174,6 +174,12 @@ class LLMConfig:
             lambda: 1000,
         )
 
+    @property
+    def default_yolo_mode(self) -> bool:
+        return self._get_property(
+            self._default_yolo_mode, CFG.LLM_YOLO_MODE, lambda: False
+        )
+
     def set_default_persona(self, persona: str):
         self._default_persona = persona
 
@@ -230,5 +236,8 @@ class LLMConfig:
     def set_default_model_settings(self, model_settings: "ModelSettings"):
         self._default_model_settings = model_settings
 
+    def set_default_yolo_mode(self, yolo_mode: bool):
+        self._default_yolo_mode = yolo_mode
+
 
 llm_config = LLMConfig()
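The default YOLO mode can now be configured programmatically and read back through the new property; when nothing is set it falls back to CFG.LLM_YOLO_MODE and finally to False. A minimal sketch using only names introduced in this diff:

```python
from zrb.config.llm_config import llm_config

llm_config.set_default_yolo_mode(True)  # tools run without per-call confirmation
print(llm_config.default_yolo_mode)     # True
```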
zrb/context/shared_context.py CHANGED
@@ -27,6 +27,7 @@ class SharedContext(AnySharedContext):
         env: dict[str, str] = {},
         xcom: dict[str, Xcom] = {},
         logging_level: int | None = None,
+        is_web_mode: bool = False,
     ):
         self.__logging_level = logging_level
         self._input = DotDict(input)
@@ -35,6 +36,7 @@ class SharedContext(AnySharedContext):
         self._xcom = DotDict(xcom)
         self._session: AnySession | None = None
         self._log = []
+        self._is_web_mode = is_web_mode
 
     def __repr__(self):
         class_name = self.__class__.__name__
@@ -46,7 +48,7 @@ class SharedContext(AnySharedContext):
 
     @property
     def is_web_mode(self) -> bool:
-        return self.env.get("_ZRB_IS_WEB_MODE", "0") == "1"
+        return self._is_web_mode
 
     @property
     def is_tty(self) -> bool:
zrb/runner/web_route/task_session_api_route.py CHANGED
@@ -1,5 +1,4 @@
 import asyncio
-import os
 from datetime import datetime, timedelta
 from typing import TYPE_CHECKING, Any
 
@@ -57,9 +56,7 @@ def serve_task_session_api(
             return JSONResponse(content={"detail": "Forbidden"}, status_code=403)
         session_name = residual_args[0] if residual_args else None
         if not session_name:
-            shared_ctx = SharedContext(
-                env={**dict(os.environ), "_ZRB_IS_WEB_MODE": "1"}
-            )
+            shared_ctx = SharedContext(is_web_mode=True)
             session = Session(shared_ctx=shared_ctx, root_group=root_group)
             coro = asyncio.create_task(task.async_run(session, str_kwargs=inputs))
             coroutines.append(coro)
zrb/task/llm/agent.py CHANGED
@@ -30,11 +30,14 @@ def create_agent_instance(
     tools: list[ToolOrCallable] = [],
     toolsets: list["AbstractToolset[Agent]"] = [],
     retries: int = 3,
+    is_yolo_mode: bool | None = None,
 ) -> "Agent":
     """Creates a new Agent instance with configured tools and servers."""
     from pydantic_ai import Agent, Tool
     from pydantic_ai.tools import GenerateToolJsonSchema
 
+    if is_yolo_mode is None:
+        is_yolo_mode = False
     # Normalize tools
     tool_list = []
     for tool_or_callable in tools:
@@ -44,7 +47,7 @@ def create_agent_instance(
             tool = tool_or_callable
             tool_list.append(
                 Tool(
-                    function=wrap_func(tool.function),
+                    function=wrap_func(tool.function, ctx, is_yolo_mode),
                     takes_ctx=tool.takes_ctx,
                     max_retries=tool.max_retries,
                     name=tool.name,
@@ -58,7 +61,7 @@ def create_agent_instance(
             )
         else:
             # Turn function into tool
-            tool_list.append(wrap_tool(tool_or_callable, ctx))
+            tool_list.append(wrap_tool(tool_or_callable, ctx, is_yolo_mode))
     # Return Agent
     return Agent(
         model=model,
@@ -83,6 +86,7 @@ def get_agent(
     toolsets_attr: "list[AbstractToolset[Agent]] | Callable[[AnySharedContext], list[AbstractToolset[Agent]]]",  # noqa
     additional_toolsets: "list[AbstractToolset[Agent]]",
     retries: int = 3,
+    is_yolo_mode: bool | None = None,
 ) -> "Agent":
     """Retrieves the configured Agent instance or creates one if necessary."""
     from pydantic_ai import Agent
@@ -114,6 +118,7 @@ def get_agent(
         toolsets=tool_sets,
         model_settings=model_settings,
         retries=retries,
+        is_yolo_mode=is_yolo_mode,
     )
 
 
zrb/task/llm/config.py CHANGED
@@ -1,16 +1,27 @@
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Callable
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
 
-from zrb.attr.type import StrAttr, fstring
+from zrb.attr.type import BoolAttr, StrAttr, fstring
 from zrb.config.llm_config import LLMConfig, llm_config
 from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
 from zrb.util.attr import get_attr
 
 
+def get_is_yolo_mode(
+    ctx: AnyContext,
+    is_yolo_mode_attr: BoolAttr | None = None,
+    render_yolo_mode: bool = True,
+):
+    yolo_mode = get_attr(ctx, is_yolo_mode_attr, None, auto_render=render_yolo_mode)
+    if yolo_mode is None:
+        return llm_config.default_yolo_mode
+    return yolo_mode
+
+
 def get_model_settings(
     ctx: AnyContext,
     model_settings_attr: (
zrb/task/llm/tool_wrapper.py CHANGED
@@ -5,10 +5,15 @@ import typing
 from collections.abc import Callable
 from typing import TYPE_CHECKING
 
-from zrb.config.config import CFG
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.error import ToolExecutionError
 from zrb.util.callable import get_callable_name
+from zrb.util.cli.style import (
+    stylize_blue,
+    stylize_error,
+    stylize_green,
+    stylize_yellow,
+)
 from zrb.util.run import run_async
 from zrb.util.string.conversion import to_boolean
 
@@ -16,22 +21,24 @@ if TYPE_CHECKING:
     from pydantic_ai import Tool
 
 
-def wrap_tool(func: Callable, ctx: AnyContext) -> "Tool":
+def wrap_tool(func: Callable, ctx: AnyContext, is_yolo_mode: bool) -> "Tool":
     """Wraps a tool function to handle exceptions and context propagation."""
     from pydantic_ai import RunContext, Tool
 
     original_sig = inspect.signature(func)
     needs_run_context_for_pydantic = _has_context_parameter(original_sig, RunContext)
-    wrapper = wrap_func(func, ctx)
+    wrapper = wrap_func(func, ctx, is_yolo_mode)
     return Tool(wrapper, takes_ctx=needs_run_context_for_pydantic)
 
 
-def wrap_func(func: Callable, ctx: AnyContext) -> Callable:
+def wrap_func(func: Callable, ctx: AnyContext, is_yolo_mode: bool) -> Callable:
     original_sig = inspect.signature(func)
     needs_any_context_for_injection = _has_context_parameter(original_sig, AnyContext)
     takes_no_args = len(original_sig.parameters) == 0
     # Pass individual flags to the wrapper creator
-    wrapper = _create_wrapper(func, original_sig, ctx, needs_any_context_for_injection)
+    wrapper = _create_wrapper(
+        func, original_sig, ctx, needs_any_context_for_injection, is_yolo_mode
+    )
     _adjust_signature(wrapper, original_sig, takes_no_args)
     return wrapper
 
@@ -68,8 +75,9 @@ def _is_annotated_with_context(param_annotation, context_type):
 def _create_wrapper(
     func: Callable,
     original_sig: inspect.Signature,
-    ctx: AnyContext,  # Accept ctx
+    ctx: AnyContext,
     needs_any_context_for_injection: bool,
+    is_yolo_mode: bool,
 ) -> Callable:
     """Creates the core wrapper function."""
 
@@ -97,19 +105,10 @@ def _create_wrapper(
         if "_dummy" in kwargs and "_dummy" not in original_sig.parameters:
             del kwargs["_dummy"]
         try:
-            if not CFG.LLM_YOLO_MODE and not ctx.is_web_mode and ctx.is_tty:
-                func_name = get_callable_name(func)
-                ctx.print(f"✅ >> Allow to run tool: {func_name} (Y/n)", plain=True)
-                user_confirmation_str = await _read_line()
-                try:
-                    user_confirmation = to_boolean(user_confirmation_str)
-                except Exception:
-                    user_confirmation = False
-                if not user_confirmation:
-                    ctx.print(f"❌ >> Rejecting {func_name} call. Why?", plain=True)
-                    reason = await _read_line()
-                    ctx.print("", plain=True)
-                    raise ValueError(f"User disapproval: {reason}")
+            if not is_yolo_mode and not ctx.is_web_mode and ctx.is_tty:
+                approval, reason = await _ask_for_approval(ctx, func, *args, **kwargs)
+                if not approval:
+                    raise ValueError(f"User disapproving: {reason}")
             return await run_async(func(*args, **kwargs))
         except Exception as e:
             error_model = ToolExecutionError(
@@ -123,6 +122,55 @@ def _create_wrapper(
     return wrapper
 
 
+async def _ask_for_approval(
+    ctx: AnyContext, func: Callable, *args, **kwargs
+) -> tuple[bool, str]:
+    func_name = get_callable_name(func)
+    normalized_args = [stylize_green(_truncate_arg(arg)) for arg in args]
+    normalized_kwargs = [
+        f"{stylize_yellow(key)}={stylize_green(_truncate_arg(val))}"
+        for key, val in kwargs.items()
+    ]
+    func_param_str = ",".join(normalized_args + normalized_kwargs)
+    func_call_str = (
+        f"{stylize_blue(func_name + '(')}{func_param_str}{stylize_blue(')')}"
+    )
+    while True:
+        ctx.print(
+            f"✅ >> Allow to run tool: {func_call_str} (Yes | No, <reason>)", plain=True
+        )
+        user_input = await _read_line()
+        user_responses = [val.strip() for val in user_input.split(",", maxsplit=2)]
+        while len(user_responses) < 2:
+            user_responses.append("")
+        approval_str, reason = user_responses
+        try:
+            approved = to_boolean(approval_str)
+            if not approved and reason == "":
+                ctx.print(
+                    stylize_error(
+                        f"You must specify rejection reason (i.e., No, <why>) for {func_call_str}"
+                    ),  # noqa
+                    plain=True,
+                )
+                continue
+            return approved, reason
+        except Exception:
+            ctx.print(
+                stylize_error(
+                    f"Invalid approval value for {func_call_str}: {approval_str}"
+                ),
+                plain=True,
+            )
+            continue
+
+
+def _truncate_arg(arg: str, length: int = 19) -> str:
+    if len(arg) > length:
+        return f"{arg[:length-4]} ..."
+    return arg
+
+
 async def _read_line():
     from prompt_toolkit import PromptSession
 
zrb/task/llm_task.py CHANGED
@@ -12,6 +12,7 @@ from zrb.task.any_task import AnyTask
 from zrb.task.base_task import BaseTask
 from zrb.task.llm.agent import get_agent, run_agent_iteration
 from zrb.task.llm.config import (
+    get_is_yolo_mode,
     get_model,
     get_model_settings,
 )
@@ -104,6 +105,8 @@ class LLMTask(BaseTask):
         execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
         retries: int = 2,
         retry_period: float = 0,
+        is_yolo_mode: bool | None = None,
+        render_yolo_mode: bool = True,
         readiness_check: list[AnyTask] | AnyTask | None = None,
         readiness_check_delay: float = 0.5,
         readiness_check_period: float = 5,
@@ -179,6 +182,8 @@ class LLMTask(BaseTask):
         )
         self._max_call_iteration = max_call_iteration
         self._conversation_context = conversation_context
+        self._is_yolo_mode = is_yolo_mode
+        self._render_yolo_mode = render_yolo_mode
 
     def add_tool(self, *tool: ToolOrCallable):
         self.append_tool(*tool)
@@ -214,6 +219,11 @@ class LLMTask(BaseTask):
             model_api_key_attr=self._model_api_key,
             render_model_api_key=self._render_model_api_key,
         )
+        is_yolo_mode = get_is_yolo_mode(
+            ctx=ctx,
+            is_yolo_mode_attr=self._is_yolo_mode,
+            render_yolo_mode=self._render_yolo_mode,
+        )
         summarization_prompt = get_summarization_system_prompt(
             ctx=ctx,
             summarization_prompt_attr=self._summarization_prompt,
@@ -254,6 +264,7 @@ class LLMTask(BaseTask):
             additional_tools=self._additional_tools,
             toolsets_attr=self._toolsets,
             additional_toolsets=self._additional_toolsets,
+            is_yolo_mode=is_yolo_mode,
         )
         # 4. Run the agent iteration and save the results/history
        result = await self._execute_agent(
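Custom tasks can opt into YOLO mode through the new constructor arguments. A minimal, hypothetical sketch (the name and message arguments are assumed from the existing LLMTask signature; only is_yolo_mode and render_yolo_mode come from this diff):

```python
from zrb.task.llm_task import LLMTask

auto_refactor = LLMTask(
    name="auto-refactor",
    message="Tidy up the util module and report what changed",
    is_yolo_mode=True,       # skip the "Allow to run tool ...?" prompt
    render_yolo_mode=False,  # treat the value as a literal bool, not a template
)
```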
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.13.3
+Version: 1.14.1
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -9,18 +9,18 @@ zrb/builtin/git_subtree.py,sha256=7BKwOkVTWDrR0DXXQ4iJyHqeR6sV5VYRt8y_rEB0EHg,35
 zrb/builtin/group.py,sha256=t008xLM4_fgbjfZrPoi_fQAnSHIo6MOiQSCHBO4GDYU,2379
 zrb/builtin/http.py,sha256=sLqEczuSxGYXWzyJR6frGOHkPTviu4BeyroUr3-ZuAI,4322
 zrb/builtin/jwt.py,sha256=3M5uaQhJZbKQLjTUft1OwPz_JxtmK-xtkjxWjciOQho,2859
-zrb/builtin/llm/chat_session.py,sha256=u8bW67uKCq22hVv4ZkOsKIZxBeOdKtJh4Bjyy552RM4,9424
+zrb/builtin/llm/chat_session.py,sha256=syMOQzAGcRmxs7ctrBkfvAZ7-IhxnywuxuyeoBMteEs,10464
 zrb/builtin/llm/history.py,sha256=LDOrL0p7r_AHLa5L8Dp7bHNsOALugmJd7OguXRWGnm4,3087
 zrb/builtin/llm/input.py,sha256=Nw-26uTWp2QhUgKJcP_IMHmtk-b542CCSQ_vCOjhvhM,877
-zrb/builtin/llm/llm_ask.py,sha256=18XAxyPWF7daE0TZkRkRt8opmqLUjhpM3oMVdOP-qWY,4857
+zrb/builtin/llm/llm_ask.py,sha256=ZwuECO1BQaPEzIYvlA0ptQiHVBCKA7_a8DPHj2aXfLM,5460
 zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/builtin/llm/tool/api.py,sha256=OhmfLc2TwWKQYIMweGelqb5s4JF4nB-YynbSO4yb_Jk,2342
 zrb/builtin/llm/tool/cli.py,sha256=dUWZrW2X5J_lONuzR__6-SbewSdi28E3RRuksjd4mWo,1234
 zrb/builtin/llm/tool/code.py,sha256=GRP_IZAkeL6RIlUm407BQRF992ES57pdzPaQdC5UsJU,8218
-zrb/builtin/llm/tool/file.py,sha256=OLg8RfWZOypTxTtK7YzyoZp6O2ITszGTaqos-7c0V-c,22288
+zrb/builtin/llm/tool/file.py,sha256=hKqT-r4S9FTMv3CmNWuIL5y6q13hp9Ggo0vwUGg3HxI,22585
 zrb/builtin/llm/tool/rag.py,sha256=wB74JV7bxs0ec77b_09Z2lPjoR1WzPUvZbuXOdb9Q9g,9675
-zrb/builtin/llm/tool/sub_agent.py,sha256=9Su64FpNTVeE6O2qgNzo-eo4pcmv8qi_sd_QWLQBXYw,4870
+zrb/builtin/llm/tool/sub_agent.py,sha256=yYqTIQWfG5mLL1nItc4jox-QK8vPOb_ZGvQ6sH0BcKw,4947
 zrb/builtin/llm/tool/web.py,sha256=gQlUsmYCJOFJtNjwpjK-xk13LMvrMSpSaFHXUTnIayQ,7090
 zrb/builtin/md5.py,sha256=690RV2LbW7wQeTFxY-lmmqTSVEEZv3XZbjEUW1Q3XpE,1480
 zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -217,7 +217,7 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config/config.py,sha256=d_F-hdPLADjeVRHtnpOxtOkfUBu5huSLclyD53uxO4U,12306
+zrb/config/config.py,sha256=9yl4LtPpROvjYMqdDy3hXjlD-LA8zQXFrcsO5XWXmSw,13017
 zrb/config/default_prompt/file_extractor_system_prompt.md,sha256=tmeZMPzF9MGExsZZw7M2PZN6V0oFVRp1nIjiqUPvQ9M,1013
 zrb/config/default_prompt/interactive_system_prompt.md,sha256=ZFPeDEV2vlcksHiVG2o-TCehmqkFolDjtH0_Fzo1gGI,3566
 zrb/config/default_prompt/persona.md,sha256=WU4JKp-p7qJePDA6NZ_CYdBggo2B3PEq8IEnNVblIHU,41
@@ -225,7 +225,7 @@ zrb/config/default_prompt/repo_extractor_system_prompt.md,sha256=EGZ-zj78RlMEg2j
 zrb/config/default_prompt/repo_summarizer_system_prompt.md,sha256=fpG5B416OK3oE41bWPrh1M6pdH5SSadCPte_NJ_79z0,858
 zrb/config/default_prompt/summarization_prompt.md,sha256=hRXH5E78TugSze_Hgp-KTbIhCeyrMcJg-pSXvXH3C9E,1629
 zrb/config/default_prompt/system_prompt.md,sha256=Jkne5n9HJcBCgfeENwxvqH-kbDO2CaiUzqR4VoWMRHY,3054
-zrb/config/llm_config.py,sha256=bNLxorctwtVW1F9hA-hEYpDBe7FLSZHC25Nx8NlR4-M,8597
+zrb/config/llm_config.py,sha256=WfbxP5wXolP-BzGUqidnbacMd55uuSdU2yO7EtL5F_8,8853
 zrb/config/llm_context/config.py,sha256=zeqSVOKK5yyApvqTbcO3ayGxtyoag22qlWWaXp1nINs,4950
 zrb/config/llm_context/config_parser.py,sha256=h95FbOjvVobhrsfGtG_BY3hxS-OLzQj-9F5vGZuehkY,1473
 zrb/config/llm_rate_limitter.py,sha256=P4vR7qxwiGwjlKx2kHcfdIxwGbJB98vdN-UQEH-Q2WU,4894
@@ -237,7 +237,7 @@ zrb/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/context/any_context.py,sha256=2hgVKbbDwmwrEl1h1L1FaTUjuUYaDd_b7YRGkaorW6Q,6362
 zrb/context/any_shared_context.py,sha256=wJawL1jGgApcKPRcpw3js7W4-MhJRA3GMbR5zTsJmt0,1929
 zrb/context/context.py,sha256=ErGhXJgjgNaAqi6iPMejWxFZ3YvWnysC6mHEU-wodKk,6884
-zrb/context/shared_context.py,sha256=Jaa7AYCeCksOiEAwOnY3xD6Y2Yy2wJAkpehAkbKQ-Wc,3076
+zrb/context/shared_context.py,sha256=c87VujBGKbkEC6R8Bby0-Gp0h2UvX8ZsH_3S0ElEKSs,3124
 zrb/dot_dict/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/dot_dict/dot_dict.py,sha256=ubw_x8I7AOJ59xxtFVJ00VGmq_IYdZP3mUhNlO4nEK0,556
 zrb/env/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -316,7 +316,7 @@ zrb/runner/web_route/static/resources/session/event.js,sha256=X5OlSHefK0SDB9VkFC
 zrb/runner/web_route/static/resources/session/past-session.js,sha256=RwGJYKSp75K8NZ-iZP58XppWgdzkiKFaiC5wgcMLxDo,5470
 zrb/runner/web_route/static/static_route.py,sha256=QPs5XW4O_8CuzG0Wy4sHh5wRcLbU63CLDI4YNqkUxHA,1555
 zrb/runner/web_route/task_input_api_route.py,sha256=6JIehRjXPhzclq9qGMYkztaKB0TzWsBBbim0m47-YmA,1767
-zrb/runner/web_route/task_session_api_route.py,sha256=N4kg7uNfxiiuF-YEpk6khuorkyv_H5aDm_l3pwxNozo,6262
+zrb/runner/web_route/task_session_api_route.py,sha256=U9fPOh_nmAzsmRnS5xe713KFUU15n0IkbbzU_Eg8YGI,6181
 zrb/runner/web_schema/session.py,sha256=NwbuS2Sv-CXO52nU-EZv8OMlD4vgCQWNeLC_dT0FK7I,92
 zrb/runner/web_schema/token.py,sha256=Y7XCPS4WzrxslTDtHeLcPTTUpmWhPOkRcl4b99zrC7c,185
 zrb/runner/web_schema/user.py,sha256=Kp10amg4i-f8Y-4czogv1YN7rwy0HdbePFiuovYu1ts,1018
@@ -346,8 +346,8 @@ zrb/task/base_trigger.py,sha256=WSGcmBcGAZw8EzUXfmCjqJQkz8GEmi1RzogpF6A1V4s,6902
 zrb/task/cmd_task.py,sha256=myM8WZm6NrUD-Wv0Vb5sTOrutrAVZLt5LVsSBKwX6SM,10860
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
 zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-zrb/task/llm/agent.py,sha256=aZvtcL5HmZQvD3c79R9sDIOMawO0rUMcRiq2wZ1FNas,7457
-zrb/task/llm/config.py,sha256=TlyH925_fboIlK2Ixf34tynmenqs9s9rfsnPs4jff78,3490
+zrb/task/llm/agent.py,sha256=bdrsmozPxSrUQXhv4FUs_BhIeSppQcVn2dOm40rySW4,7659
+zrb/task/llm/config.py,sha256=GDL9RzH7LSzIaQ_YIrxG-4SqsUadI8i224oSu0pvcLo,3801
 zrb/task/llm/conversation_history.py,sha256=B_PDWYL_q66s0xwWBzMSomqPN6u3gkXlIeXBD5A0Apg,4416
 zrb/task/llm/conversation_history_model.py,sha256=DJ0KDBB0BriQuE5ugC_q0aSHhjNIBcfjUk1f0S_3I9U,9245
 zrb/task/llm/default_workflow/coding.md,sha256=2uythvPsnBpYfIhiIH1cCinQXX0i0yUqsL474Zpemw0,2484
@@ -357,9 +357,9 @@ zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
 zrb/task/llm/history_summarization.py,sha256=_0RmzIeJdJA3KvtdTdKnd2Ga7_7x8C1J2PM0oSn-IYw,8000
 zrb/task/llm/print_node.py,sha256=mwdqsO2IVf5rDz-jdH9HXz6MFGCWrZ4Pv2xbUBtoNgc,4179
 zrb/task/llm/prompt.py,sha256=pHYuo4cLzuH9VYk2-PAyxng_l1ItXVZl0llZDQcbnWA,9748
-zrb/task/llm/tool_wrapper.py,sha256=jfKMAtTzm--HnF6TppOrbkDVsuTOIFRpowQqgwqd-7s,6756
+zrb/task/llm/tool_wrapper.py,sha256=-BfyZzPPPR8mSMBCHqltds6FTDprutUonipdzzlyCbk,8153
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
-zrb/task/llm_task.py,sha256=jVuVeN2ylcPkycUiaTpavkkd1tBLbvcVsjMNN5FMHnk,13536
+zrb/task/llm_task.py,sha256=88yc0SbU2ocWK6VBGoqBy-Lo49ebBTI_KKjsLxDp3sM,13945
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=WfqNSaicJgYWpunNU34eYxXDqHDHOftuDHyWJKjqwg0,6365
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -406,7 +406,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
 zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.13.3.dist-info/METADATA,sha256=WhAGmkemQMrm0fxn88gq6bSWUv4KLgruOZTrC_cSmnM,9777
-zrb-1.13.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-zrb-1.13.3.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
-zrb-1.13.3.dist-info/RECORD,,
+zrb-1.14.1.dist-info/METADATA,sha256=7QS2abopkUJEBHHFUG3kwHNQy0L6cr44wHyzaTJcK14,9777
+zrb-1.14.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.14.1.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.14.1.dist-info/RECORD,,