zrb 1.15.3__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of zrb might be problematic.
- zrb/__init__.py +118 -133
- zrb/attr/type.py +10 -7
- zrb/builtin/__init__.py +55 -1
- zrb/builtin/git.py +12 -1
- zrb/builtin/group.py +31 -15
- zrb/builtin/llm/chat.py +147 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/callback/callback.py +8 -1
- zrb/cmd/cmd_result.py +2 -1
- zrb/config/config.py +555 -169
- zrb/config/helper.py +84 -0
- zrb/config/web_auth_config.py +50 -35
- zrb/context/any_shared_context.py +20 -3
- zrb/context/context.py +39 -5
- zrb/context/print_fn.py +13 -0
- zrb/context/shared_context.py +17 -8
- zrb/group/any_group.py +3 -3
- zrb/group/group.py +3 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/option_input.py +41 -1
- zrb/input/text_input.py +7 -24
- zrb/llm/agent/__init__.py +9 -0
- zrb/llm/agent/agent.py +215 -0
- zrb/llm/agent/summarizer.py +20 -0
- zrb/llm/app/__init__.py +10 -0
- zrb/llm/app/completion.py +281 -0
- zrb/llm/app/confirmation/allow_tool.py +66 -0
- zrb/llm/app/confirmation/handler.py +178 -0
- zrb/llm/app/confirmation/replace_confirmation.py +77 -0
- zrb/llm/app/keybinding.py +34 -0
- zrb/llm/app/layout.py +117 -0
- zrb/llm/app/lexer.py +155 -0
- zrb/llm/app/redirection.py +28 -0
- zrb/llm/app/style.py +16 -0
- zrb/llm/app/ui.py +733 -0
- zrb/llm/config/__init__.py +4 -0
- zrb/llm/config/config.py +122 -0
- zrb/llm/config/limiter.py +247 -0
- zrb/llm/history_manager/__init__.py +4 -0
- zrb/llm/history_manager/any_history_manager.py +23 -0
- zrb/llm/history_manager/file_history_manager.py +91 -0
- zrb/llm/history_processor/summarizer.py +108 -0
- zrb/llm/note/__init__.py +3 -0
- zrb/llm/note/manager.py +122 -0
- zrb/llm/prompt/__init__.py +29 -0
- zrb/llm/prompt/claude_compatibility.py +92 -0
- zrb/llm/prompt/compose.py +55 -0
- zrb/llm/prompt/default.py +51 -0
- zrb/llm/prompt/markdown/file_extractor.md +112 -0
- zrb/llm/prompt/markdown/mandate.md +23 -0
- zrb/llm/prompt/markdown/persona.md +3 -0
- zrb/llm/prompt/markdown/repo_extractor.md +112 -0
- zrb/llm/prompt/markdown/repo_summarizer.md +29 -0
- zrb/llm/prompt/markdown/summarizer.md +21 -0
- zrb/llm/prompt/note.py +41 -0
- zrb/llm/prompt/system_context.py +46 -0
- zrb/llm/prompt/zrb.py +41 -0
- zrb/llm/skill/__init__.py +3 -0
- zrb/llm/skill/manager.py +86 -0
- zrb/llm/task/__init__.py +4 -0
- zrb/llm/task/llm_chat_task.py +316 -0
- zrb/llm/task/llm_task.py +245 -0
- zrb/llm/tool/__init__.py +39 -0
- zrb/llm/tool/bash.py +75 -0
- zrb/llm/tool/code.py +266 -0
- zrb/llm/tool/file.py +419 -0
- zrb/llm/tool/note.py +70 -0
- zrb/{builtin/llm → llm}/tool/rag.py +33 -37
- zrb/llm/tool/search/brave.py +53 -0
- zrb/llm/tool/search/searxng.py +47 -0
- zrb/llm/tool/search/serpapi.py +47 -0
- zrb/llm/tool/skill.py +19 -0
- zrb/llm/tool/sub_agent.py +70 -0
- zrb/llm/tool/web.py +97 -0
- zrb/llm/tool/zrb_task.py +66 -0
- zrb/llm/util/attachment.py +101 -0
- zrb/llm/util/prompt.py +104 -0
- zrb/llm/util/stream_response.py +178 -0
- zrb/runner/cli.py +21 -20
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_route/task_input_api_route.py +5 -5
- zrb/runner/web_util/user.py +7 -3
- zrb/session/any_session.py +12 -9
- zrb/session/session.py +38 -17
- zrb/task/any_task.py +24 -3
- zrb/task/base/context.py +42 -22
- zrb/task/base/execution.py +67 -55
- zrb/task/base/lifecycle.py +14 -7
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +113 -50
- zrb/task/base_trigger.py +16 -6
- zrb/task/cmd_task.py +6 -0
- zrb/task/http_check.py +11 -5
- zrb/task/make_task.py +5 -3
- zrb/task/rsync_task.py +30 -10
- zrb/task/scaffolder.py +7 -4
- zrb/task/scheduler.py +7 -4
- zrb/task/tcp_check.py +6 -4
- zrb/util/ascii_art/art/bee.txt +17 -0
- zrb/util/ascii_art/art/cat.txt +9 -0
- zrb/util/ascii_art/art/ghost.txt +16 -0
- zrb/util/ascii_art/art/panda.txt +17 -0
- zrb/util/ascii_art/art/rose.txt +14 -0
- zrb/util/ascii_art/art/unicorn.txt +15 -0
- zrb/util/ascii_art/banner.py +92 -0
- zrb/util/attr.py +54 -39
- zrb/util/cli/markdown.py +32 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/cmd/command.py +33 -10
- zrb/util/file.py +61 -33
- zrb/util/git.py +2 -2
- zrb/util/{llm/prompt.py → markdown.py} +2 -3
- zrb/util/match.py +78 -0
- zrb/util/run.py +3 -3
- zrb/util/string/conversion.py +1 -1
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- zrb/xcom/xcom.py +10 -0
- {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/METADATA +41 -27
- {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/RECORD +129 -131
- {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/WHEEL +1 -1
- zrb/attr/__init__.py +0 -0
- zrb/builtin/llm/chat_session.py +0 -311
- zrb/builtin/llm/history.py +0 -71
- zrb/builtin/llm/input.py +0 -27
- zrb/builtin/llm/llm_ask.py +0 -187
- zrb/builtin/llm/previous-session.js +0 -21
- zrb/builtin/llm/tool/__init__.py +0 -0
- zrb/builtin/llm/tool/api.py +0 -71
- zrb/builtin/llm/tool/cli.py +0 -38
- zrb/builtin/llm/tool/code.py +0 -254
- zrb/builtin/llm/tool/file.py +0 -626
- zrb/builtin/llm/tool/sub_agent.py +0 -137
- zrb/builtin/llm/tool/web.py +0 -195
- zrb/builtin/project/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/template/app_template/module/my_module/service/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/common/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/permission/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/__init__.py +0 -0
- zrb/builtin/project/create/__init__.py +0 -0
- zrb/builtin/shell/__init__.py +0 -0
- zrb/builtin/shell/autocomplete/__init__.py +0 -0
- zrb/callback/__init__.py +0 -0
- zrb/cmd/__init__.py +0 -0
- zrb/config/default_prompt/file_extractor_system_prompt.md +0 -12
- zrb/config/default_prompt/interactive_system_prompt.md +0 -35
- zrb/config/default_prompt/persona.md +0 -1
- zrb/config/default_prompt/repo_extractor_system_prompt.md +0 -112
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +0 -10
- zrb/config/default_prompt/summarization_prompt.md +0 -16
- zrb/config/default_prompt/system_prompt.md +0 -32
- zrb/config/llm_config.py +0 -243
- zrb/config/llm_context/config.py +0 -129
- zrb/config/llm_context/config_parser.py +0 -46
- zrb/config/llm_rate_limitter.py +0 -137
- zrb/content_transformer/__init__.py +0 -0
- zrb/context/__init__.py +0 -0
- zrb/dot_dict/__init__.py +0 -0
- zrb/env/__init__.py +0 -0
- zrb/group/__init__.py +0 -0
- zrb/input/__init__.py +0 -0
- zrb/runner/__init__.py +0 -0
- zrb/runner/web_route/__init__.py +0 -0
- zrb/runner/web_route/home_page/__init__.py +0 -0
- zrb/session/__init__.py +0 -0
- zrb/session_state_log/__init__.py +0 -0
- zrb/session_state_logger/__init__.py +0 -0
- zrb/task/__init__.py +0 -0
- zrb/task/base/__init__.py +0 -0
- zrb/task/llm/__init__.py +0 -0
- zrb/task/llm/agent.py +0 -243
- zrb/task/llm/config.py +0 -103
- zrb/task/llm/conversation_history.py +0 -128
- zrb/task/llm/conversation_history_model.py +0 -242
- zrb/task/llm/default_workflow/coding.md +0 -24
- zrb/task/llm/default_workflow/copywriting.md +0 -17
- zrb/task/llm/default_workflow/researching.md +0 -18
- zrb/task/llm/error.py +0 -95
- zrb/task/llm/history_summarization.py +0 -216
- zrb/task/llm/print_node.py +0 -101
- zrb/task/llm/prompt.py +0 -325
- zrb/task/llm/tool_wrapper.py +0 -220
- zrb/task/llm/typing.py +0 -3
- zrb/task/llm_task.py +0 -341
- zrb/task_status/__init__.py +0 -0
- zrb/util/__init__.py +0 -0
- zrb/util/cli/__init__.py +0 -0
- zrb/util/cmd/__init__.py +0 -0
- zrb/util/codemod/__init__.py +0 -0
- zrb/util/string/__init__.py +0 -0
- zrb/xcom/__init__.py +0 -0
- {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/entry_points.txt +0 -0
zrb/config/llm_config.py
DELETED
@@ -1,243 +0,0 @@
import os
from typing import TYPE_CHECKING, Any, Callable

from zrb.config.config import CFG

if TYPE_CHECKING:
    from pydantic_ai.models import Model
    from pydantic_ai.providers import Provider
    from pydantic_ai.settings import ModelSettings


class LLMConfig:
    def __init__(
        self,
        default_model_name: str | None = None,
        default_base_url: str | None = None,
        default_api_key: str | None = None,
        default_persona: str | None = None,
        default_system_prompt: str | None = None,
        default_interactive_system_prompt: str | None = None,
        default_special_instruction_prompt: str | None = None,
        default_summarization_prompt: str | None = None,
        default_summarize_history: bool | None = None,
        default_history_summarization_token_threshold: int | None = None,
        default_modes: list[str] | None = None,
        default_model: "Model | None" = None,
        default_model_settings: "ModelSettings | None" = None,
        default_model_provider: "Provider | None" = None,
        default_yolo_mode: bool | None = None,
    ):
        self.__internal_default_prompt: dict[str, str] = {}
        self._default_model_name = default_model_name
        self._default_model_base_url = default_base_url
        self._default_model_api_key = default_api_key
        self._default_persona = default_persona
        self._default_system_prompt = default_system_prompt
        self._default_interactive_system_prompt = default_interactive_system_prompt
        self._default_special_instruction_prompt = default_special_instruction_prompt
        self._default_summarization_prompt = default_summarization_prompt
        self._default_summarize_history = default_summarize_history
        self._default_history_summarization_token_threshold = (
            default_history_summarization_token_threshold
        )
        self._default_modes = default_modes
        self._default_model = default_model
        self._default_model_settings = default_model_settings
        self._default_model_provider = default_model_provider
        self._default_yolo_mode = default_yolo_mode

    def _get_internal_default_prompt(self, name: str) -> str:
        if name not in self.__internal_default_prompt:
            file_path = os.path.join(
                os.path.dirname(__file__), "default_prompt", f"{name}.md"
            )
            with open(file_path, "r") as f:
                self.__internal_default_prompt[name] = f.read().strip()
        return self.__internal_default_prompt[name]

    def _get_property(
        self,
        instance_var: Any,
        config_var: Any,
        default_func: Callable[[], Any],
    ) -> Any:
        if instance_var is not None:
            return instance_var
        if config_var is not None:
            return config_var
        return default_func()

    @property
    def default_model_name(self) -> str | None:
        return self._get_property(self._default_model_name, CFG.LLM_MODEL, lambda: None)

    @property
    def default_model_base_url(self) -> str | None:
        return self._get_property(
            self._default_model_base_url, CFG.LLM_BASE_URL, lambda: None
        )

    @property
    def default_model_api_key(self) -> str | None:
        return self._get_property(
            self._default_model_api_key, CFG.LLM_API_KEY, lambda: None
        )

    @property
    def default_model_settings(self) -> "ModelSettings | None":
        return self._get_property(self._default_model_settings, None, lambda: None)

    @property
    def default_model_provider(self) -> "Provider | str":
        if self._default_model_provider is not None:
            return self._default_model_provider
        if self.default_model_base_url is None and self.default_model_api_key is None:
            return "openai"
        from pydantic_ai.providers.openai import OpenAIProvider

        return OpenAIProvider(
            base_url=self.default_model_base_url, api_key=self.default_model_api_key
        )

    @property
    def default_system_prompt(self) -> str:
        return self._get_property(
            self._default_system_prompt,
            CFG.LLM_SYSTEM_PROMPT,
            lambda: self._get_internal_default_prompt("system_prompt"),
        )

    @property
    def default_interactive_system_prompt(self) -> str:
        return self._get_property(
            self._default_interactive_system_prompt,
            CFG.LLM_INTERACTIVE_SYSTEM_PROMPT,
            lambda: self._get_internal_default_prompt("interactive_system_prompt"),
        )

    @property
    def default_persona(self) -> str:
        return self._get_property(
            self._default_persona,
            CFG.LLM_PERSONA,
            lambda: self._get_internal_default_prompt("persona"),
        )

    @property
    def default_modes(self) -> list[str]:
        return self._get_property(
            self._default_modes, CFG.LLM_MODES, lambda: ["coding"]
        )

    @property
    def default_special_instruction_prompt(self) -> str:
        return self._get_property(
            self._default_special_instruction_prompt,
            CFG.LLM_SPECIAL_INSTRUCTION_PROMPT,
            lambda: "",
        )

    @property
    def default_summarization_prompt(self) -> str:
        return self._get_property(
            self._default_summarization_prompt,
            CFG.LLM_SUMMARIZATION_PROMPT,
            lambda: self._get_internal_default_prompt("summarization_prompt"),
        )

    @property
    def default_model(self) -> "Model | str":
        if self._default_model is not None:
            return self._default_model
        model_name = self.default_model_name
        if model_name is None:
            return "openai:gpt-4o"
        from pydantic_ai.models.openai import OpenAIModel

        return OpenAIModel(
            model_name=model_name,
            provider=self.default_model_provider,
        )

    @property
    def default_summarize_history(self) -> bool:
        return self._get_property(
            self._default_summarize_history, CFG.LLM_SUMMARIZE_HISTORY, lambda: False
        )

    @property
    def default_history_summarization_token_threshold(self) -> int:
        return self._get_property(
            self._default_history_summarization_token_threshold,
            CFG.LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD,
            lambda: 1000,
        )

    @property
    def default_yolo_mode(self) -> bool:
        return self._get_property(
            self._default_yolo_mode, CFG.LLM_YOLO_MODE, lambda: False
        )

    def set_default_persona(self, persona: str):
        self._default_persona = persona

    def set_default_system_prompt(self, system_prompt: str):
        self._default_system_prompt = system_prompt

    def set_default_interactive_system_prompt(self, interactive_system_prompt: str):
        self._default_interactive_system_prompt = interactive_system_prompt

    def set_default_special_instruction_prompt(self, special_instruction_prompt: str):
        self._default_special_instruction_prompt = special_instruction_prompt

    def set_default_modes(self, modes: list[str]):
        self._default_modes = modes

    def add_default_mode(self, mode: str):
        if self._default_modes is None:
            self._default_modes = []
        self._default_modes.append(mode)

    def remove_default_mode(self, mode: str):
        if self._default_modes is None:
            self._default_modes = []
        self._default_modes.remove(mode)

    def set_default_summarization_prompt(self, summarization_prompt: str):
        self._default_summarization_prompt = summarization_prompt

    def set_default_model_name(self, model_name: str):
        self._default_model_name = model_name

    def set_default_model_api_key(self, model_api_key: str):
        self._default_model_api_key = model_api_key

    def set_default_model_base_url(self, model_base_url: str):
        self._default_model_base_url = model_base_url

    def set_default_model_provider(self, provider: "Provider | str"):
        self._default_model_provider = provider

    def set_default_model(self, model: "Model | str"):
        self._default_model = model

    def set_default_summarize_history(self, summarize_history: bool):
        self._default_summarize_history = summarize_history

    def set_default_history_summarization_token_threshold(
        self, history_summarization_token_threshold: int
    ):
        self._default_history_summarization_token_threshold = (
            history_summarization_token_threshold
        )

    def set_default_model_settings(self, model_settings: "ModelSettings"):
        self._default_model_settings = model_settings

    def set_default_yolo_mode(self, yolo_mode: bool):
        self._default_yolo_mode = yolo_mode


llm_config = LLMConfig()
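For orientation, the deleted module exposed a process-wide singleton: each setter overrides an instance-level default, and anything left unset cascades to the corresponding CFG environment value, then to a built-in fallback. The sketch below is inferred only from the API above; the model name, base URL, and key are placeholders, not recommendations (per the file list, the replacement surface in 2.0.0a4 appears to live under zrb/llm/config/).

from zrb.config.llm_config import llm_config

# Override instance-level defaults; unset values cascade to CFG, then
# to the built-in fallbacks shown in the deleted properties above.
llm_config.set_default_model_name("gpt-4o-mini")  # placeholder model name
llm_config.set_default_model_base_url("https://api.example.com/v1")  # placeholder URL
llm_config.set_default_model_api_key("sk-placeholder")

# With a model name set, default_model builds a pydantic_ai OpenAIModel;
# with nothing configured at all it falls back to the string "openai:gpt-4o".
model = llm_config.default_model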
zrb/config/llm_context/config.py
DELETED
@@ -1,129 +0,0 @@
import os

from zrb.config.config import CFG
from zrb.config.llm_context.config_parser import markdown_to_dict
from zrb.util.llm.prompt import demote_markdown_headers


class LLMContextConfig:
    """High-level API for interacting with cascaded configurations."""

    def _find_config_files(self, cwd: str) -> list[str]:
        configs = []
        current_dir = cwd
        home_dir = os.path.expanduser("~")
        while True:
            config_path = os.path.join(current_dir, CFG.LLM_CONTEXT_FILE)
            if os.path.exists(config_path):
                configs.append(config_path)
            if current_dir == home_dir or current_dir == "/":
                break
            current_dir = os.path.dirname(current_dir)
        return configs

    def _parse_config(self, file_path: str) -> dict[str, str]:
        with open(file_path, "r") as f:
            content = f.read()
        return markdown_to_dict(content)

    def _get_all_sections(self, cwd: str) -> list[tuple[str, dict[str, str]]]:
        config_files = self._find_config_files(cwd)
        all_sections = []
        for config_file in config_files:
            config_dir = os.path.dirname(config_file)
            sections = self._parse_config(config_file)
            all_sections.append((config_dir, sections))
        return all_sections

    def get_contexts(self, cwd: str | None = None) -> dict[str, str]:
        """Gathers all relevant contexts for a given path."""
        if cwd is None:
            cwd = os.getcwd()
        all_sections = self._get_all_sections(cwd)
        contexts: dict[str, str] = {}
        for config_dir, sections in reversed(all_sections):
            for key, value in sections.items():
                if key.startswith("Context:"):
                    context_path = key[len("Context:") :].strip()
                    if context_path == ".":
                        context_path = config_dir
                    elif not os.path.isabs(context_path):
                        context_path = os.path.abspath(
                            os.path.join(config_dir, context_path)
                        )
                    if os.path.isabs(context_path) or cwd.startswith(context_path):
                        contexts[context_path] = value
        return contexts

    def get_workflows(self, cwd: str | None = None) -> dict[str, str]:
        """Gathers all relevant workflows for a given path."""
        if cwd is None:
            cwd = os.getcwd()
        all_sections = self._get_all_sections(cwd)
        workflows: dict[str, str] = {}
        for _, sections in reversed(all_sections):
            for key, value in sections.items():
                if key.startswith("Workflow:"):
                    workflow_name = key[len("Workflow:") :].strip()
                    workflow_name = key.replace("Workflow:", "").lower().strip()
                    workflows[workflow_name] = value
        return workflows

    def write_context(
        self, content: str, context_path: str | None = None, cwd: str | None = None
    ):
        """Writes content to a context block in the nearest configuration file."""
        if cwd is None:
            cwd = os.getcwd()
        if context_path is None:
            context_path = cwd

        config_files = self._find_config_files(cwd)
        if config_files:
            config_file = config_files[0]  # Closest config file
        else:
            config_file = os.path.join(cwd, CFG.LLM_CONTEXT_FILE)

        sections = {}
        if os.path.exists(config_file):
            sections = self._parse_config(config_file)

        # Determine the section key
        section_key_path = context_path
        if not os.path.isabs(context_path):
            config_dir = os.path.dirname(config_file)
            section_key_path = os.path.abspath(os.path.join(config_dir, context_path))

        # Find existing key
        found_key = ""
        for key in sections.keys():
            if not key.startswith("Context:"):
                continue
            key_path = key.replace("Context:", "").strip()
            if key_path == ".":
                key_path = os.path.dirname(config_file)
            elif not os.path.isabs(key_path):
                key_path = os.path.abspath(
                    os.path.join(os.path.dirname(config_file), key_path)
                )
            if key_path == section_key_path:
                found_key = key
                break

        if found_key != "":
            sections[found_key] = content
        else:
            # Add new entry
            new_key = f"Context: {context_path}"
            sections[new_key] = content

        # Serialize back to markdown
        new_file_content = ""
        for key, value in sections.items():
            new_file_content += f"# {key}\n{demote_markdown_headers(value)}\n\n"

        with open(config_file, "w") as f:
            f.write(new_file_content)


llm_context_config = LLMContextConfig()
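A short usage sketch of the removed cascaded-context API, based solely on the methods above (the on-disk file name comes from CFG.LLM_CONTEXT_FILE, whose default value is not shown in this diff):

import os

from zrb.config.llm_context.config import llm_context_config

# Create or update the "# Context: ." block in the nearest context file,
# searching upward from cwd to the home directory or filesystem root.
llm_context_config.write_context("Use spaces, not tabs.", context_path=".")

# Returns {absolute_context_path: content}, merged from every context file
# found between cwd and the home directory.
contexts = llm_context_config.get_contexts(cwd=os.getcwd())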
zrb/config/llm_context/config_parser.py
DELETED

@@ -1,46 +0,0 @@
import re

from zrb.util.llm.prompt import promote_markdown_headers


def markdown_to_dict(markdown: str) -> dict[str, str]:
    sections: dict[str, str] = {}
    current_title = ""
    current_content: list[str] = []
    fence_stack: list[str] = []

    fence_pattern = re.compile(r"^([`~]{3,})(.*)$")
    h1_pattern = re.compile(r"^# (.+)$")

    for line in markdown.splitlines():
        # Detect code fence open/close
        fence_match = fence_pattern.match(line.strip())

        if fence_match:
            fence = fence_match.group(1)
            if fence_stack and fence_stack[-1] == fence:
                fence_stack.pop()  # close current fence
            else:
                fence_stack.append(fence)  # open new fence

        # Only parse H1 when not inside a code fence
        if not fence_stack:
            h1_match = h1_pattern.match(line)
            if h1_match:
                # Save previous section
                if current_title:
                    sections[current_title] = "\n".join(current_content).strip()
                # Start new section
                current_title = h1_match.group(1).strip()
                current_content = []
                continue

        current_content.append(line)

    # Save final section
    if current_title:
        sections[current_title] = "\n".join(current_content).strip()
    return {
        header: promote_markdown_headers(content)
        for header, content in sections.items()
    }
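A quick worked example of the parser above: top-level H1 headings split the document into sections, while a "#" line inside a code fence is kept as content rather than starting a new section.

from zrb.config.llm_context.config_parser import markdown_to_dict

doc = "\n".join(
    [
        "# Context: .",
        "Some note.",
        "```",
        "# not a heading (inside a fence)",
        "```",
        "# Workflow: coding",
        "Run tests first.",
    ]
)
sections = markdown_to_dict(doc)
print(sorted(sections))  # ['Context: .', 'Workflow: coding']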
zrb/config/llm_rate_limitter.py
DELETED
@@ -1,137 +0,0 @@
import asyncio
import time
from collections import deque
from typing import Callable

import tiktoken

from zrb.config.config import CFG


def _estimate_token(text: str) -> int:
    """
    Estimates the number of tokens in a given text.
    Tries to use the 'gpt-4o' model's tokenizer for an accurate count.
    If the tokenizer is unavailable (e.g., due to network issues),
    it falls back to a heuristic of 4 characters per token.
    """
    try:
        # Primary method: Use tiktoken for an accurate count
        enc = tiktoken.encoding_for_model("gpt-4o")
        return len(enc.encode(text))
    except Exception:
        # Fallback method: Heuristic (4 characters per token)
        return len(text) // 4


class LLMRateLimiter:
    """
    Helper class to enforce LLM API rate limits and throttling.
    Tracks requests and tokens in a rolling 60-second window.
    """

    def __init__(
        self,
        max_requests_per_minute: int | None = None,
        max_tokens_per_minute: int | None = None,
        max_tokens_per_request: int | None = None,
        throttle_sleep: float | None = None,
        token_counter_fn: Callable[[str], int] | None = None,
    ):
        self._max_requests_per_minute = max_requests_per_minute
        self._max_tokens_per_minute = max_tokens_per_minute
        self._max_tokens_per_request = max_tokens_per_request
        self._throttle_sleep = throttle_sleep
        self._token_counter_fn = token_counter_fn
        self.request_times = deque()
        self.token_times = deque()

    @property
    def max_requests_per_minute(self) -> int:
        if self._max_requests_per_minute is not None:
            return self._max_requests_per_minute
        return CFG.LLM_MAX_REQUESTS_PER_MINUTE

    @property
    def max_tokens_per_minute(self) -> int:
        if self._max_tokens_per_minute is not None:
            return self._max_tokens_per_minute
        return CFG.LLM_MAX_TOKENS_PER_MINUTE

    @property
    def max_tokens_per_request(self) -> int:
        if self._max_tokens_per_request is not None:
            return self._max_tokens_per_request
        return CFG.LLM_MAX_TOKENS_PER_REQUEST

    @property
    def throttle_sleep(self) -> float:
        if self._throttle_sleep is not None:
            return self._throttle_sleep
        return CFG.LLM_THROTTLE_SLEEP

    @property
    def count_token(self) -> Callable[[str], int]:
        if self._token_counter_fn is not None:
            return self._token_counter_fn
        return _estimate_token

    def set_max_requests_per_minute(self, value: int):
        self._max_requests_per_minute = value

    def set_max_tokens_per_minute(self, value: int):
        self._max_tokens_per_minute = value

    def set_max_tokens_per_request(self, value: int):
        self._max_tokens_per_request = value

    def set_throttle_sleep(self, value: float):
        self._throttle_sleep = value

    def set_token_counter_fn(self, fn: Callable[[str], int]):
        self._token_counter_fn = fn

    def clip_prompt(self, prompt: str, limit: int) -> str:
        token_count = self.count_token(prompt)
        if token_count <= limit:
            return prompt
        while token_count > limit:
            prompt_parts = prompt.split(" ")
            last_part_index = len(prompt_parts) - 2
            clipped_prompt = " ".join(prompt_parts[:last_part_index])
            clipped_prompt += "(Content clipped...)"
            token_count = self.count_token(clipped_prompt)
            if token_count < limit:
                return clipped_prompt
        return prompt[:limit]

    async def throttle(self, prompt: str):
        now = time.time()
        tokens = self.count_token(prompt)
        # Clean up old entries
        while self.request_times and now - self.request_times[0] > 60:
            self.request_times.popleft()
        while self.token_times and now - self.token_times[0][0] > 60:
            self.token_times.popleft()
        # Check per-request token limit
        if tokens > self.max_tokens_per_request:
            raise ValueError(
                f"Request exceeds max_tokens_per_request ({self.max_tokens_per_request})."
            )
        # Wait if over per-minute request or token limit
        while (
            len(self.request_times) >= self.max_requests_per_minute
            or sum(t for _, t in self.token_times) + tokens > self.max_tokens_per_minute
        ):
            await asyncio.sleep(self.throttle_sleep)
            now = time.time()
            while self.request_times and now - self.request_times[0] > 60:
                self.request_times.popleft()
            while self.token_times and now - self.token_times[0][0] > 60:
                self.token_times.popleft()
        # Record this request
        self.request_times.append(now)
        self.token_times.append((now, tokens))


llm_rate_limitter = LLMRateLimiter()
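To illustrate the removed throttling flow, a minimal sketch: only clip_prompt() and throttle() come from the module above; the 200-token budget and the returned "model response" are illustrative stand-ins, not real values or calls.

import asyncio

from zrb.config.llm_rate_limitter import llm_rate_limitter

async def guarded_call(prompt: str) -> str:
    # Trim the prompt if it exceeds an example 200-token budget.
    prompt = llm_rate_limitter.clip_prompt(prompt, limit=200)
    # Sleeps in throttle_sleep increments until the rolling 60-second
    # window has room for one more request and this prompt's tokens.
    await llm_rate_limitter.throttle(prompt)
    return f"(model response for: {prompt!r})"  # stand-in for the real call

print(asyncio.run(guarded_call("hello")))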
zrb/content_transformer/__init__.py
DELETED
File without changes

zrb/context/__init__.py
DELETED
File without changes

zrb/dot_dict/__init__.py
DELETED
File without changes

zrb/env/__init__.py
DELETED
File without changes

zrb/group/__init__.py
DELETED
File without changes

zrb/input/__init__.py
DELETED
File without changes

zrb/runner/__init__.py
DELETED
File without changes

zrb/runner/web_route/__init__.py
DELETED
File without changes

zrb/runner/web_route/home_page/__init__.py
DELETED
File without changes

zrb/session/__init__.py
DELETED
File without changes

zrb/session_state_log/__init__.py
DELETED
File without changes

zrb/session_state_logger/__init__.py
DELETED
File without changes

zrb/task/__init__.py
DELETED
File without changes

zrb/task/base/__init__.py
DELETED
File without changes

zrb/task/llm/__init__.py
DELETED
File without changes