zrb 1.10.2__py3-none-any.whl → 1.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/chat_session.py +42 -14
- zrb/builtin/llm/llm_ask.py +11 -0
- zrb/builtin/llm/tool/file.py +2 -2
- zrb/config/config.py +31 -80
- zrb/config/default_prompt/file_extractor_system_prompt.md +12 -0
- zrb/config/default_prompt/interactive_system_prompt.md +31 -0
- zrb/config/default_prompt/persona.md +1 -0
- zrb/config/default_prompt/repo_extractor_system_prompt.md +112 -0
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +10 -0
- zrb/config/default_prompt/summarization_prompt.md +42 -0
- zrb/config/default_prompt/system_prompt.md +28 -0
- zrb/config/llm_config.py +89 -279
- zrb/config/llm_context/config.py +74 -0
- zrb/config/llm_context/config_handler.py +238 -0
- zrb/context/any_shared_context.py +10 -0
- zrb/context/context.py +8 -0
- zrb/context/shared_context.py +9 -0
- zrb/runner/web_route/task_session_api_route.py +1 -1
- zrb/task/llm/agent.py +2 -2
- zrb/task/llm/conversation_history_model.py +78 -226
- zrb/task/llm/default_workflow/coding.md +24 -0
- zrb/task/llm/default_workflow/copywriting.md +17 -0
- zrb/task/llm/default_workflow/researching.md +18 -0
- zrb/task/llm/history_summarization.py +6 -6
- zrb/task/llm/prompt.py +92 -41
- zrb/task/llm/tool_wrapper.py +20 -14
- zrb/task/llm_task.py +19 -23
- zrb/util/callable.py +23 -0
- zrb/util/llm/prompt.py +42 -6
- {zrb-1.10.2.dist-info → zrb-1.12.0.dist-info}/METADATA +2 -2
- {zrb-1.10.2.dist-info → zrb-1.12.0.dist-info}/RECORD +33 -20
- {zrb-1.10.2.dist-info → zrb-1.12.0.dist-info}/WHEEL +0 -0
- {zrb-1.10.2.dist-info → zrb-1.12.0.dist-info}/entry_points.txt +0 -0
zrb/config/llm_config.py
CHANGED
@@ -1,6 +1,9 @@
-from typing import TYPE_CHECKING
+import os
+from typing import TYPE_CHECKING, Any, Callable
 
 from zrb.config.config import CFG
+from zrb.config.llm_context.config import llm_context_config
+from zrb.util.llm.prompt import make_prompt_section
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
@@ -8,228 +11,7 @@ if TYPE_CHECKING:
     from pydantic_ai.settings import ModelSettings
 
 
-_DEFAULT_PERSONA = "You are a helpful and efficient AI agent."
-
-_DEFAULT_INTERACTIVE_SYSTEM_PROMPT = (
-    "You are an expert AI agent in a CLI. You MUST follow this workflow for "
-    "this interactive session. Respond in GitHub-flavored Markdown.\n\n"
-    "1. **Analyze and Clarify:** Understand the user's goal. If the request "
-    "is ambiguous, ask clarifying questions. Use your tools to gather "
-    "necessary information before proceeding.\n\n"
-    "2. **Assess and Decide:**\n"
-    " * For **read-only** actions, proceed directly.\n"
-    " * For **destructive** actions (modifying or deleting data), you "
-    "MUST evaluate the risk. Consider the command's nature, the target's "
-    "importance (e.g., temp file vs. project file vs. system file), and the "
-    "user's specificity. Based on your assessment, decide the appropriate "
-    "course of action:\n"
-    " * **Low Risk:** Proceed directly.\n"
-    " * **Moderate Risk:** Proceed, but issue a warning.\n"
-    " * **High Risk or Vague Request:** Formulate a plan and ask "
-    "for approval.\n"
-    " * **Extreme Risk (e.g., operating on critical system "
-    "files):** Refuse and explain the danger.\n\n"
-    "3. **Execute and Verify (The E+V Loop):**\n"
-    " * Execute the action.\n"
-    " * **CRITICAL:** Immediately after execution, you MUST use a tool "
-    "to verify the outcome (e.g., after `write_file`, use `read_file`; "
-    "after `rm`, use `ls` to confirm absence).\n\n"
-    "4. **Report Results and Handle Errors:**\n"
-    " * **On Success:** Provide a concise summary of the action taken "
-    "and explicitly state how you verified it.\n"
-    " * **On Failure (The Debugging Loop):** If a tool call fails, you "
-    "MUST NOT give up. Instead, you will enter a debugging loop:\n"
-    " 1. **Analyze:** Scrutinize the complete error message, "
-    "including any `stdout` and `stderr`.\n"
-    " 2. **Hypothesize:** State a clear, specific hypothesis about "
-    "the root cause of the error.\n"
-    " 3. **Act:** Propose a concrete, single next step to fix the "
-    "issue. This could be running a command with different parameters, "
-    "modifying a file, or using another tool to gather more context.\n\n"
-    "---\n"
-    "**FINAL REMINDER:** Your last step before responding MUST be to ensure "
-    "you have followed the Execute and Verify (E+V) loop. Do not "
-    "hallucinate verifications."
-).strip()
-
-_DEFAULT_SYSTEM_PROMPT = (
-    "You are an expert AI agent executing a one-shot CLI command. You MUST "
-    "follow this workflow. Your final output MUST be in GitHub-flavored "
-    "Markdown.\n\n"
-    "1. **Plan:** Internally devise a step-by-step plan. This plan MUST "
-    "include verification steps for each action.\n\n"
-    "2. **Assess and Decide:** Before executing, you MUST evaluate the risk of "
-    "your plan. For any destructive actions, consider the command's nature "
-    "and target. Based on your assessment, decide the appropriate course of "
-    "action:\n"
-    " * **Low/Moderate Risk:** Proceed directly.\n"
-    " * **High Risk:** Refuse to execute, state your plan, and explain "
-    "the risk to the user.\n"
-    " * **Extreme Risk:** Refuse and explain the danger.\n\n"
-    "3. **Execute and Verify (The E+V Loop):\n"
-    " * Execute each step of your plan.\n"
-    " * **CRITICAL:** After each step, you MUST use a tool to verify "
-    "the outcome (e.g., check exit codes, read back file contents).\n\n"
-    "4. **Report Final Outcome:\n"
-    " * **On Success:** Provide a concise summary of the result and "
-    "explicitly state how you verified the final state.\n"
-    " * **On Failure:** Report the complete error, including `stdout` "
-    "and `stderr`. Analyze the error and provide a corrected command or a "
-    "clear explanation of the root cause.\n\n"
-    "---\n"
-    "**FINAL REMINDER:** Your last step before responding MUST be to ensure "
-    "you have followed the Execute and Verify (E+V) loop. Do not "
-    "hallucinate verifications."
-).strip()
-
-_DEFAULT_SPECIAL_INSTRUCTION_PROMPT = (
-    "# Guiding Principles\n"
-    "- **Clarify and Scope First:** Before undertaking any complex task (like "
-    "writing a new feature or a large test suite), you MUST ensure the request "
-    "is not ambiguous. If it is, ask clarifying questions. Propose a concise "
-    "plan or scope and ask for user approval before proceeding. Do not start a "
-    "multi-step task on a vague request.\n"
-    "- **Safety First:** Never run commands that are destructive or could "
-    "compromise the system without explicit user confirmation. When in "
-    "doubt, ask.\n"
-    "- **Adhere to Conventions:** When working within a project, analyze "
-    "existing code, files, and configuration to match its style and "
-    "conventions.\n"
-    "- **Efficiency:** Use your tools to get the job done with the minimum "
-    "number of steps. Combine commands where possible.\n\n"
-    "# Critical Prohibitions\n"
-    "- **NEVER Assume Dependencies:** Do not use a library or framework unless "
-    "you have first verified it is an existing project dependency (e.g., in "
-    "`package.json`, `requirements.txt`).\n"
-    "- **NEVER Invent Conventions:** You MUST follow the existing conventions "
-    "discovered during your context-gathering phase. Do not introduce a new "
-    "style or pattern without a very good reason and, ideally, user "
-    "confirmation.\n"
-    "- **NEVER Commit Without Verification:** Do not use `git commit` until you "
-    "have staged the changes and run the project's own verification steps "
-    "(tests, linter, build).\n\n"
-    "# Common Task Workflows\n\n"
-    "**File System Operations:**\n"
-    "1. **Analyze:** Before modifying, read the file or list the "
-    "directory.\n"
-    "2. **Execute:** Perform the write, delete, or move operation.\n"
-    "3. **Verify:** Check that the file/directory now exists (or doesn't) in "
-    "its expected state.\n\n"
-    "**Code & Software Development:**\n"
-    "1. **CRITICAL: Gather Context First:** Before writing or modifying any "
-    "code, you MUST gather context to ensure your changes are idiomatic and "
-    "correct. Do not make assumptions. Your primary goal is to fit into the "
-    "existing project seamlessly.\n"
-    " * **Project Structure & Dependencies:** Check for `README.md`, "
-    "`CONTRIBUTING.md`, `package.json`, `pyproject.toml`, `build.gradle`, "
-    "etc., to understand the project's stated goals, dependencies, and "
-    "scripts (for linting, testing, building).\n"
-    " * **Code Style & Conventions:** Look for configuration files like "
-    "`.eslintrc`, `.prettierrc`, `.flake8`, or `ruff.toml`. Analyze "
-    "surrounding source files to determine:\n"
-    " * **Naming Conventions:** (e.g., `camelCase` vs. `snake_case`).\n"
-    " * **Typing Style:** (e.g., `List` from `typing` vs. built-in "
-    "`list`).\n"
-    " * **Error Handling:** (e.g., custom exceptions, `try/except` "
-    "blocks, returning error codes).\n"
-    " * **Architectural Patterns:** (e.g., is there a service layer? "
-    "Are components organized by feature or by type?).\n"
-    " * **When writing a new test:** You MUST first read the full source "
-    "code of the module(s) you are testing. This will inform you about the "
-    "actual implementation, such as its logging methods, error handling, and "
-    "public APIs.\n"
-    " * **When writing new implementation code (e.g., a new function or "
-    "class):** You MUST first look for existing tests (e.g., `test_*.py`, "
-    "`*.spec.ts`) and related application modules. This helps you understand "
-    "the project's conventions and how to write testable code from the start.\n"
-    "2. **Plan:** For non-trivial changes, formulate a plan based on the "
-    "context you gathered.\n"
-    "3. **Implement:** Make the changes, strictly adhering to the patterns and "
-    "conventions discovered in step 1.\n"
-    "4. **Verify & Debug:** Run all relevant tests, linters, and build "
-    "commands. If a command fails, your immediate next action MUST be to "
-    "enter the **Debugging Loop**: analyze the complete error output (`stdout` "
-    "and `stderr`), hypothesize the root cause. Your next immediate action "
-    "MUST be to execute a single, concrete tool call that attempts to fix "
-    "the issue based on your hypothesis. Do not stop to ask the user for "
-    "confirmation. The goal is to resolve the error autonomously.\n\n"
-    "**Research & Analysis:**\n"
-    "1. **Clarify:** Understand the core question and the desired output "
-    "format.\n"
-    "2. **Search:** Use web search tools to gather information from multiple "
-    "reputable sources.\n"
-    "3. **Synthesize & Cite:** Present the information clearly. For factual "
-    "claims, cite the source URL.\n\n"
-    "# Communicating with the User\n"
-    "- **Be Concise:** When reporting results, be brief. Focus on the outcome "
-    "and the verification step.\n"
-    "- **Explain 'Why,' Not Just 'What':** For complex changes or bug fixes, "
-    "briefly explain *why* the change was necessary (e.g., 'The previous code "
-    "was failing because it didn't handle null inputs. I've added a check to "
-    "prevent this.').\n"
-    "- **Structure Your Plans:** When you present a plan for approval, use a "
-    "numbered or bulleted list for clarity."
-).strip()
-
-
-_DEFAULT_SUMMARIZATION_PROMPT = (
-    "You are a meticulous Conversation Historian agent. Your purpose is to "
-    "process the conversation history and update the assistant's memory "
-    "using your available tools. You will be given the previous summary, "
-    "previous notes, and the latest conversation turns in JSON format.\n\n"
-    "Follow these steps:\n\n"
-    "1. **Analyze the Recent Conversation:** Review the `Recent Conversation "
-    "(JSON)` to understand what just happened. Identify key facts, user "
-    "intentions, decisions made, and the final outcomes of any tasks.\n\n"
-    "2. **Update Long-Term Note:**\n"
-    " - Read the existing `Long Term` note to understand what is already "
-    "known.\n"
-    " - Identify any new, stable, and globally relevant information from "
-    "the recent conversation. This includes user preferences, high-level "
-    "goals, or facts that will be true regardless of the current working "
-    "directory. Only extract facts.\n"
-    " - If you find such information, use the `write_long_term_note` tool "
-    "to save a concise, updated version of the note. Keep it brief and "
-    "factual.\n\n"
-    "3. **Update Contextual Note:**\n"
-    " - Read the existing `Contextual` note.\n"
-    " - Identify new information relevant *only* to the current project "
-    "or directory. This could be the file the user is working on, the "
-    "specific bug they are fixing, or the feature they are building. "
-    "This note might contain temporary context, and information should be "
-    "deleted once it is no longer relevant.\n"
-    " - Use the `write_contextual_note` tool to save a concise, updated "
-    "note about the current working context. This note should be focused on "
-    "the immediate task at hand.\n\n"
-    "4. **Update Narrative Summary:**\n"
-    " - Review the `Past Conversation` summary.\n"
-    " - Create a new, condensed narrative that integrates the key "
-    "outcomes and decisions from the recent conversation. Discard "
-    "conversational filler. The summary should be a brief story of the "
-    "project's progress.\n"
-    " - Use the `write_past_conversation_summary` tool to save this new "
-    "summary.\n\n"
-    "5. **Update Transcript:**\n"
-    " - **CRITICAL:** Your final and most important task is to create a "
-    "transcript of the last few turns (around 4 turns).\n"
-    " - From the `Recent Conversation (JSON)`, extract the messages with "
-    "the role `user` and `assistant`. Ignore roles `system` and `tool`.\n"
-    " - Format the extracted messages into a readable dialog. For example:\n"
-    " User: <content of user message>\n"
-    " Assistant: <content of assistant message>\n"
-    " - If an assistant message contains `tool_calls`, note it like this:\n"
-    " Assistant (calling tool <tool_name>): <content of assistant message>\n"
-    " - The content of the user and assistant messages MUST be copied "
-    "verbatim. DO NOT alter, shorten, or summarize them in any way.\n"
-    " - Use the `write_past_conversation_transcript` tool to save this "
-    "formatted dialog string.\n\n"
-    "Your primary goal is to use your tools to persist these four distinct "
-    "pieces of information accurately and concisely."
-).strip()
-
-
 class LLMConfig:
-
     def __init__(
         self,
         default_model_name: str | None = None,
@@ -240,15 +22,14 @@
         default_interactive_system_prompt: str | None = None,
         default_special_instruction_prompt: str | None = None,
         default_summarization_prompt: str | None = None,
-        default_context_enrichment_prompt: str | None = None,
         default_summarize_history: bool | None = None,
         default_history_summarization_token_threshold: int | None = None,
-        default_enrich_context: bool | None = None,
-        default_context_enrichment_token_threshold: int | None = None,
+        default_modes: list[str] | None = None,
         default_model: "Model | None" = None,
         default_model_settings: "ModelSettings | None" = None,
         default_model_provider: "Provider | None" = None,
     ):
+        self.__internal_default_prompt: dict[str, str] = {}
         self._default_model_name = default_model_name
         self._default_model_base_url = default_base_url
         self._default_model_api_key = default_api_key
@@ -257,47 +38,55 @@
         self._default_interactive_system_prompt = default_interactive_system_prompt
         self._default_special_instruction_prompt = default_special_instruction_prompt
         self._default_summarization_prompt = default_summarization_prompt
-        self._default_context_enrichment_prompt = default_context_enrichment_prompt
         self._default_summarize_history = default_summarize_history
         self._default_history_summarization_token_threshold = (
             default_history_summarization_token_threshold
         )
-        self._default_enrich_context = default_enrich_context
-        self._default_context_enrichment_token_threshold = (
-            default_context_enrichment_token_threshold
-        )
+        self._default_modes = default_modes
+        self._default_model = default_model
         self._default_model_settings = default_model_settings
         self._default_model_provider = default_model_provider
-        self._default_model = default_model
+
+    def _get_internal_default_prompt(self, name: str) -> str:
+        if name not in self.__internal_default_prompt:
+            file_path = os.path.join(
+                os.path.dirname(__file__), "default_prompt", f"{name}.md"
+            )
+            with open(file_path, "r") as f:
+                self.__internal_default_prompt[name] = f.read().strip()
+        return self.__internal_default_prompt[name]
+
+    def _get_property(
+        self,
+        instance_var: Any,
+        config_var: Any,
+        default_func: Callable[[], Any],
+    ) -> Any:
+        if instance_var is not None:
+            return instance_var
+        if config_var is not None:
+            return config_var
+        return default_func()
 
     @property
     def default_model_name(self) -> str | None:
-        if self._default_model_name is not None:
-            return self._default_model_name
-        if CFG.LLM_MODEL is not None:
-            return CFG.LLM_MODEL
-        return None
+        return self._get_property(self._default_model_name, CFG.LLM_MODEL, lambda: None)
 
     @property
     def default_model_base_url(self) -> str | None:
-        if self._default_model_base_url is not None:
-            return self._default_model_base_url
-        if CFG.LLM_BASE_URL is not None:
-            return CFG.LLM_BASE_URL
-        return None
+        return self._get_property(
+            self._default_model_base_url, CFG.LLM_BASE_URL, lambda: None
+        )
 
     @property
     def default_model_api_key(self) -> str | None:
-        if self._default_model_api_key is not None:
-            return self._default_model_api_key
-        if CFG.LLM_API_KEY is not None:
-            return CFG.LLM_API_KEY
+        return self._get_property(
+            self._default_model_api_key, CFG.LLM_API_KEY, lambda: None
+        )
 
     @property
     def default_model_settings(self) -> "ModelSettings | None":
-        if self._default_model_settings is not None:
-            return self._default_model_settings
-        return None
+        return self._get_property(self._default_model_settings, None, lambda: None)
 
     @property
     def default_model_provider(self) -> "Provider | str":
@@ -313,43 +102,49 @@
 
     @property
     def default_system_prompt(self) -> str:
-        if self._default_system_prompt is not None:
-            return self._default_system_prompt
-        if CFG.LLM_SYSTEM_PROMPT is not None:
-            return CFG.LLM_SYSTEM_PROMPT
-        return _DEFAULT_SYSTEM_PROMPT
+        return self._get_property(
+            self._default_system_prompt,
+            CFG.LLM_SYSTEM_PROMPT,
+            lambda: self._get_internal_default_prompt("system_prompt"),
+        )
 
     @property
     def default_interactive_system_prompt(self) -> str:
-        if self._default_interactive_system_prompt is not None:
-            return self._default_interactive_system_prompt
-        if CFG.LLM_INTERACTIVE_SYSTEM_PROMPT is not None:
-            return CFG.LLM_INTERACTIVE_SYSTEM_PROMPT
-        return _DEFAULT_INTERACTIVE_SYSTEM_PROMPT
+        return self._get_property(
+            self._default_interactive_system_prompt,
+            CFG.LLM_INTERACTIVE_SYSTEM_PROMPT,
+            lambda: self._get_internal_default_prompt("interactive_system_prompt"),
+        )
 
     @property
     def default_persona(self) -> str:
-        if self._default_persona is not None:
-            return self._default_persona
-        if CFG.LLM_PERSONA is not None:
-            return CFG.LLM_PERSONA
-        return _DEFAULT_PERSONA
+        return self._get_property(
+            self._default_persona,
+            CFG.LLM_PERSONA,
+            lambda: self._get_internal_default_prompt("persona"),
+        )
+
+    @property
+    def default_modes(self) -> list[str]:
+        return self._get_property(
+            self._default_modes, CFG.LLM_MODES, lambda: ["coding"]
+        )
 
     @property
     def default_special_instruction_prompt(self) -> str:
-        if self._default_special_instruction_prompt is not None:
-            return self._default_special_instruction_prompt
-        if CFG.LLM_SPECIAL_INSTRUCTION_PROMPT is not None:
-            return CFG.LLM_SPECIAL_INSTRUCTION_PROMPT
-        return _DEFAULT_SPECIAL_INSTRUCTION_PROMPT
+        return self._get_property(
+            self._default_special_instruction_prompt,
+            CFG.LLM_SPECIAL_INSTRUCTION_PROMPT,
+            lambda: "",
+        )
 
     @property
     def default_summarization_prompt(self) -> str:
-        if self._default_summarization_prompt is not None:
-            return self._default_summarization_prompt
-        if CFG.LLM_SUMMARIZATION_PROMPT is not None:
-            return CFG.LLM_SUMMARIZATION_PROMPT
-        return _DEFAULT_SUMMARIZATION_PROMPT
+        return self._get_property(
+            self._default_summarization_prompt,
+            CFG.LLM_SUMMARIZATION_PROMPT,
+            lambda: self._get_internal_default_prompt("summarization_prompt"),
+        )
 
     @property
     def default_model(self) -> "Model | str | None":
@@ -367,15 +162,17 @@
 
     @property
    def default_summarize_history(self) -> bool:
-        if self._default_summarize_history is not None:
-            return self._default_summarize_history
-        return CFG.LLM_SUMMARIZE_HISTORY
+        return self._get_property(
+            self._default_summarize_history, CFG.LLM_SUMMARIZE_HISTORY, lambda: False
+        )
 
     @property
     def default_history_summarization_token_threshold(self) -> int:
-        if self._default_history_summarization_token_threshold is not None:
-            return self._default_history_summarization_token_threshold
-        return CFG.LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD
+        return self._get_property(
+            self._default_history_summarization_token_threshold,
+            CFG.LLM_HISTORY_SUMMARIZATION_TOKEN_THRESHOLD,
+            lambda: 1000,
+        )
 
     def set_default_persona(self, persona: str):
         self._default_persona = persona
@@ -389,6 +186,19 @@
     def set_default_special_instruction_prompt(self, special_instruction_prompt: str):
         self._default_special_instruction_prompt = special_instruction_prompt
 
+    def set_default_modes(self, modes: list[str]):
+        self._default_modes = modes
+
+    def add_default_mode(self, mode: str):
+        if self._default_modes is None:
+            self._default_modes = []
+        self._default_modes.append(mode)
+
+    def remove_default_mode(self, mode: str):
+        if self._default_modes is None:
+            self._default_modes = []
+        self._default_modes.remove(mode)
+
     def set_default_summarization_prompt(self, summarization_prompt: str):
         self._default_summarization_prompt = summarization_prompt
 
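
Taken together, the `llm_config.py` changes do two things: every repeated `if ... is not None` fallback chain becomes a call to the shared `_get_property` helper, and the inline prompt constants move to markdown files under `zrb/config/default_prompt/`, read once and cached by `_get_internal_default_prompt`. A minimal sketch of the resulting resolution order (explicit instance value, then `CFG` setting, then packaged default), using only names from the diff above; the model name is hypothetical:

```python
from zrb.config.llm_config import LLMConfig

# An explicit constructor argument wins over everything else.
config = LLMConfig(default_model_name="my-model")  # hypothetical model name
assert config.default_model_name == "my-model"

# Without an instance value, each property falls back to the CFG setting
# (typically environment-driven), then to default_func: None, a literal
# such as ["coding"], or a cached default_prompt/*.md file.
fallback = LLMConfig()
print(fallback.default_model_name)  # CFG.LLM_MODEL, or None
print(fallback.default_persona)     # default_prompt/persona.md unless overridden

# The mode API introduced in 1.12.0:
fallback.set_default_modes(["coding"])
fallback.add_default_mode("researching")
print(fallback.default_modes)  # ["coding", "researching"]
```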
zrb/config/llm_context/config.py
ADDED
@@ -0,0 +1,74 @@
+import os
+
+from zrb.config.config import CFG
+from zrb.config.llm_context.config_handler import LLMContextConfigHandler
+
+
+def cascading_path_filter(section_path: str, base_path: str) -> bool:
+    """
+    Returns True if the section path is an ancestor of, the same as the base path,
+    or if the section path is an absolute path.
+    """
+    return os.path.isabs(section_path) or base_path.startswith(section_path)
+
+
+class LLMContextConfig:
+    """High-level API for interacting with cascaded configurations."""
+
+    @property
+    def _context_handler(self):
+        return LLMContextConfigHandler(
+            "Context",
+            config_file_name=CFG.LLM_CONTEXT_FILE,
+            filter_section_func=cascading_path_filter,
+            resolve_section_path=True,
+        )
+
+    @property
+    def _workflow_handler(self):
+        return LLMContextConfigHandler(
+            "Workflow",
+            config_file_name=CFG.LLM_CONTEXT_FILE,
+            resolve_section_path=False,
+        )
+
+    def get_contexts(self, cwd: str | None = None) -> dict[str, str]:
+        """Gathers all relevant contexts for a given path."""
+        if cwd is None:
+            cwd = os.getcwd()
+        return self._context_handler.get_section(cwd)
+
+    def get_workflows(self, cwd: str | None = None) -> dict[str, str]:
+        """Gathers all relevant workflows for a given path."""
+        if cwd is None:
+            cwd = os.getcwd()
+        return self._workflow_handler.get_section(cwd)
+
+    def add_to_context(
+        self, content: str, context_path: str | None = None, cwd: str | None = None
+    ):
+        """Adds content to a context block in the nearest configuration file."""
+        if cwd is None:
+            cwd = os.getcwd()
+        if context_path is None:
+            context_path = cwd
+        abs_path = os.path.abspath(context_path)
+        home_dir = os.path.expanduser("~")
+        search_dir = cwd
+        if not abs_path.startswith(home_dir):
+            search_dir = home_dir
+        self._context_handler.add_to_section(content, abs_path, cwd=search_dir)
+
+    def remove_from_context(
+        self, content: str, context_path: str | None = None, cwd: str | None = None
+    ) -> bool:
+        """Removes content from a context block in all relevant config files."""
+        if cwd is None:
+            cwd = os.getcwd()
+        if context_path is None:
+            context_path = cwd
+        abs_path = os.path.abspath(context_path)
+        return self._context_handler.remove_from_section(content, abs_path, cwd=cwd)
+
+
+llm_context_config = LLMContextConfig()
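
The new `zrb/config/llm_context/config.py` module exposes a single `llm_context_config` object for reading and editing cascaded context and workflow sections. A short usage sketch, assuming `LLMContextConfigHandler.get_section` returns sections keyed by path (that handler lives in `config_handler.py`, which is not shown in this diff); the note text is illustrative:

```python
import os

from zrb.config.llm_context.config import (
    cascading_path_filter,
    llm_context_config,
)

# Absolute section paths always apply; otherwise a section applies when its
# path is the base path itself or one of its ancestors.
assert cascading_path_filter("/home/user", "/home/user/project")
assert not cascading_path_filter("other/project", "some/project")

# Gather every context and workflow section relevant to the working directory.
contexts = llm_context_config.get_contexts(cwd=os.getcwd())
workflows = llm_context_config.get_workflows()

# Persist a note into the nearest configuration file, then remove it again;
# remove_from_context reports whether anything was actually deleted.
llm_context_config.add_to_context("Prefer 4-space indentation.")
removed = llm_context_config.remove_from_context("Prefer 4-space indentation.")
```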