zrb 1.10.1__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/chat_session.py +4 -5
- zrb/builtin/llm/tool/file.py +2 -2
- zrb/config/config.py +27 -80
- zrb/config/default_prompt/file_extractor_system_prompt.md +12 -0
- zrb/config/default_prompt/interactive_system_prompt.md +31 -0
- zrb/config/default_prompt/persona.md +1 -0
- zrb/config/default_prompt/repo_extractor_system_prompt.md +112 -0
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +10 -0
- zrb/config/default_prompt/summarization_prompt.md +42 -0
- zrb/config/default_prompt/system_prompt.md +28 -0
- zrb/config/default_workflow/code.md +26 -0
- zrb/config/default_workflow/content.md +19 -0
- zrb/config/default_workflow/research.md +20 -0
- zrb/config/llm_config.py +87 -270
- zrb/config/llm_context/config.py +74 -0
- zrb/config/llm_context/config_handler.py +238 -0
- zrb/context/any_shared_context.py +10 -0
- zrb/context/context.py +8 -0
- zrb/context/shared_context.py +9 -0
- zrb/runner/web_route/task_session_api_route.py +1 -1
- zrb/task/llm/agent.py +2 -2
- zrb/task/llm/conversation_history_model.py +78 -224
- zrb/task/llm/history_summarization.py +6 -6
- zrb/task/llm/prompt.py +32 -18
- zrb/util/llm/prompt.py +42 -6
- {zrb-1.10.1.dist-info → zrb-1.11.0.dist-info}/METADATA +2 -2
- {zrb-1.10.1.dist-info → zrb-1.11.0.dist-info}/RECORD +29 -17
- {zrb-1.10.1.dist-info → zrb-1.11.0.dist-info}/WHEEL +0 -0
- {zrb-1.10.1.dist-info → zrb-1.11.0.dist-info}/entry_points.txt +0 -0
zrb/task/llm/conversation_history_model.py CHANGED
@@ -3,10 +3,11 @@ import os
 from collections.abc import Callable
 from typing import Any

-from zrb.config.config import
+from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.typing import ListOfDict
-from zrb.util.file import read_file
+from zrb.util.file import read_file
+from zrb.util.llm.prompt import make_prompt_section
 from zrb.util.run import run_async

@@ -81,12 +82,8 @@ class ConversationHistory:
         return None

     def fetch_newest_notes(self):
-
-
-        self.long_term_note = read_file(long_term_note_path)
-        contextual_note_path = self._get_contextual_note_path()
-        if os.path.isfile(contextual_note_path):
-            self.contextual_note = read_file(contextual_note_path)
+        self._fetch_long_term_note()
+        self._fetch_contextual_note()

     @classmethod
     def parse_and_validate(
@@ -137,12 +134,13 @@ class ConversationHistory:
             past_conversation_summary (str): The summary text to store.

         Returns:
-
+            str: A JSON object indicating the success or failure of the operation.

         Raises:
             Exception: If the summary cannot be written.
         """
         self.past_conversation_summary = past_conversation_summary
+        return json.dumps({"success": True})

     def write_past_conversation_transcript(self, past_conversation_transcript: str):
         """
@@ -155,284 +153,140 @@ class ConversationHistory:
             past_conversation_transcript (str): The transcript text to store.

         Returns:
-
+            str: A JSON object indicating the success or failure of the operation.

         Raises:
             Exception: If the transcript cannot be written.
         """
         self.past_conversation_transcript = past_conversation_transcript
+        return json.dumps({"success": True})

-    def read_long_term_note(
-        self,
-        start_line: int | None = None,
-        end_line: int | None = None,
-    ) -> str:
+    def read_long_term_note(self) -> str:
         """
-        Read the content of the long-term
+        Read the content of the long-term references.

         This tool helps you retrieve knowledge or notes stored for long-term reference.
         If the note does not exist, you may want to create it using the write tool.

-        Args:
-            start_line (int, optional): 1-based line number to start reading from.
-            end_line (int, optional): 1-based line number to stop reading at (inclusive).
-
         Returns:
-            str: JSON with
-            and total lines.
+            str: JSON with content of the notes.

         Raises:
             Exception: If the note cannot be read.
-                Suggests writing the note if it does not exist.
         """
-        return self.
-            self._get_long_term_note_path(),
-            start_line,
-            end_line,
-            note_type="long-term note",
-        )
+        return json.dumps({"content": self._fetch_long_term_note()})

-    def
+    def add_long_term_info(self, new_info: str) -> str:
         """
-
-
-        Use this tool to create a new long-term note or replace its entire content.
-        Always read the note first to avoid accidental data loss, unless you are sure
-        you want to overwrite.
+        Add new info for long-term reference.

         Args:
-
+            new_info (str): New info to be added into long-term references.

         Returns:
-            str: JSON
+            str: JSON with new content of the notes.

         Raises:
-            Exception: If the note cannot be
+            Exception: If the note cannot be read.
         """
-
-        return self.
-            self._get_long_term_note_path(), content, note_type="long-term note"
-        )
+        llm_context_config.add_to_context(new_info, cwd="/")
+        return json.dumps({"success": True, "content": self._fetch_long_term_note()})

-    def
-        self,
-        old_string: str,
-        new_string: str,
-    ) -> str:
+    def remove_long_term_info(self, irrelevant_info: str) -> str:
         """
-
-
-        Use this tool to update a specific part of the long-term note without
-        overwriting the entire content. If the note does not exist, consider writing it
-        first. If the string is not found, check your input or read the note to verify.
+        Remove irrelevant info from long-term reference.

         Args:
-
-            new_string (str): The string to replace with.
+            irrelevant_info (str): Irrelevant info to be removed from long-term references.

         Returns:
-            str: JSON
+            str: JSON with new content of the notes and deletion status.

         Raises:
-            Exception: If the note
-                Suggests writing or reading the note.
+            Exception: If the note cannot be read.
         """
-
-
-
-
-
+        was_removed = llm_context_config.remove_from_context(irrelevant_info, cwd="/")
+        return json.dumps(
+            {
+                "success": was_removed,
+                "content": self._fetch_long_term_note(),
+            }
         )
-        self.long_term_note = new_string
-        return result

-    def read_contextual_note(
-        self,
-        start_line: int | None = None,
-        end_line: int | None = None,
-    ) -> str:
+    def read_contextual_note(self) -> str:
         """
-        Read the content of the contextual
+        Read the content of the contextual references.

-        This tool helps you retrieve
+        This tool helps you retrieve knowledge or notes stored for contextual reference.
         If the note does not exist, you may want to create it using the write tool.

-        Args:
-            start_line (int, optional): 1-based line number to start reading from.
-            end_line (int, optional): 1-based line number to stop reading at (inclusive).
-
         Returns:
-            str: JSON with
-            and total lines.
+            str: JSON with content of the notes.

         Raises:
             Exception: If the note cannot be read.
-                Suggests writing the note if it does not exist.
         """
-        return self.
-            self._get_contextual_note_path(),
-            start_line,
-            end_line,
-            note_type="contextual note",
-        )
+        return json.dumps({"content": self._fetch_contextual_note()})

-    def
+    def add_contextual_info(self, new_info: str, context_path: str | None) -> str:
         """
-
-
-        Use this tool to create a new contextual note or replace its entire content.
-        Always read the note first to avoid accidental data loss, unless you are sure
-        you want to overwrite.
+        Add new info for contextual reference.

         Args:
-
+            new_info (str): New info to be added into contextual references.
+            context_path (str, optional): contextual directory path for new info

         Returns:
-            str: JSON
+            str: JSON with new content of the notes.

         Raises:
-            Exception: If the note cannot be
+            Exception: If the note cannot be read.
         """
-
-
-
-        )
+        if context_path is None:
+            context_path = self.project_path
+        llm_context_config.add_to_context(new_info, context_path=context_path)
+        return json.dumps({"success": True, "content": self._fetch_contextual_note()})

-    def
-        self,
-        old_string: str,
-        new_string: str,
+    def remove_contextual_info(
+        self, irrelevant_info: str, context_path: str | None
     ) -> str:
         """
-
-
-        Use this tool to update a specific part of the contextual note without
-        overwriting the entire content. If the note does not exist, consider writing it
-        first. If the string is not found, check your input or read the note to verify.
+        Remove irrelevant info from contextual reference.

         Args:
-
-
+            irrelevant_info (str): Irrelevant info to be removed from contextual references.
+            context_path (str, optional): contextual directory path of the irrelevant info

         Returns:
-            str: JSON
+            str: JSON with new content of the notes and deletion status.

         Raises:
-            Exception: If the note
-                Suggests writing or reading the note.
+            Exception: If the note cannot be read.
         """
-
-        self.
-
-
-
+        if context_path is None:
+            context_path = self.project_path
+        was_removed = llm_context_config.remove_from_context(
+            irrelevant_info, context_path=context_path
+        )
+        return json.dumps(
+            {
+                "success": was_removed,
+                "content": self._fetch_contextual_note(),
+            }
         )
-        self.contextual_note = new_string
-        return result
-
-    def _get_long_term_note_path(self) -> str:
-        return os.path.abspath(os.path.expanduser(CFG.LLM_LONG_TERM_NOTE_PATH))
-
-    def _get_contextual_note_path(self) -> str:
-        return os.path.join(self.project_path, CFG.LLM_CONTEXTUAL_NOTE_FILE)
-
-    def _read_note(
-        self,
-        path: str,
-        start_line: int | None = None,
-        end_line: int | None = None,
-        note_type: str = "note",
-    ) -> str:
-        """
-        Internal helper to read a note file with line numbers and error handling.
-        """
-        if not os.path.exists(path):
-            return json.dumps(
-                {
-                    "path": path,
-                    "content": "",
-                    "start_line": 0,
-                    "end_line": 0,
-                    "total_lines": 0,
-                }
-            )
-        try:
-            content = read_file_with_line_numbers(path)
-            lines = content.splitlines()
-            total_lines = len(lines)
-            start_idx = (start_line - 1) if start_line is not None else 0
-            end_idx = end_line if end_line is not None else total_lines
-            if start_idx < 0:
-                start_idx = 0
-            if end_idx > total_lines:
-                end_idx = total_lines
-            if start_idx > end_idx:
-                start_idx = end_idx
-            selected_lines = lines[start_idx:end_idx]
-            content_result = "\n".join(selected_lines)
-            return json.dumps(
-                {
-                    "path": path,
-                    "content": content_result,
-                    "start_line": start_idx + 1,
-                    "end_line": end_idx,
-                    "total_lines": total_lines,
-                }
-            )
-        except Exception:
-            raise Exception(
-                f"Failed to read the {note_type}. "
-                f"If the {note_type} does not exist, try writing it first."
-            )
-
-    def _write_note(self, path: str, content: str, note_type: str = "note") -> str:
-        """
-        Internal helper to write a note file with error handling.
-        """
-        try:
-            directory = os.path.dirname(path)
-            if directory and not os.path.exists(directory):
-                os.makedirs(directory, exist_ok=True)
-            write_file(path, content)
-            return json.dumps({"success": True, "path": path})
-        except (OSError, IOError):
-            raise Exception(
-                f"Failed to write the {note_type}. "
-                "Please check if the path is correct and you have write permissions."
-            )
-        except Exception:
-            raise Exception(
-                f"Unexpected error while writing the {note_type}. "
-                "Please check your input and try again."
-            )

-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            if old_string not in content:
-                raise Exception(
-                    f"The specified string to replace was not found in the {note_type}. ("
-                    f"Try reading the {note_type} to verify its content or "
-                    f"write a new one if needed)."
-                )
-            new_content = content.replace(old_string, new_string, 1)
-            write_file(path, new_content)
-            return json.dumps({"success": True, "path": path})
-        except Exception:
-            raise Exception(
-                f"Failed to replace content in the {note_type}. ("
-                f"Try reading the {note_type} to verify its content or "
-                "write a new one if needed)."
-            )
+    def _fetch_long_term_note(self):
+        contexts = llm_context_config.get_contexts(cwd=self.project_path)
+        self.long_term_note = contexts.get("/", "")
+        return self.long_term_note
+
+    def _fetch_contextual_note(self):
+        contexts = llm_context_config.get_contexts(cwd=self.project_path)
+        self.contextual_note = "\n".join(
+            [
+                make_prompt_section(header, content)
+                for header, content in contexts.items()
+                if header != "/"
+            ]
+        )
+        return self.contextual_note
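The rewritten note tools above no longer read and write note files themselves; they delegate storage to the new `llm_context_config` object and only serialize results to JSON for the LLM. Below is a minimal usage sketch based solely on the calls visible in this hunk (`add_to_context`, `remove_from_context`, `get_contexts`); the sample strings, the `/home/user/project` path, and the exact shape of anything not shown in the diff are assumptions.

```python
import json

from zrb.config.llm_context.config import llm_context_config

# Global ("/") notes vs. notes scoped to a directory (hypothetical path).
llm_context_config.add_to_context("Prefers concise answers", cwd="/")
llm_context_config.add_to_context("Uses poetry for builds", context_path="/home/user/project")

# get_contexts() maps a scope header ("/" or a directory path) to its note content,
# which is how _fetch_long_term_note() / _fetch_contextual_note() consume it above.
contexts = llm_context_config.get_contexts(cwd="/home/user/project")
long_term_note = contexts.get("/", "")

# remove_from_context() reports whether anything was actually removed.
was_removed = llm_context_config.remove_from_context("Prefers concise answers", cwd="/")
print(json.dumps({"success": was_removed, "long_term_note": long_term_note}))
```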
zrb/task/llm/history_summarization.py CHANGED
@@ -145,16 +145,16 @@ async def summarize_history(
         tools=[
             conversation_history.write_past_conversation_summary,
             conversation_history.write_past_conversation_transcript,
-            conversation_history.read_contextual_note,
-            conversation_history.write_contextual_note,
-            conversation_history.replace_in_contextual_note,
             conversation_history.read_long_term_note,
-            conversation_history.
-            conversation_history.
+            conversation_history.add_long_term_info,
+            conversation_history.remove_long_term_info,
+            conversation_history.read_contextual_note,
+            conversation_history.add_contextual_info,
+            conversation_history.remove_contextual_info,
         ],
     )
     try:
-        ctx.print(stylize_faint("📝 Summarize"), plain=True)
+        ctx.print(stylize_faint("📝 Summarize Conversation >>>"), plain=True)
         summary_run = await run_agent_iteration(
             ctx=ctx,
             agent=summarization_agent,
zrb/task/llm/prompt.py CHANGED
@@ -125,42 +125,56 @@ def get_system_and_user_prompt(
 def extract_conversation_context(user_message: str) -> tuple[str, str]:
     modified_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
-    pattern = r"(?<!\w)@(?=[^,\s]
+    pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\s]+)"
     potential_resource_path = re.findall(pattern, user_message)
     apendixes = []
-    for ref in potential_resource_path:
+    for i, ref in enumerate(potential_resource_path):
         resource_path = os.path.abspath(os.path.expanduser(ref))
-
+        content = ""
+        ref_type = ""
         if os.path.isfile(resource_path):
             content = read_file_with_line_numbers(resource_path)
-
-                make_prompt_section(
-                    f"`{ref}` (file path: `{resource_path}`)", content, as_code=True
-                )
-            )
-            # Remove the '@' from the modified user message for valid file paths
-            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
+            ref_type = "file"
         elif os.path.isdir(resource_path):
             content = read_dir(resource_path)
+            ref_type = "directory"
+        if content != "":
+            # Replace the @-reference in the user message with the placeholder
+            placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
+            modified_user_message = modified_user_message.replace(
+                f"@{ref}", placeholder, 1
+            )
             apendixes.append(
                 make_prompt_section(
-                    f"
+                    f"{placeholder} ({ref_type} path: `{resource_path}`)",
                     content,
                     as_code=True,
                 )
             )
-            # Remove the '@' from the modified user message for valid directory paths
-            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
     conversation_context = "\n".join(
         [
-            make_prompt_section(
-                "Current Time", datetime.now(timezone.utc).astimezone().isoformat()
-            ),
-            make_prompt_section("Current Working Directory", os.getcwd()),
             make_prompt_section("Current OS", platform.system()),
             make_prompt_section("OS Version", platform.version()),
             make_prompt_section("Python Version", platform.python_version()),
-
+        ]
+    )
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    current_directory = os.getcwd()
+    modified_user_message = "\n".join(
+        [
+            make_prompt_section("User Message", modified_user_message),
+            make_prompt_section(
+                "Context",
+                "\n".join(
+                    [
+                        make_prompt_section(
+                            "Current working directory", current_directory
+                        ),
+                        make_prompt_section("Current time", iso_date),
+                        make_prompt_section("Apendixes", "\n".join(apendixes)),
+                    ]
+                ),
+            ),
         ]
     )
     return conversation_context, modified_user_message
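The hunk above stops splicing `@path` references directly into the user message; instead each match becomes a numbered placeholder and the referenced content is attached as an appendix section. A self-contained sketch of that placeholder rewriting, using only the regex from the diff and the standard library; the sample message and paths are made up for illustration:

```python
import os
import re

# Regex from the diff: "@" + a comma/whitespace-free token containing at least one "/".
pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\s]+)"

user_message = "Please review @src/main.py and @docs/api, thanks"
modified = user_message
for i, ref in enumerate(re.findall(pattern, user_message)):
    placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
    modified = modified.replace(f"@{ref}", placeholder, 1)

print(modified)
# Please review [Reference 1: main.py] and [Reference 2: api], thanks
```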
zrb/util/llm/prompt.py CHANGED
@@ -2,17 +2,53 @@ import re


 def _demote_markdown_headers(md: str) -> str:
-
-
-
+    lines = md.split("\n")
+    new_lines = []
+    fence_stack = []
+    for line in lines:
+        stripped_line = line.strip()
+        fence_match = re.match(r"^([`~]{3,})", stripped_line)

-
-
+        if fence_match:
+            current_fence = fence_match.group(1)
+            # If stack is not empty and we found a closing fence
+            if (
+                fence_stack
+                and fence_stack[-1][0] == current_fence[0]
+                and len(current_fence) >= len(fence_stack[-1])
+            ):
+                fence_stack.pop()
+            else:
+                fence_stack.append(current_fence)
+            new_lines.append(line)
+        else:
+            if fence_stack:  # If we are inside a code block
+                new_lines.append(line)
+            else:
+                match = re.match(r"^(#{1,6})(\s)", line)
+                if match:
+                    new_lines.append("#" + line)
+                else:
+                    new_lines.append(line)
+    return "\n".join(new_lines)


 def make_prompt_section(header: str, content: str, as_code: bool = False) -> str:
     if content.strip() == "":
         return ""
     if as_code:
-
+        # Find the longest sequence of backticks in the content
+        longest_backtick_sequence = 0
+        # Use finditer to find all occurrences of backticks
+        for match in re.finditer(r"`+", content):
+            longest_backtick_sequence = max(
+                longest_backtick_sequence, len(match.group(0))
+            )
+
+        # The fence should be one longer than the longest sequence found
+        fence_len = 4
+        if longest_backtick_sequence >= fence_len:
+            fence_len = longest_backtick_sequence + 1
+        fence = "`" * fence_len
+        return f"# {header}\n{fence}\n{content.strip()}\n{fence}\n"
     return f"# {header}\n{_demote_markdown_headers(content.strip())}\n"
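The updated `make_prompt_section` now sizes its code fence past any backtick run inside the content, and `_demote_markdown_headers` skips headers that sit inside fenced blocks. A short usage sketch; the expected output is inferred from the implementation shown in this hunk rather than from separate documentation:

````python
from zrb.util.llm.prompt import make_prompt_section

# Content containing a ``` fence: the section fence grows to four backticks so the
# embedded block cannot close the section early.
snippet = "Run this:\n```bash\necho hi\n```"
print(make_prompt_section("Example", snippet, as_code=True))
# Expected:
# # Example
# ````
# Run this:
# ```bash
# echo hi
# ```
# ````

# Without as_code, markdown headers in the content are demoted one level
# (outside code fences) so they nest under the generated section header.
print(make_prompt_section("Notes", "# Title\n## Sub"))
# Expected:
# # Notes
# ## Title
# ### Sub
````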
{zrb-1.10.1.dist-info → zrb-1.11.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.
+Version: 1.11.0
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -27,7 +27,7 @@ Requires-Dist: pdfplumber (>=0.11.6,<0.12.0) ; extra == "rag" or extra == "all"
 Requires-Dist: playwright (>=1.53.0,<2.0.0) ; extra == "playwright" or extra == "all"
 Requires-Dist: prompt-toolkit (>=3.0.51,<4.0.0)
 Requires-Dist: psutil (>=7.0.0,<8.0.0)
-Requires-Dist: pydantic-ai (>=0.
+Requires-Dist: pydantic-ai (>=0.4.4,<0.5.0)
 Requires-Dist: pyjwt (>=2.10.1,<3.0.0)
 Requires-Dist: python-dotenv (>=1.1.1,<2.0.0)
 Requires-Dist: python-jose[cryptography] (>=3.4.0,<4.0.0)