zrb 1.9.16__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zrb/task/llm/prompt.py CHANGED
@@ -1,7 +1,15 @@
1
+ import os
2
+ import platform
3
+ import re
4
+ from datetime import datetime, timezone
5
+
1
6
  from zrb.attr.type import StrAttr
2
7
  from zrb.config.llm_config import llm_config as llm_config
3
8
  from zrb.context.any_context import AnyContext
9
+ from zrb.task.llm.conversation_history_model import ConversationHistory
4
10
  from zrb.util.attr import get_attr, get_str_attr
11
+ from zrb.util.file import read_dir, read_file_with_line_numbers
12
+ from zrb.util.llm.prompt import make_prompt_section
5
13
 
6
14
 
7
15
  def get_persona(
@@ -52,26 +60,110 @@ def get_special_instruction_prompt(
52
60
  return llm_config.default_special_instruction_prompt
53
61
 
54
62
 
55
def get_system_and_user_prompt(
    ctx: AnyContext,
    user_message: str,
    persona_attr: StrAttr | None = None,
    system_prompt_attr: StrAttr | None = None,
    special_instruction_prompt_attr: StrAttr | None = None,
    conversation_history: ConversationHistory | None = None,
) -> tuple[str, str]:
    """Build the final system prompt and the rewritten user message.

    Merges persona, base system prompt, special instructions, past
    conversation (summary + last transcript), notes, and the conversation
    context extracted from *user_message* into one system prompt.

    Returns:
        Tuple of (system prompt, modified user message).
    """
    persona = get_persona(ctx, persona_attr)
    base_system_prompt = get_base_system_prompt(ctx, system_prompt_attr)
    special_instruction = get_special_instruction_prompt(
        ctx, special_instruction_prompt_attr
    )
    if conversation_history is None:
        # Fall back to an empty history so attribute lookups below are safe.
        conversation_history = ConversationHistory()
    conversation_context, new_user_message = extract_conversation_context(user_message)
    past_conversation = "\n".join(
        [
            make_prompt_section(
                "Summary",
                conversation_history.past_conversation_summary,
                as_code=True,
            ),
            make_prompt_section(
                "Last Transcript",
                conversation_history.past_conversation_transcript,
                as_code=True,
            ),
        ]
    )
    notes = "\n".join(
        [
            make_prompt_section(
                "Long Term",
                conversation_history.long_term_note,
                as_code=True,
            ),
            make_prompt_section(
                "Contextual",
                conversation_history.contextual_note,
                as_code=True,
            ),
        ]
    )
    sections = [
        make_prompt_section("Persona", persona),
        make_prompt_section("System Prompt", base_system_prompt),
        make_prompt_section("Special Instruction", special_instruction),
        make_prompt_section("Past Conversation", past_conversation),
        make_prompt_section("Notes", notes),
        make_prompt_section("Conversation Context", conversation_context),
    ]
    return "\n".join(sections), new_user_message
123
+
124
+
125
def extract_conversation_context(user_message: str) -> tuple[str, str]:
    """Extract inline ``@path`` references and environment info from a message.

    Scans *user_message* for ``@`` + path tokens (tokens must contain at
    least one ``/``), inlines each referenced file/directory's content as a
    prompt appendix, and strips the ``@`` marker from the message for every
    path that actually resolves.

    Args:
        user_message: The raw user message, possibly containing ``@path`` refs.

    Returns:
        Tuple of (conversation context section, modified user message).
    """
    modified_user_message = user_message
    # Match "@" + any non-space/comma sequence that contains at least one "/"
    pattern = r"(?<!\w)@(?=[^,\s]*/)([^,\s]+)"
    potential_resource_path = re.findall(pattern, user_message)
    # Fixed: removed leftover debug print; renamed "apendixes" -> "appendixes"
    # (including the section header typo below).
    appendixes = []
    for ref in potential_resource_path:
        resource_path = os.path.abspath(os.path.expanduser(ref))
        if os.path.isfile(resource_path):
            content = read_file_with_line_numbers(resource_path)
            appendixes.append(
                make_prompt_section(
                    f"`{ref}` (file path: `{resource_path}`)", content, as_code=True
                )
            )
            # Remove the '@' from the modified user message for valid file paths
            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
        elif os.path.isdir(resource_path):
            content = read_dir(resource_path)
            appendixes.append(
                make_prompt_section(
                    f"`{ref}` (directory path: `{resource_path}`)",
                    content,
                    as_code=True,
                )
            )
            # Remove the '@' from the modified user message for valid directory paths
            modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
    conversation_context = "\n".join(
        [
            make_prompt_section(
                "Current Time", datetime.now(timezone.utc).astimezone().isoformat()
            ),
            make_prompt_section("Current Working Directory", os.getcwd()),
            make_prompt_section("Current OS", platform.system()),
            make_prompt_section("OS Version", platform.version()),
            make_prompt_section("Python Version", platform.python_version()),
            make_prompt_section("Appendixes", "\n".join(appendixes)),
        ]
    )
    return conversation_context, modified_user_message
75
167
 
76
168
 
77
169
  def get_user_message(
@@ -85,7 +177,7 @@ def get_user_message(
85
177
  )
86
178
 
87
179
 
88
- def get_summarization_prompt(
180
+ def get_summarization_system_prompt(
89
181
  ctx: AnyContext,
90
182
  summarization_prompt_attr: StrAttr | None,
91
183
  ) -> str:
zrb/task/llm_task.py CHANGED
@@ -15,19 +15,17 @@ from zrb.task.llm.config import (
15
15
  get_model,
16
16
  get_model_settings,
17
17
  )
18
- from zrb.task.llm.context import extract_default_context
19
- from zrb.task.llm.context_enrichment import maybe_enrich_context
20
- from zrb.task.llm.history import (
21
- ConversationHistoryData,
18
+ from zrb.task.llm.conversation_history import (
22
19
  ListOfDict,
23
20
  read_conversation_history,
24
21
  write_conversation_history,
25
22
  )
23
+ from zrb.task.llm.conversation_history_model import ConversationHistory
26
24
  from zrb.task.llm.history_summarization import maybe_summarize_history
27
25
  from zrb.task.llm.prompt import (
28
- get_combined_system_prompt,
29
26
  get_context_enrichment_prompt,
30
- get_summarization_prompt,
27
+ get_summarization_system_prompt,
28
+ get_system_and_user_prompt,
31
29
  get_user_message,
32
30
  )
33
31
  from zrb.util.cli.style import stylize_faint
@@ -85,17 +83,17 @@ class LLMTask(BaseTask):
85
83
  list["MCPServer"] | Callable[[AnySharedContext], list["MCPServer"]]
86
84
  ) = [],
87
85
  conversation_history: (
88
- ConversationHistoryData
89
- | Callable[[AnySharedContext], ConversationHistoryData | dict | list]
86
+ ConversationHistory
87
+ | Callable[[AnySharedContext], ConversationHistory | dict | list]
90
88
  | dict
91
89
  | list
92
- ) = ConversationHistoryData(),
90
+ ) = ConversationHistory(),
93
91
  conversation_history_reader: (
94
- Callable[[AnySharedContext], ConversationHistoryData | dict | list | None]
92
+ Callable[[AnySharedContext], ConversationHistory | dict | list | None]
95
93
  | None
96
94
  ) = None,
97
95
  conversation_history_writer: (
98
- Callable[[AnySharedContext, ConversationHistoryData], None] | None
96
+ Callable[[AnySharedContext, ConversationHistory], None] | None
99
97
  ) = None,
100
98
  conversation_history_file: StrAttr | None = None,
101
99
  render_history_file: bool = True,
@@ -226,55 +224,57 @@ class LLMTask(BaseTask):
226
224
  model_api_key_attr=self._model_api_key,
227
225
  render_model_api_key=self._render_model_api_key,
228
226
  )
229
- context_enrichment_prompt = get_context_enrichment_prompt(
230
- ctx=ctx,
231
- context_enrichment_prompt_attr=self._context_enrichment_prompt,
232
- )
233
- summarization_prompt = get_summarization_prompt(
227
+ summarization_prompt = get_summarization_system_prompt(
234
228
  ctx=ctx,
235
229
  summarization_prompt_attr=self._summarization_prompt,
236
230
  )
237
231
  user_message = get_user_message(ctx, self._message, self._render_message)
238
- # Get the combined system prompt using the new getter
239
- system_prompt = get_combined_system_prompt(
240
- ctx=ctx,
241
- persona_attr=self._persona,
242
- system_prompt_attr=self._system_prompt,
243
- special_instruction_prompt_attr=self._special_instruction_prompt,
244
- )
245
232
  # 1. Prepare initial state (read history from previous session)
246
- history_data = await read_conversation_history(
233
+ conversation_history = await read_conversation_history(
247
234
  ctx=ctx,
248
235
  conversation_history_reader=self._conversation_history_reader,
249
236
  conversation_history_file_attr=self._conversation_history_file,
250
237
  render_history_file=self._render_history_file,
251
238
  conversation_history_attr=self._conversation_history,
252
239
  )
253
- history_list = history_data.history
254
- long_term_context = history_data.long_term_context
255
- conversation_summary = history_data.conversation_summary
256
-
257
- # 2. Enrich context and summarize history sequentially
258
- new_long_term_context = await maybe_enrich_context(
240
+ conversation_history.fetch_newest_notes()
241
+ # 2. Get system prompt and user prompt
242
+ system_prompt, user_message = get_system_and_user_prompt(
243
+ ctx=ctx,
244
+ user_message=user_message,
245
+ persona_attr=self._persona,
246
+ system_prompt_attr=self._system_prompt,
247
+ special_instruction_prompt_attr=self._special_instruction_prompt,
248
+ conversation_history=conversation_history,
249
+ )
250
+ # 3. Get the agent instance
251
+ agent = get_agent(
259
252
  ctx=ctx,
260
- history_list=history_list,
261
- long_term_context=long_term_context,
262
- should_enrich_context_attr=self._should_enrich_context,
263
- render_enrich_context=self._render_enrich_context,
264
- context_enrichment_token_threshold_attr=self._context_enrichment_token_threshold,
265
- render_context_enrichment_token_threshold=self._render_context_enrichment_token_threshold, # noqa
253
+ agent_attr=self._agent,
266
254
  model=model,
255
+ system_prompt=system_prompt,
267
256
  model_settings=model_settings,
268
- context_enrichment_prompt=context_enrichment_prompt,
269
- rate_limitter=self._rate_limitter,
257
+ tools_attr=self._tools,
258
+ additional_tools=self._additional_tools,
259
+ mcp_servers_attr=self._mcp_servers,
260
+ additional_mcp_servers=self._additional_mcp_servers,
270
261
  )
271
- new_history_list, new_conversation_summary = await maybe_summarize_history(
262
+ # 4. Run the agent iteration and save the results/history
263
+ result = await self._execute_agent(
264
+ ctx,
265
+ agent,
266
+ user_message,
267
+ conversation_history,
268
+ )
269
+ # 5. Summarize
270
+ conversation_history = await maybe_summarize_history(
272
271
  ctx=ctx,
273
- history_list=history_list,
274
- conversation_summary=conversation_summary,
272
+ conversation_history=conversation_history,
275
273
  should_summarize_history_attr=self._should_summarize_history,
276
274
  render_summarize_history=self._render_summarize_history,
277
- history_summarization_token_threshold_attr=self._history_summarization_token_threshold, # noqa
275
+ history_summarization_token_threshold_attr=(
276
+ self._history_summarization_token_threshold
277
+ ),
278
278
  render_history_summarization_token_threshold=(
279
279
  self._render_history_summarization_token_threshold
280
280
  ),
@@ -283,50 +283,22 @@ class LLMTask(BaseTask):
283
283
  summarization_prompt=summarization_prompt,
284
284
  rate_limitter=self._rate_limitter,
285
285
  )
286
-
287
- # 3. Build the final user prompt and system prompt
288
- final_user_prompt, system_info = extract_default_context(user_message)
289
- context_parts = [
290
- f"## System Information\n{json.dumps(system_info, indent=2)}",
291
- ]
292
- if new_long_term_context:
293
- context_parts.append(new_long_term_context)
294
- if new_conversation_summary:
295
- context_parts.append(new_conversation_summary)
296
-
297
- final_system_prompt = "\n\n".join(
298
- [system_prompt, "# Context", "\n\n---\n\n".join(context_parts)]
299
- )
300
- # 4. Get the agent instance
301
- agent = get_agent(
286
+ # 6. Write conversation history
287
+ await write_conversation_history(
302
288
  ctx=ctx,
303
- agent_attr=self._agent,
304
- model=model,
305
- system_prompt=final_system_prompt,
306
- model_settings=model_settings,
307
- tools_attr=self._tools,
308
- additional_tools=self._additional_tools,
309
- mcp_servers_attr=self._mcp_servers,
310
- additional_mcp_servers=self._additional_mcp_servers,
311
- )
312
- # 5. Run the agent iteration and save the results/history
313
- return await self._run_agent_and_save_history(
314
- ctx,
315
- agent,
316
- final_user_prompt,
317
- new_history_list,
318
- new_long_term_context,
319
- new_conversation_summary,
289
+ history_data=conversation_history,
290
+ conversation_history_writer=self._conversation_history_writer,
291
+ conversation_history_file_attr=self._conversation_history_file,
292
+ render_history_file=self._render_history_file,
320
293
  )
294
+ return result
321
295
 
322
- async def _run_agent_and_save_history(
296
+ async def _execute_agent(
323
297
  self,
324
298
  ctx: AnyContext,
325
299
  agent: "Agent",
326
300
  user_prompt: str,
327
- history_list: ListOfDict,
328
- long_term_context: str,
329
- conversation_summary: str,
301
+ conversation_history: ConversationHistory,
330
302
  ) -> Any:
331
303
  """Executes the agent, processes results, and saves history."""
332
304
  try:
@@ -334,23 +306,12 @@ class LLMTask(BaseTask):
334
306
  ctx=ctx,
335
307
  agent=agent,
336
308
  user_prompt=user_prompt,
337
- history_list=history_list,
309
+ history_list=conversation_history.history,
338
310
  rate_limitter=self._rate_limitter,
339
311
  )
340
312
  if agent_run and agent_run.result:
341
313
  new_history_list = json.loads(agent_run.result.all_messages_json())
342
- data_to_write = ConversationHistoryData(
343
- long_term_context=long_term_context,
344
- conversation_summary=conversation_summary,
345
- history=new_history_list,
346
- )
347
- await write_conversation_history(
348
- ctx=ctx,
349
- history_data=data_to_write,
350
- conversation_history_writer=self._conversation_history_writer,
351
- conversation_history_file_attr=self._conversation_history_file,
352
- render_history_file=self._render_history_file,
353
- )
314
+ conversation_history.history = new_history_list
354
315
  xcom_usage_key = f"{self.name}-usage"
355
316
  if xcom_usage_key not in ctx.xcom:
356
317
  ctx.xcom[xcom_usage_key] = Xcom([])
zrb/util/llm/prompt.py ADDED
@@ -0,0 +1,18 @@
1
+ import re
2
+
3
+
4
+ def _demote_markdown_headers(md: str) -> str:
5
+ def demote(match):
6
+ hashes = match.group(1)
7
+ return "#" + hashes + match.group(2) # add one `#`
8
+
9
+ # Replace headers at the beginning of a line
10
+ return re.sub(r"^(#{1,6})(\s)", demote, md, flags=re.MULTILINE)
11
+
12
+
13
def make_prompt_section(header: str, content: str, as_code: bool = False) -> str:
    """Render *content* under a ``# header`` line; blank content yields "".

    When *as_code* is set, the stripped content is wrapped verbatim in a
    ````` fence; otherwise its markdown headers are demoted one level so
    they nest beneath the section's own top-level header.
    """
    body = content.strip()
    if not body:
        return ""
    if as_code:
        return f"# {header}\n````\n{body}\n````\n"
    # Demote headers inline so the section's "# header" stays top-level.
    demoted = re.sub(
        r"^(#{1,6})(\s)",
        lambda m: "#" + m.group(1) + m.group(2),
        body,
        flags=re.MULTILINE,
    )
    return f"# {header}\n{demoted}\n"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: zrb
3
- Version: 1.9.16
3
+ Version: 1.10.0
4
4
  Summary: Your Automation Powerhouse
5
5
  Home-page: https://github.com/state-alchemists/zrb
6
6
  License: AGPL-3.0-or-later
@@ -1,4 +1,4 @@
1
- zrb/__init__.py,sha256=Rp5YD2qgKmUiz-f53JHYpxnHcAByrLOrCyjue6LbAMk,5101
1
+ zrb/__init__.py,sha256=W0Wz3UcnNf5oE-C0i5wkDWPISEktXv-ea-FCFO8zf2g,5114
2
2
  zrb/__main__.py,sha256=9SXH9MK4PVyU9lkEyHxiIUABbcsV2wseP94HmlqTR4M,2657
3
3
  zrb/attr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  zrb/attr/type.py,sha256=4TV5gPYMMrKh5V-yB6iRYKCbsXAH_AvGXMsjxKLHcUs,568
@@ -10,17 +10,17 @@ zrb/builtin/group.py,sha256=t008xLM4_fgbjfZrPoi_fQAnSHIo6MOiQSCHBO4GDYU,2379
10
10
  zrb/builtin/http.py,sha256=sLqEczuSxGYXWzyJR6frGOHkPTviu4BeyroUr3-ZuAI,4322
11
11
  zrb/builtin/jwt.py,sha256=3M5uaQhJZbKQLjTUft1OwPz_JxtmK-xtkjxWjciOQho,2859
12
12
  zrb/builtin/llm/chat_session.py,sha256=0R04DpBr_LGfNJbXIQ_4XQSxL7kY2M3U-bbu5lsXZ54,8542
13
- zrb/builtin/llm/history.py,sha256=jCMeRCHUsDFnQWyDoH9SOBptzceOs_wACvVpYkDOoTk,3086
13
+ zrb/builtin/llm/history.py,sha256=k4wY0anietrF5iEUVuYAAYGbhjs4TBo03goXzCk_pRM,3091
14
14
  zrb/builtin/llm/input.py,sha256=Nw-26uTWp2QhUgKJcP_IMHmtk-b542CCSQ_vCOjhvhM,877
15
15
  zrb/builtin/llm/llm_ask.py,sha256=oozfQwa1i2PnXV4qWbn60Pmd3fS0kgmhYCbfKlhr25o,4549
16
16
  zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
17
17
  zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
18
18
  zrb/builtin/llm/tool/api.py,sha256=OhmfLc2TwWKQYIMweGelqb5s4JF4nB-YynbSO4yb_Jk,2342
19
- zrb/builtin/llm/tool/cli.py,sha256=QqIil29dVOjbTxwb9Gib4KhlaJcOcto-OxEX5hHmA1s,1377
19
+ zrb/builtin/llm/tool/cli.py,sha256=dUWZrW2X5J_lONuzR__6-SbewSdi28E3RRuksjd4mWo,1234
20
20
  zrb/builtin/llm/tool/code.py,sha256=GRP_IZAkeL6RIlUm407BQRF992ES57pdzPaQdC5UsJU,8218
21
- zrb/builtin/llm/tool/file.py,sha256=qoQh5C0RPlQcIoLJp_nT16-w3FAekj7YtIdtsjigARg,22290
21
+ zrb/builtin/llm/tool/file.py,sha256=vUpkHPJHszdFKWjsh5Ma8_WGFwZMcm1nlJ-rGCIA_tI,22290
22
22
  zrb/builtin/llm/tool/rag.py,sha256=wB74JV7bxs0ec77b_09Z2lPjoR1WzPUvZbuXOdb9Q9g,9675
23
- zrb/builtin/llm/tool/sub_agent.py,sha256=7Awa9dpXqtJAZhxyXaKeZv5oIE2N_OqXhAbNmsOG49Y,4951
23
+ zrb/builtin/llm/tool/sub_agent.py,sha256=UWBLiuCK6FT8Ku0yPfSxd_k67h_Pme1K7d2VSABacjQ,4855
24
24
  zrb/builtin/llm/tool/web.py,sha256=gQlUsmYCJOFJtNjwpjK-xk13LMvrMSpSaFHXUTnIayQ,7090
25
25
  zrb/builtin/md5.py,sha256=690RV2LbW7wQeTFxY-lmmqTSVEEZv3XZbjEUW1Q3XpE,1480
26
26
  zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -217,9 +217,9 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
217
217
  zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
218
218
  zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
219
219
  zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
220
- zrb/config/config.py,sha256=UpVm_IFD_bSfGS-QJoRo86xV63eGIuIwWICMaUZgR00,15268
221
- zrb/config/llm_config.py,sha256=YxMV_rRY8SU57aq4xlRiMm223yMzfQUzADmqAyoNRTI,17415
222
- zrb/config/llm_rate_limitter.py,sha256=0U0qm4qgCWqBjohPdwANNUzLR3joJCFYr6oW6Xpccfo,4436
220
+ zrb/config/config.py,sha256=vUmJWQHRgmMVL1FmoukjQe9J6vCuyzqBIg4FncREGmw,15108
221
+ zrb/config/llm_config.py,sha256=ROGDX3LKE5YI9JkhcMUO0E-ffscj716PAN4b7u1EFRY,20676
222
+ zrb/config/llm_rate_limitter.py,sha256=P4vR7qxwiGwjlKx2kHcfdIxwGbJB98vdN-UQEH-Q2WU,4894
223
223
  zrb/config/web_auth_config.py,sha256=_PXatQTYh2mX9H3HSYSQKp13zm1RlLyVIoeIr6KYMQ8,6279
224
224
  zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
225
225
  zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850
@@ -339,16 +339,15 @@ zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
339
339
  zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
340
340
  zrb/task/llm/agent.py,sha256=BZHbz-YXgSdm1tTwGMR_maqcd3yMFGSdzLyDjuxT_XI,6702
341
341
  zrb/task/llm/config.py,sha256=TlyH925_fboIlK2Ixf34tynmenqs9s9rfsnPs4jff78,3490
342
- zrb/task/llm/context.py,sha256=LGGQ_mb1dWorfshHjGgXEW_pRweGj-6MZcIUFq3AHy4,2213
343
- zrb/task/llm/context_enrichment.py,sha256=djY4fE9C0zUxJPrrb2xDboBXr_2kPUS_b4HjqslVpHg,6051
342
+ zrb/task/llm/conversation_history.py,sha256=B_PDWYL_q66s0xwWBzMSomqPN6u3gkXlIeXBD5A0Apg,4416
343
+ zrb/task/llm/conversation_history_model.py,sha256=Zbz7w0M2FeOklWG4AVz8C2y2AZ87aNMZ5I0YbBanO0Y,16312
344
344
  zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
345
- zrb/task/llm/history.py,sha256=dqCwJYRoMVrvyMHb4M6-KLcwAUF4tbDCP7qgD1h694s,8540
346
- zrb/task/llm/history_summarization.py,sha256=V0G1BiISnxxmD8040PrvT0_dfqGE7zbLtk74KUpuqig,6050
345
+ zrb/task/llm/history_summarization.py,sha256=vY2_iLULgSNTaqW1xJqOhI8oOH3vNEsZn_yNcx6jYX8,8104
347
346
  zrb/task/llm/print_node.py,sha256=zocTKi9gZDxl2I6KNu095TmMc13Yip6SNuWYnswS680,4060
348
- zrb/task/llm/prompt.py,sha256=qhR8qS8RgaQ23D3amaHSHnBNv_NOnFB_1uxaQNc8KFw,3417
347
+ zrb/task/llm/prompt.py,sha256=ARyJ2Q0G6N7I2X2KZfLQW45yj9nmzl5SfX90rrY7TVU,7417
349
348
  zrb/task/llm/tool_wrapper.py,sha256=8_bL8m_WpRf-pVKSrvQIVqT-m2sUA87a1RBQG13lhp4,6457
350
349
  zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
351
- zrb/task/llm_task.py,sha256=Vq2kPnE40xJZtHYHjeCBv-nNFKzSCkyMJaVUNXmEmuc,15616
350
+ zrb/task/llm_task.py,sha256=TTYb9FYqZX_OIgDE6q5Z9IVuM6NcsKFeCVIi6ovQDE8,13712
352
351
  zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
353
352
  zrb/task/rsync_task.py,sha256=WfqNSaicJgYWpunNU34eYxXDqHDHOftuDHyWJKjqwg0,6365
354
353
  zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -383,6 +382,7 @@ zrb/util/git_subtree.py,sha256=AyQWCWEi2EIzEpYXRnYN55157KMUql0WHj70QNw5PHU,4612
383
382
  zrb/util/git_subtree_model.py,sha256=P_gJ0zhOAc3gFM6sYcjc0Ack9dFBt75TI5fXdE0q320,871
384
383
  zrb/util/group.py,sha256=T82yr3qg9I5k10VPXkMyrIRIqyfzadSH813bqzwKEPI,4718
385
384
  zrb/util/init_path.py,sha256=9eN7CkWNGhDBpjTQs2j9YHVMzui7Y8DEb1WP4aTPzeo,659
385
+ zrb/util/llm/prompt.py,sha256=tJEGV2X7v13b1PXUzRXzu1e1HnY6d9JLtqbUGiZqHoo,573
386
386
  zrb/util/load.py,sha256=DK0KYSlu48HCoGPqnW1IxnE3pHrZSPCstfz8Fjyqqv8,2140
387
387
  zrb/util/run.py,sha256=vu-mcSWDP_WuuvIKqM_--Gk3WkABO1oTXiHmBRTvVQk,546
388
388
  zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -393,7 +393,7 @@ zrb/util/todo.py,sha256=r9_KYF2-hLKMNjsp6AFK9zivykMrywd-kJ4bCwfdafI,19323
393
393
  zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
394
394
  zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
395
395
  zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
396
- zrb-1.9.16.dist-info/METADATA,sha256=mDFOvi0SF15P4y4BKlynOsHZrQJClrZC2dNeTB-Xzz0,9778
397
- zrb-1.9.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
398
- zrb-1.9.16.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
399
- zrb-1.9.16.dist-info/RECORD,,
396
+ zrb-1.10.0.dist-info/METADATA,sha256=L0WbyLS9Ormz797L5aHD2ycfplf___3mDYqx05H_Eqs,9778
397
+ zrb-1.10.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
398
+ zrb-1.10.0.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
399
+ zrb-1.10.0.dist-info/RECORD,,
zrb/task/llm/context.py DELETED
@@ -1,58 +0,0 @@
1
- import datetime
2
- import os
3
- import platform
4
- import re
5
- from typing import Any
6
-
7
- from zrb.util.file import read_dir, read_file_with_line_numbers
8
-
9
-
10
- def extract_default_context(user_message: str) -> tuple[str, dict[str, Any]]:
11
- """
12
- Return modified user message and default context including time, OS, and file references.
13
- """
14
- modified_user_message = user_message
15
- # Match “@” + any non-space/comma sequence that contains at least one “/”
16
- pattern = r"(?<!\w)@(?=[^,\s]*/)([^,\s]+)"
17
- potential_resource_path = re.findall(pattern, user_message)
18
- current_references = []
19
-
20
- for ref in potential_resource_path:
21
- resource_path = os.path.abspath(os.path.expanduser(ref))
22
- print("RESOURCE PATH", resource_path)
23
- if os.path.isfile(resource_path):
24
- content = read_file_with_line_numbers(resource_path)
25
- current_references.append(
26
- {
27
- "reference": ref,
28
- "name": resource_path,
29
- "type": "file",
30
- "note": "line numbers are included in the content",
31
- "content": content,
32
- }
33
- )
34
- # Remove the '@' from the modified user message for valid file paths
35
- modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
36
- elif os.path.isdir(resource_path):
37
- content = read_dir(resource_path)
38
- current_references.append(
39
- {
40
- "reference": ref,
41
- "name": resource_path,
42
- "type": "directory",
43
- "content": content,
44
- }
45
- )
46
- # Remove the '@' from the modified user message for valid directory paths
47
- modified_user_message = modified_user_message.replace(f"@{ref}", ref, 1)
48
-
49
- context = {
50
- "current_time": datetime.datetime.now().isoformat(),
51
- "current_working_directory": os.getcwd(),
52
- "current_os": platform.system(),
53
- "os_version": platform.version(),
54
- "python_version": platform.python_version(),
55
- "current_references": current_references,
56
- }
57
-
58
- return modified_user_message, context