zrb 1.15.3__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of zrb might be problematic; see the package's registry page for more details.

Files changed (204)
  1. zrb/__init__.py +118 -133
  2. zrb/attr/type.py +10 -7
  3. zrb/builtin/__init__.py +55 -1
  4. zrb/builtin/git.py +12 -1
  5. zrb/builtin/group.py +31 -15
  6. zrb/builtin/llm/chat.py +147 -0
  7. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  8. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  9. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  10. zrb/builtin/searxng/config/settings.yml +5671 -0
  11. zrb/builtin/searxng/start.py +21 -0
  12. zrb/builtin/shell/autocomplete/bash.py +4 -3
  13. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  14. zrb/callback/callback.py +8 -1
  15. zrb/cmd/cmd_result.py +2 -1
  16. zrb/config/config.py +555 -169
  17. zrb/config/helper.py +84 -0
  18. zrb/config/web_auth_config.py +50 -35
  19. zrb/context/any_shared_context.py +20 -3
  20. zrb/context/context.py +39 -5
  21. zrb/context/print_fn.py +13 -0
  22. zrb/context/shared_context.py +17 -8
  23. zrb/group/any_group.py +3 -3
  24. zrb/group/group.py +3 -3
  25. zrb/input/any_input.py +5 -1
  26. zrb/input/base_input.py +18 -6
  27. zrb/input/option_input.py +41 -1
  28. zrb/input/text_input.py +7 -24
  29. zrb/llm/agent/__init__.py +9 -0
  30. zrb/llm/agent/agent.py +215 -0
  31. zrb/llm/agent/summarizer.py +20 -0
  32. zrb/llm/app/__init__.py +10 -0
  33. zrb/llm/app/completion.py +281 -0
  34. zrb/llm/app/confirmation/allow_tool.py +66 -0
  35. zrb/llm/app/confirmation/handler.py +178 -0
  36. zrb/llm/app/confirmation/replace_confirmation.py +77 -0
  37. zrb/llm/app/keybinding.py +34 -0
  38. zrb/llm/app/layout.py +117 -0
  39. zrb/llm/app/lexer.py +155 -0
  40. zrb/llm/app/redirection.py +28 -0
  41. zrb/llm/app/style.py +16 -0
  42. zrb/llm/app/ui.py +733 -0
  43. zrb/llm/config/__init__.py +4 -0
  44. zrb/llm/config/config.py +122 -0
  45. zrb/llm/config/limiter.py +247 -0
  46. zrb/llm/history_manager/__init__.py +4 -0
  47. zrb/llm/history_manager/any_history_manager.py +23 -0
  48. zrb/llm/history_manager/file_history_manager.py +91 -0
  49. zrb/llm/history_processor/summarizer.py +108 -0
  50. zrb/llm/note/__init__.py +3 -0
  51. zrb/llm/note/manager.py +122 -0
  52. zrb/llm/prompt/__init__.py +29 -0
  53. zrb/llm/prompt/claude_compatibility.py +92 -0
  54. zrb/llm/prompt/compose.py +55 -0
  55. zrb/llm/prompt/default.py +51 -0
  56. zrb/llm/prompt/markdown/file_extractor.md +112 -0
  57. zrb/llm/prompt/markdown/mandate.md +23 -0
  58. zrb/llm/prompt/markdown/persona.md +3 -0
  59. zrb/llm/prompt/markdown/repo_extractor.md +112 -0
  60. zrb/llm/prompt/markdown/repo_summarizer.md +29 -0
  61. zrb/llm/prompt/markdown/summarizer.md +21 -0
  62. zrb/llm/prompt/note.py +41 -0
  63. zrb/llm/prompt/system_context.py +46 -0
  64. zrb/llm/prompt/zrb.py +41 -0
  65. zrb/llm/skill/__init__.py +3 -0
  66. zrb/llm/skill/manager.py +86 -0
  67. zrb/llm/task/__init__.py +4 -0
  68. zrb/llm/task/llm_chat_task.py +316 -0
  69. zrb/llm/task/llm_task.py +245 -0
  70. zrb/llm/tool/__init__.py +39 -0
  71. zrb/llm/tool/bash.py +75 -0
  72. zrb/llm/tool/code.py +266 -0
  73. zrb/llm/tool/file.py +419 -0
  74. zrb/llm/tool/note.py +70 -0
  75. zrb/{builtin/llm → llm}/tool/rag.py +33 -37
  76. zrb/llm/tool/search/brave.py +53 -0
  77. zrb/llm/tool/search/searxng.py +47 -0
  78. zrb/llm/tool/search/serpapi.py +47 -0
  79. zrb/llm/tool/skill.py +19 -0
  80. zrb/llm/tool/sub_agent.py +70 -0
  81. zrb/llm/tool/web.py +97 -0
  82. zrb/llm/tool/zrb_task.py +66 -0
  83. zrb/llm/util/attachment.py +101 -0
  84. zrb/llm/util/prompt.py +104 -0
  85. zrb/llm/util/stream_response.py +178 -0
  86. zrb/runner/cli.py +21 -20
  87. zrb/runner/common_util.py +24 -19
  88. zrb/runner/web_route/task_input_api_route.py +5 -5
  89. zrb/runner/web_util/user.py +7 -3
  90. zrb/session/any_session.py +12 -9
  91. zrb/session/session.py +38 -17
  92. zrb/task/any_task.py +24 -3
  93. zrb/task/base/context.py +42 -22
  94. zrb/task/base/execution.py +67 -55
  95. zrb/task/base/lifecycle.py +14 -7
  96. zrb/task/base/monitoring.py +12 -7
  97. zrb/task/base_task.py +113 -50
  98. zrb/task/base_trigger.py +16 -6
  99. zrb/task/cmd_task.py +6 -0
  100. zrb/task/http_check.py +11 -5
  101. zrb/task/make_task.py +5 -3
  102. zrb/task/rsync_task.py +30 -10
  103. zrb/task/scaffolder.py +7 -4
  104. zrb/task/scheduler.py +7 -4
  105. zrb/task/tcp_check.py +6 -4
  106. zrb/util/ascii_art/art/bee.txt +17 -0
  107. zrb/util/ascii_art/art/cat.txt +9 -0
  108. zrb/util/ascii_art/art/ghost.txt +16 -0
  109. zrb/util/ascii_art/art/panda.txt +17 -0
  110. zrb/util/ascii_art/art/rose.txt +14 -0
  111. zrb/util/ascii_art/art/unicorn.txt +15 -0
  112. zrb/util/ascii_art/banner.py +92 -0
  113. zrb/util/attr.py +54 -39
  114. zrb/util/cli/markdown.py +32 -0
  115. zrb/util/cli/text.py +30 -0
  116. zrb/util/cmd/command.py +33 -10
  117. zrb/util/file.py +61 -33
  118. zrb/util/git.py +2 -2
  119. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  120. zrb/util/match.py +78 -0
  121. zrb/util/run.py +3 -3
  122. zrb/util/string/conversion.py +1 -1
  123. zrb/util/truncate.py +23 -0
  124. zrb/util/yaml.py +204 -0
  125. zrb/xcom/xcom.py +10 -0
  126. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/METADATA +41 -27
  127. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/RECORD +129 -131
  128. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/WHEEL +1 -1
  129. zrb/attr/__init__.py +0 -0
  130. zrb/builtin/llm/chat_session.py +0 -311
  131. zrb/builtin/llm/history.py +0 -71
  132. zrb/builtin/llm/input.py +0 -27
  133. zrb/builtin/llm/llm_ask.py +0 -187
  134. zrb/builtin/llm/previous-session.js +0 -21
  135. zrb/builtin/llm/tool/__init__.py +0 -0
  136. zrb/builtin/llm/tool/api.py +0 -71
  137. zrb/builtin/llm/tool/cli.py +0 -38
  138. zrb/builtin/llm/tool/code.py +0 -254
  139. zrb/builtin/llm/tool/file.py +0 -626
  140. zrb/builtin/llm/tool/sub_agent.py +0 -137
  141. zrb/builtin/llm/tool/web.py +0 -195
  142. zrb/builtin/project/__init__.py +0 -0
  143. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/__init__.py +0 -0
  144. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/template/app_template/module/my_module/service/__init__.py +0 -0
  145. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/common/__init__.py +0 -0
  146. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/__init__.py +0 -0
  147. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/__init__.py +0 -0
  148. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/permission/__init__.py +0 -0
  149. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/__init__.py +0 -0
  150. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/__init__.py +0 -0
  151. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/__init__.py +0 -0
  152. zrb/builtin/project/create/__init__.py +0 -0
  153. zrb/builtin/shell/__init__.py +0 -0
  154. zrb/builtin/shell/autocomplete/__init__.py +0 -0
  155. zrb/callback/__init__.py +0 -0
  156. zrb/cmd/__init__.py +0 -0
  157. zrb/config/default_prompt/file_extractor_system_prompt.md +0 -12
  158. zrb/config/default_prompt/interactive_system_prompt.md +0 -35
  159. zrb/config/default_prompt/persona.md +0 -1
  160. zrb/config/default_prompt/repo_extractor_system_prompt.md +0 -112
  161. zrb/config/default_prompt/repo_summarizer_system_prompt.md +0 -10
  162. zrb/config/default_prompt/summarization_prompt.md +0 -16
  163. zrb/config/default_prompt/system_prompt.md +0 -32
  164. zrb/config/llm_config.py +0 -243
  165. zrb/config/llm_context/config.py +0 -129
  166. zrb/config/llm_context/config_parser.py +0 -46
  167. zrb/config/llm_rate_limitter.py +0 -137
  168. zrb/content_transformer/__init__.py +0 -0
  169. zrb/context/__init__.py +0 -0
  170. zrb/dot_dict/__init__.py +0 -0
  171. zrb/env/__init__.py +0 -0
  172. zrb/group/__init__.py +0 -0
  173. zrb/input/__init__.py +0 -0
  174. zrb/runner/__init__.py +0 -0
  175. zrb/runner/web_route/__init__.py +0 -0
  176. zrb/runner/web_route/home_page/__init__.py +0 -0
  177. zrb/session/__init__.py +0 -0
  178. zrb/session_state_log/__init__.py +0 -0
  179. zrb/session_state_logger/__init__.py +0 -0
  180. zrb/task/__init__.py +0 -0
  181. zrb/task/base/__init__.py +0 -0
  182. zrb/task/llm/__init__.py +0 -0
  183. zrb/task/llm/agent.py +0 -243
  184. zrb/task/llm/config.py +0 -103
  185. zrb/task/llm/conversation_history.py +0 -128
  186. zrb/task/llm/conversation_history_model.py +0 -242
  187. zrb/task/llm/default_workflow/coding.md +0 -24
  188. zrb/task/llm/default_workflow/copywriting.md +0 -17
  189. zrb/task/llm/default_workflow/researching.md +0 -18
  190. zrb/task/llm/error.py +0 -95
  191. zrb/task/llm/history_summarization.py +0 -216
  192. zrb/task/llm/print_node.py +0 -101
  193. zrb/task/llm/prompt.py +0 -325
  194. zrb/task/llm/tool_wrapper.py +0 -220
  195. zrb/task/llm/typing.py +0 -3
  196. zrb/task/llm_task.py +0 -341
  197. zrb/task_status/__init__.py +0 -0
  198. zrb/util/__init__.py +0 -0
  199. zrb/util/cli/__init__.py +0 -0
  200. zrb/util/cmd/__init__.py +0 -0
  201. zrb/util/codemod/__init__.py +0 -0
  202. zrb/util/string/__init__.py +0 -0
  203. zrb/xcom/__init__.py +0 -0
  204. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/entry_points.txt +0 -0
@@ -1,311 +0,0 @@
1
- """
2
- This module provides functions for managing interactive chat sessions with an LLM.
3
-
4
- It handles reading user input, triggering the LLM task, and managing the
5
- conversation flow via XCom.
6
- """
7
-
8
- import asyncio
9
- import sys
10
-
11
- from zrb.config.llm_config import llm_config
12
- from zrb.context.any_context import AnyContext
13
- from zrb.util.cli.style import stylize_blue, stylize_bold_yellow, stylize_faint
14
- from zrb.util.string.conversion import to_boolean
15
-
16
-
17
- async def read_user_prompt(ctx: AnyContext) -> str:
18
- """
19
- Reads user input from the CLI for an interactive chat session.
20
- Orchestrates the session by calling helper functions.
21
- """
22
- _show_info(ctx)
23
- final_result = await _handle_initial_message(ctx)
24
- if ctx.is_web_mode:
25
- return final_result
26
- is_tty = ctx.is_tty
27
- reader = await _setup_input_reader(is_tty)
28
- multiline_mode = False
29
- current_modes = ctx.input.modes
30
- current_yolo_mode = ctx.input.yolo
31
- user_inputs = []
32
- while True:
33
- await asyncio.sleep(0.01)
34
- # Get user input based on mode
35
- if not multiline_mode:
36
- ctx.print("💬 >>", plain=True)
37
- user_input = await _read_next_line(reader, ctx)
38
- if not multiline_mode:
39
- ctx.print("", plain=True)
40
- # Handle user input
41
- if user_input.strip().lower() in ("/bye", "/quit", "/q", "/exit"):
42
- user_prompt = "\n".join(user_inputs)
43
- user_inputs = []
44
- result = await _trigger_ask_and_wait_for_result(
45
- ctx, user_prompt, current_modes, current_yolo_mode
46
- )
47
- if result is not None:
48
- final_result = result
49
- break
50
- elif user_input.strip().lower() in ("/multi",):
51
- multiline_mode = True
52
- elif user_input.strip().lower() in ("/end",):
53
- ctx.print("", plain=True)
54
- multiline_mode = False
55
- user_prompt = "\n".join(user_inputs)
56
- user_inputs = []
57
- result = await _trigger_ask_and_wait_for_result(
58
- ctx, user_prompt, current_modes, current_yolo_mode
59
- )
60
- if result is not None:
61
- final_result = result
62
- elif user_input.strip().lower().startswith("/mode"):
63
- mode_parts = user_input.split(" ", maxsplit=2)
64
- if len(mode_parts) > 1:
65
- current_modes = mode_parts[1]
66
- ctx.print(f"Current mode: {current_modes}", plain=True)
67
- ctx.print("", plain=True)
68
- continue
69
- elif user_input.strip().lower().startswith("/yolo"):
70
- yolo_mode_parts = user_input.split(" ", maxsplit=2)
71
- if len(yolo_mode_parts) > 1:
72
- current_yolo_mode = to_boolean(yolo_mode_parts[1])
73
- ctx.print(f"Current_yolo mode: {current_yolo_mode}", plain=True)
74
- ctx.print("", plain=True)
75
- continue
76
- elif user_input.strip().lower() in ("/help", "/info"):
77
- _show_info(ctx)
78
- continue
79
- else:
80
- user_inputs.append(user_input)
81
- if multiline_mode:
82
- continue
83
- user_prompt = "\n".join(user_inputs)
84
- user_inputs = []
85
- result = await _trigger_ask_and_wait_for_result(
86
- ctx, user_prompt, current_modes, current_yolo_mode
87
- )
88
- if result is not None:
89
- final_result = result
90
- return final_result
91
-
92
-
93
- def _show_info(ctx: AnyContext):
94
- """
95
- Displays the available chat session commands to the user.
96
- Args:
97
- ctx: The context object for the task.
98
- """
99
- ctx.print(
100
- "\n".join(
101
- [
102
- _show_command("/bye", "Quit from chat session"),
103
- _show_command("/multi", "Start multiline input"),
104
- _show_command("/end", "End multiline input"),
105
- _show_command("/modes", "Show current modes"),
106
- _show_subcommand("<mode1,mode2,..>", "Set current modes"),
107
- _show_command("/yolo", "Get current YOLO mode"),
108
- _show_subcommand("<true|false>", "Set YOLO mode to true/false"),
109
- _show_command("/help", "Show this message"),
110
- ]
111
- ),
112
- plain=True,
113
- )
114
- ctx.print("", plain=True)
115
-
116
-
117
- def _show_command(command: str, description: str) -> str:
118
- styled_command = stylize_bold_yellow(command.ljust(25))
119
- styled_description = stylize_faint(description)
120
- return f" {styled_command} {styled_description}"
121
-
122
-
123
- def _show_subcommand(command: str, description: str) -> str:
124
- styled_command = stylize_blue(f" {command}".ljust(25))
125
- styled_description = stylize_faint(description)
126
- return f" {styled_command} {styled_description}"
127
-
128
-
129
- async def _handle_initial_message(ctx: AnyContext) -> str:
130
- """Processes the initial message from the command line."""
131
- if not ctx.input.message or ctx.input.message.strip() == "":
132
- return ""
133
- ctx.print("💬 >>", plain=True)
134
- ctx.print(ctx.input.message, plain=True)
135
- ctx.print("", plain=True)
136
- result = await _trigger_ask_and_wait_for_result(
137
- ctx,
138
- user_prompt=ctx.input.message,
139
- modes=ctx.input.modes,
140
- yolo_mode=ctx.input.yolo,
141
- previous_session_name=ctx.input.previous_session,
142
- start_new=ctx.input.start_new,
143
- )
144
- return result if result is not None else ""
145
-
146
-
147
- async def _setup_input_reader(is_interactive: bool):
148
- """Sets up and returns the appropriate asynchronous input reader."""
149
- if is_interactive:
150
- from prompt_toolkit import PromptSession
151
-
152
- return PromptSession()
153
-
154
- loop = asyncio.get_event_loop()
155
- reader = asyncio.StreamReader(loop=loop)
156
- protocol = asyncio.StreamReaderProtocol(reader)
157
- await loop.connect_read_pipe(lambda: protocol, sys.stdin)
158
- return reader
159
-
160
-
161
- async def _read_next_line(reader, ctx: AnyContext) -> str:
162
- """Reads one line of input using the provided reader."""
163
- from prompt_toolkit import PromptSession
164
-
165
- if isinstance(reader, PromptSession):
166
- return await reader.prompt_async()
167
-
168
- line_bytes = await reader.readline()
169
- if not line_bytes:
170
- return "/bye" # Signal to exit
171
-
172
- user_input = line_bytes.decode().strip()
173
- ctx.print(user_input, plain=True)
174
- return user_input
175
-
176
-
177
- async def _trigger_ask_and_wait_for_result(
178
- ctx: AnyContext,
179
- user_prompt: str,
180
- modes: str,
181
- yolo_mode: bool,
182
- previous_session_name: str | None = None,
183
- start_new: bool = False,
184
- ) -> str | None:
185
- """
186
- Triggers the LLM ask task and waits for the result via XCom.
187
-
188
- Args:
189
- ctx: The context object for the task.
190
- user_prompt: The user's message to send to the LLM.
191
- previous_session_name: The name of the previous chat session (optional).
192
- start_new: Whether to start a new conversation (optional).
193
-
194
- Returns:
195
- The result from the LLM task, or None if the user prompt is empty.
196
- """
197
- if user_prompt.strip() == "":
198
- return None
199
- await _trigger_ask(
200
- ctx, user_prompt, modes, yolo_mode, previous_session_name, start_new
201
- )
202
- result = await _wait_ask_result(ctx)
203
- md_result = _render_markdown(result) if result is not None else ""
204
- ctx.print("\n🤖 >>", plain=True)
205
- ctx.print(md_result, plain=True)
206
- ctx.print("", plain=True)
207
- return result
208
-
209
-
210
- def _render_markdown(markdown_text: str) -> str:
211
- """
212
- Renders Markdown to a string, ensuring link URLs are visible.
213
- """
214
- from rich.console import Console
215
- from rich.markdown import Markdown
216
-
217
- console = Console()
218
- markdown = Markdown(markdown_text, hyperlinks=False)
219
- with console.capture() as capture:
220
- console.print(markdown)
221
- return capture.get()
222
-
223
-
224
- def get_llm_ask_input_mapping(callback_ctx: AnyContext):
225
- """
226
- Generates the input mapping for the LLM ask task from the callback context.
227
-
228
- Args:
229
- callback_ctx: The context object for the callback.
230
-
231
- Returns:
232
- A dictionary containing the input mapping for the LLM ask task.
233
- """
234
- data = callback_ctx.xcom.ask_trigger.pop()
235
- system_prompt = callback_ctx.input.system_prompt
236
- if system_prompt is None or system_prompt.strip() == "":
237
- system_prompt = llm_config.default_interactive_system_prompt
238
- return {
239
- "model": callback_ctx.input.model,
240
- "base-url": callback_ctx.input.base_url,
241
- "api-key": callback_ctx.input.api_key,
242
- "system-prompt": system_prompt,
243
- "start-new": data.get("start_new"),
244
- "previous-session": data.get("previous_session_name"),
245
- "message": data.get("message"),
246
- "modes": data.get("modes"),
247
- "yolo": data.get("yolo"),
248
- }
249
-
250
-
251
- async def _trigger_ask(
252
- ctx: AnyContext,
253
- user_prompt: str,
254
- modes: str,
255
- yolo_mode: bool,
256
- previous_session_name: str | None = None,
257
- start_new: bool = False,
258
- ):
259
- """
260
- Triggers the LLM ask task by pushing data to the 'ask_trigger' XCom queue.
261
-
262
- Args:
263
- ctx: The context object for the task.
264
- user_prompt: The user's message to send to the LLM.
265
- previous_session_name: The name of the previous chat session (optional).
266
- start_new: Whether to start a new conversation (optional).
267
- """
268
- if previous_session_name is None:
269
- previous_session_name = await _wait_ask_session_name(ctx)
270
- ctx.xcom["ask_trigger"].push(
271
- {
272
- "previous_session_name": previous_session_name,
273
- "start_new": start_new,
274
- "message": user_prompt,
275
- "modes": modes,
276
- "yolo": yolo_mode,
277
- }
278
- )
279
-
280
-
281
- async def _wait_ask_result(ctx: AnyContext) -> str | None:
282
- """
283
- Waits for and retrieves the LLM task result from the 'ask_result' XCom queue.
284
-
285
- Args:
286
- ctx: The context object for the task.
287
-
288
- Returns:
289
- The result string from the LLM task.
290
- """
291
- while "ask_result" not in ctx.xcom or len(ctx.xcom.ask_result) == 0:
292
- await asyncio.sleep(0.1)
293
- if "ask_error" in ctx.xcom and len(ctx.xcom.ask_error) > 0:
294
- ctx.xcom.ask_error.pop()
295
- return None
296
- return ctx.xcom.ask_result.pop()
297
-
298
-
299
- async def _wait_ask_session_name(ctx: AnyContext) -> str:
300
- """
301
- Waits for and retrieves the LLM chat session name from the 'ask_session_name' XCom queue.
302
-
303
- Args:
304
- ctx: The context object for the task.
305
-
306
- Returns:
307
- The session name string.
308
- """
309
- while "ask_session_name" not in ctx.xcom or len(ctx.xcom.ask_session_name) == 0:
310
- await asyncio.sleep(0.1)
311
- return ctx.xcom.ask_session_name.pop()
@@ -1,71 +0,0 @@
1
- import json
2
- import os
3
- from typing import Any
4
-
5
- from zrb.config.config import CFG
6
- from zrb.context.any_shared_context import AnySharedContext
7
- from zrb.task.llm.conversation_history_model import ConversationHistory
8
- from zrb.util.file import read_file, write_file
9
-
10
-
11
- def read_chat_conversation(ctx: AnySharedContext) -> dict[str, Any] | list | None:
12
- """Reads conversation history from the session file.
13
- Returns the raw dictionary or list loaded from JSON, or None if not found/empty.
14
- The LLMTask will handle parsing this into ConversationHistory.
15
- """
16
- if ctx.input.start_new:
17
- return None # Indicate no history to load
18
- previous_session_name = ctx.input.previous_session
19
- if not previous_session_name: # Check for empty string or None
20
- last_session_file_path = os.path.join(CFG.LLM_HISTORY_DIR, "last-session")
21
- if os.path.isfile(last_session_file_path):
22
- previous_session_name = read_file(last_session_file_path).strip()
23
- if not previous_session_name: # Handle empty last-session file
24
- return None
25
- else:
26
- return None # No previous session specified and no last session found
27
- conversation_file_path = os.path.join(
28
- CFG.LLM_HISTORY_DIR, f"{previous_session_name}.json"
29
- )
30
- if not os.path.isfile(conversation_file_path):
31
- ctx.log_warning(f"History file not found: {conversation_file_path}")
32
- return None
33
- try:
34
- content = read_file(conversation_file_path)
35
- if not content.strip():
36
- ctx.log_warning(f"History file is empty: {conversation_file_path}")
37
- return None
38
- # Return the raw loaded data (dict or list)
39
- return json.loads(content)
40
- except json.JSONDecodeError:
41
- ctx.log_warning(
42
- f"Could not decode JSON from history file '{conversation_file_path}'. "
43
- "Treating as empty history."
44
- )
45
- return None
46
- except Exception as e:
47
- ctx.log_warning(
48
- f"Error reading history file '{conversation_file_path}': {e}. "
49
- "Treating as empty history."
50
- )
51
- return None
52
-
53
-
54
- def write_chat_conversation(ctx: AnySharedContext, history_data: ConversationHistory):
55
- """Writes the conversation history data (including context) to a session file."""
56
- os.makedirs(CFG.LLM_HISTORY_DIR, exist_ok=True)
57
- current_session_name = ctx.session.name
58
- if not current_session_name:
59
- ctx.log_warning("Cannot write history: Session name is empty.")
60
- return
61
- conversation_file_path = os.path.join(
62
- CFG.LLM_HISTORY_DIR, f"{current_session_name}.json"
63
- )
64
- try:
65
- # Use model_dump_json to serialize the Pydantic model
66
- write_file(conversation_file_path, history_data.model_dump_json(indent=2))
67
- # Update the last-session pointer
68
- last_session_file_path = os.path.join(CFG.LLM_HISTORY_DIR, "last-session")
69
- write_file(last_session_file_path, current_session_name)
70
- except Exception as e:
71
- ctx.log_error(f"Error writing history file '{conversation_file_path}': {e}")
zrb/builtin/llm/input.py DELETED
@@ -1,27 +0,0 @@
1
- import os
2
-
3
- from zrb.context.any_shared_context import AnySharedContext
4
- from zrb.input.str_input import StrInput
5
- from zrb.util.file import read_file
6
- from zrb.util.string.conversion import to_pascal_case
7
-
8
-
9
- class PreviousSessionInput(StrInput):
10
-
11
- def to_html(self, ctx: AnySharedContext) -> str:
12
- name = self.name
13
- description = self.description
14
- default = self.get_default_str(ctx)
15
- script = read_file(
16
- file_path=os.path.join(os.path.dirname(__file__), "previous-session.js"),
17
- replace_map={
18
- "CURRENT_INPUT_NAME": name,
19
- "CurrentPascalInputName": to_pascal_case(name),
20
- },
21
- )
22
- return "\n".join(
23
- [
24
- f'<input name="{name}" placeholder="{description}" value="{default}" />',
25
- f"<script>{script}</script>",
26
- ]
27
- )
@@ -1,187 +0,0 @@
1
- from zrb.builtin.group import llm_group
2
- from zrb.builtin.llm.chat_session import get_llm_ask_input_mapping, read_user_prompt
3
- from zrb.builtin.llm.history import read_chat_conversation, write_chat_conversation
4
- from zrb.builtin.llm.input import PreviousSessionInput
5
- from zrb.builtin.llm.tool.api import get_current_location, get_current_weather
6
- from zrb.builtin.llm.tool.cli import run_shell_command
7
- from zrb.builtin.llm.tool.code import analyze_repo
8
- from zrb.builtin.llm.tool.file import (
9
- analyze_file,
10
- list_files,
11
- read_from_file,
12
- read_many_files,
13
- replace_in_file,
14
- search_files,
15
- write_many_files,
16
- write_to_file,
17
- )
18
- from zrb.builtin.llm.tool.web import (
19
- create_search_internet_tool,
20
- open_web_page,
21
- search_arxiv,
22
- search_wikipedia,
23
- )
24
- from zrb.callback.callback import Callback
25
- from zrb.config.config import CFG
26
- from zrb.config.llm_config import llm_config
27
- from zrb.input.bool_input import BoolInput
28
- from zrb.input.str_input import StrInput
29
- from zrb.input.text_input import TextInput
30
- from zrb.task.base_trigger import BaseTrigger
31
- from zrb.task.llm_task import LLMTask
32
-
33
- _llm_ask_inputs = [
34
- StrInput(
35
- "model",
36
- description="LLM Model",
37
- prompt="LLM Model",
38
- default="",
39
- allow_positional_parsing=False,
40
- always_prompt=False,
41
- allow_empty=True,
42
- ),
43
- StrInput(
44
- "base-url",
45
- description="LLM API Base URL",
46
- prompt="LLM API Base URL",
47
- default="",
48
- allow_positional_parsing=False,
49
- always_prompt=False,
50
- allow_empty=True,
51
- ),
52
- StrInput(
53
- "api-key",
54
- description="LLM API Key",
55
- prompt="LLM API Key",
56
- default="",
57
- allow_positional_parsing=False,
58
- always_prompt=False,
59
- allow_empty=True,
60
- ),
61
- TextInput(
62
- "system-prompt",
63
- description="System prompt",
64
- prompt="System prompt",
65
- default="",
66
- allow_positional_parsing=False,
67
- always_prompt=False,
68
- ),
69
- TextInput(
70
- "modes",
71
- description="Modes",
72
- prompt="Modes",
73
- default="coding",
74
- allow_positional_parsing=False,
75
- always_prompt=False,
76
- ),
77
- BoolInput(
78
- "start-new",
79
- description="Start new session (LLM Agent will forget past conversation)",
80
- prompt="Start new session (LLM Agent will forget past conversation)",
81
- default=False,
82
- allow_positional_parsing=False,
83
- always_prompt=False,
84
- ),
85
- BoolInput(
86
- "yolo",
87
- description="YOLO mode (LLM Agent will start in YOLO Mode)",
88
- prompt="YOLO mode (LLM Agent will start in YOLO Mode)",
89
- default=lambda ctx: llm_config.default_yolo_mode,
90
- allow_positional_parsing=False,
91
- always_prompt=False,
92
- ),
93
- TextInput("message", description="User message", prompt="Your message"),
94
- PreviousSessionInput(
95
- "previous-session",
96
- description="Previous conversation session",
97
- prompt="Previous conversation session (can be empty)",
98
- allow_positional_parsing=False,
99
- allow_empty=True,
100
- always_prompt=False,
101
- ),
102
- ]
103
-
104
- llm_ask: LLMTask = llm_group.add_task(
105
- LLMTask(
106
- name="llm-ask",
107
- input=_llm_ask_inputs,
108
- description="❓ Ask LLM",
109
- model=lambda ctx: None if ctx.input.model.strip() == "" else ctx.input.model,
110
- model_base_url=lambda ctx: (
111
- None if ctx.input.base_url.strip() == "" else ctx.input.base_url
112
- ),
113
- model_api_key=lambda ctx: (
114
- None if ctx.input.api_key.strip() == "" else ctx.input.api_key
115
- ),
116
- conversation_history_reader=read_chat_conversation,
117
- conversation_history_writer=write_chat_conversation,
118
- system_prompt=lambda ctx: (
119
- None if ctx.input.system_prompt.strip() == "" else ctx.input.system_prompt
120
- ),
121
- modes=lambda ctx: (
122
- None if ctx.input.modes.strip() == "" else ctx.input.modes.split(",")
123
- ),
124
- message="{ctx.input.message}",
125
- is_yolo_mode="{ctx.input.yolo}",
126
- retries=0,
127
- ),
128
- alias="ask",
129
- )
130
-
131
- llm_group.add_task(
132
- BaseTrigger(
133
- name="llm-chat",
134
- input=_llm_ask_inputs,
135
- description="💬 Chat with LLM",
136
- queue_name="ask_trigger",
137
- action=read_user_prompt,
138
- callback=Callback(
139
- task=llm_ask,
140
- input_mapping=get_llm_ask_input_mapping,
141
- result_queue="ask_result",
142
- error_queue="ask_error",
143
- session_name_queue="ask_session_name",
144
- ),
145
- retries=0,
146
- cli_only=True,
147
- ),
148
- alias="chat",
149
- )
150
-
151
- if CFG.LLM_ALLOW_ANALYZE_REPO:
152
- llm_ask.append_tool(analyze_repo)
153
-
154
- if CFG.LLM_ALLOW_ANALYZE_FILE:
155
- llm_ask.append_tool(analyze_file)
156
-
157
- if CFG.LLM_ALLOW_ACCESS_LOCAL_FILE:
158
- llm_ask.append_tool(
159
- search_files,
160
- list_files,
161
- read_from_file,
162
- read_many_files,
163
- replace_in_file,
164
- write_to_file,
165
- write_many_files,
166
- )
167
-
168
- if CFG.LLM_ALLOW_ACCESS_SHELL:
169
- llm_ask.append_tool(run_shell_command)
170
-
171
- if CFG.LLM_ALLOW_OPEN_WEB_PAGE:
172
- llm_ask.append_tool(open_web_page)
173
-
174
- if CFG.LLM_ALLOW_SEARCH_WIKIPEDIA:
175
- llm_ask.append_tool(search_wikipedia)
176
-
177
- if CFG.LLM_ALLOW_SEARCH_ARXIV:
178
- llm_ask.append_tool(search_arxiv)
179
-
180
- if CFG.LLM_ALLOW_GET_CURRENT_LOCATION:
181
- llm_ask.append_tool(get_current_location)
182
-
183
- if CFG.LLM_ALLOW_GET_CURRENT_WEATHER:
184
- llm_ask.append_tool(get_current_weather)
185
-
186
- if CFG.SERPAPI_KEY != "" and CFG.LLM_ALLOW_SEARCH_INTERNET:
187
- llm_ask.append_tool(create_search_internet_tool(CFG.SERPAPI_KEY))
@@ -1,21 +0,0 @@
1
- async function updatePreviousSession(event) {
2
- const currentInput = event.target;
3
- if (currentInput.name === "CURRENT_INPUT_NAME") {
4
- return
5
- }
6
- const previousSessionInput = submitTaskForm.querySelector('[name="CURRENT_INPUT_NAME"]');
7
- if (previousSessionInput) {
8
- const currentSessionName = cfg.SESSION_NAME
9
- previousSessionInput.value = currentSessionName;
10
- }
11
- }
12
-
13
- document.getElementById("submit-task-form").querySelectorAll("input[name], textarea[name]").forEach((element) => {
14
- element.addEventListener("input", updatePreviousSession);
15
- element.addEventListener("keyup", updatePreviousSession);
16
- });
17
-
18
- document.getElementById("submit-task-form").querySelectorAll("select[name]").forEach((element) => {
19
- element.addEventListener("change", updatePreviousSession);
20
- });
21
-
File without changes
@@ -1,71 +0,0 @@
1
- import json
2
- from typing import Literal
3
-
4
-
5
- def get_current_location() -> str:
6
- """
7
- Fetches the user's current geographical location based on their IP address.
8
-
9
- Use this tool when the user asks "Where am I?", "What is my current
10
- location?", or has a query that requires knowing their location to be
11
- answered.
12
-
13
- Returns:
14
- str: A JSON string containing the 'lat' and 'lon' of the current
15
- location. Example: '{"lat": 48.8584, "lon": 2.2945}'
16
- Raises:
17
- requests.RequestException: If the API request to the location service
18
- fails.
19
- """
20
- import requests
21
-
22
- try:
23
- response = requests.get("http://ip-api.com/json?fields=lat,lon", timeout=5)
24
- response.raise_for_status()
25
- return json.dumps(response.json())
26
- except requests.RequestException as e:
27
- raise requests.RequestException(f"Failed to get location: {e}") from None
28
-
29
-
30
- def get_current_weather(
31
- latitude: float,
32
- longitude: float,
33
- temperature_unit: Literal["celsius", "fahrenheit"],
34
- ) -> str:
35
- """
36
- Retrieves the current weather conditions for a given geographical location.
37
-
38
- Use this tool when the user asks about the weather. If the user does not
39
- provide a location, first use the `get_current_location` tool to
40
- determine their location.
41
-
42
- Args:
43
- latitude (float): The latitude of the location.
44
- longitude (float): The longitude of the location.
45
- temperature_unit (Literal["celsius", "fahrenheit"]): The desired unit
46
- for the temperature reading.
47
-
48
- Returns:
49
- str: A JSON string containing detailed weather data, including
50
- temperature, wind speed, and weather code.
51
- Raises:
52
- requests.RequestException: If the API request to the weather service
53
- fails.
54
- """
55
- import requests
56
-
57
- try:
58
- response = requests.get(
59
- "https://api.open-meteo.com/v1/forecast",
60
- params={
61
- "latitude": latitude,
62
- "longitude": longitude,
63
- "temperature_unit": temperature_unit,
64
- "current_weather": True,
65
- },
66
- timeout=5,
67
- )
68
- response.raise_for_status()
69
- return json.dumps(response.json())
70
- except requests.RequestException as e:
71
- raise requests.RequestException(f"Failed to get weather data: {e}") from None