zrb 1.0.0b3__py3-none-any.whl → 1.0.0b5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/llm_chat.py +83 -5
- zrb/builtin/llm/previous-session.js +21 -0
- zrb/builtin/llm/tool/api.py +29 -0
- zrb/builtin/todo.py +1 -0
- zrb/config.py +13 -15
- zrb/input/any_input.py +5 -0
- zrb/input/base_input.py +6 -0
- zrb/input/bool_input.py +2 -0
- zrb/input/float_input.py +2 -0
- zrb/input/int_input.py +2 -0
- zrb/input/option_input.py +2 -0
- zrb/input/password_input.py +2 -0
- zrb/input/text_input.py +2 -0
- zrb/runner/common_util.py +1 -1
- zrb/runner/web_route/node_page/task/view.html +1 -1
- zrb/runner/web_route/static/resources/session/current-session.js +10 -7
- zrb/runner/web_route/static/resources/session/event.js +14 -2
- zrb/task/llm_task.py +85 -215
- zrb/util/llm/tool.py +33 -5
- {zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/METADATA +2 -2
- {zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/RECORD +23 -21
- {zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/WHEEL +0 -0
- {zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/llm_chat.py
CHANGED
@@ -1,17 +1,76 @@
+import json
+import os
+from typing import Any
+
 from zrb.builtin.group import llm_group
+from zrb.builtin.llm.tool.api import get_current_location, get_current_weather
 from zrb.builtin.llm.tool.cli import run_shell_command
-from zrb.builtin.llm.tool.rag import create_rag_from_directory
 from zrb.builtin.llm.tool.web import open_web_route, query_internet
 from zrb.config import (
+    LLM_ALLOW_ACCESS_INTERNET,
     LLM_ALLOW_ACCESS_SHELL,
-
-    LLM_HISTORY_FILE,
+    LLM_HISTORY_DIR,
     LLM_MODEL,
     LLM_SYSTEM_PROMPT,
 )
+from zrb.context.any_shared_context import AnySharedContext
+from zrb.input.bool_input import BoolInput
 from zrb.input.str_input import StrInput
 from zrb.input.text_input import TextInput
 from zrb.task.llm_task import LLMTask
+from zrb.util.file import read_file, write_file
+from zrb.util.string.conversion import to_pascal_case
+
+
+class PreviousSessionInput(StrInput):
+
+    def to_html(self, ctx: AnySharedContext) -> str:
+        name = self.name
+        description = self.description
+        default = self.get_default_str(ctx)
+        script = read_file(
+            file_path=os.path.join(os.path.dirname(__file__), "previous-session.js"),
+            replace_map={
+                "CURRENT_INPUT_NAME": name,
+                "CurrentPascalInputName": to_pascal_case(name),
+            },
+        )
+        return "\n".join(
+            [
+                f'<input name="{name}" placeholder="{description}" value="{default}" />',
+                f"<script>{script}</script>",
+            ]
+        )
+
+
+def _read_chat_conversation(ctx: AnySharedContext) -> list[dict[str, Any]]:
+    if ctx.input.start_new:
+        return []
+    previous_session_name = ctx.input.previous_session
+    if previous_session_name == "" or previous_session_name is None:
+        last_session_file_path = os.path.join(LLM_HISTORY_DIR, "last-session")
+        if os.path.isfile(last_session_file_path):
+            previous_session_name = read_file(last_session_file_path).strip()
+    conversation_file_path = os.path.join(
+        LLM_HISTORY_DIR, f"{previous_session_name}.json"
+    )
+    if not os.path.isfile(conversation_file_path):
+        return []
+    return json.loads(read_file(conversation_file_path))
+
+
+def _write_chat_conversation(
+    ctx: AnySharedContext, conversations: list[dict[str, Any]]
+):
+    os.makedirs(LLM_HISTORY_DIR, exist_ok=True)
+    current_session_name = ctx.session.name
+    conversation_file_path = os.path.join(
+        LLM_HISTORY_DIR, f"{current_session_name}.json"
+    )
+    write_file(conversation_file_path, json.dumps(conversations, indent=2))
+    last_session_file_path = os.path.join(LLM_HISTORY_DIR, "last-session")
+    write_file(last_session_file_path, current_session_name)
+
 
 llm_chat: LLMTask = llm_group.add_task(
     LLMTask(
@@ -22,16 +81,33 @@ llm_chat: LLMTask = llm_group.add_task(
             description="LLM Model",
             prompt="LLM Model",
             default_str=LLM_MODEL,
+            allow_positional_parsing=False,
         ),
         TextInput(
             "system-prompt",
             description="System prompt",
             prompt="System prompt",
             default_str=LLM_SYSTEM_PROMPT,
+            allow_positional_parsing=False,
+        ),
+        BoolInput(
+            "start-new",
+            description="Start new conversation (LLM will forget everything)",
+            prompt="Start new conversation (LLM will forget everything)",
+            default_str="false",
+            allow_positional_parsing=False,
         ),
         TextInput("message", description="User message", prompt="Your message"),
+        PreviousSessionInput(
+            "previous-session",
+            description="Previous conversation session",
+            prompt="Previous conversation session (can be empty)",
+            allow_positional_parsing=False,
+            allow_empty=True,
+        ),
     ],
-
+    conversation_history_reader=_read_chat_conversation,
+    conversation_history_writer=_write_chat_conversation,
     description="Chat with LLM",
    model="{ctx.input.model}",
    system_prompt="{ctx.input['system-prompt']}",
@@ -44,6 +120,8 @@ llm_chat: LLMTask = llm_group.add_task(
 if LLM_ALLOW_ACCESS_SHELL:
     llm_chat.add_tool(run_shell_command)
 
-if
+if LLM_ALLOW_ACCESS_INTERNET:
     llm_chat.add_tool(open_web_route)
     llm_chat.add_tool(query_internet)
+    llm_chat.add_tool(get_current_location)
+    llm_chat.add_tool(get_current_weather)
zrb/builtin/llm/previous-session.js
ADDED
@@ -0,0 +1,21 @@
+async function updatePreviousSession(event) {
+    const currentInput = event.target;
+    if (currentInput.name === "CURRENT_INPUT_NAME") {
+        return
+    }
+    const previousSessionInput = submitTaskForm.querySelector('[name="CURRENT_INPUT_NAME"]');
+    if (previousSessionInput) {
+        const currentSessionName = cfg.SESSION_NAME
+        previousSessionInput.value = currentSessionName;
+    }
+}
+
+document.getElementById("submit-task-form").querySelectorAll("input[name], textarea[name]").forEach((element) => {
+    element.addEventListener("input", updatePreviousSession);
+    element.addEventListener("keyup", updatePreviousSession);
+});
+
+document.getElementById("submit-task-form").querySelectorAll("select[name]").forEach((element) => {
+    element.addEventListener("change", updatePreviousSession);
+});
+
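The script is a template rather than standalone JavaScript: PreviousSessionInput.to_html loads it through read_file with a replace_map, so the CURRENT_INPUT_NAME token is substituted with the real input name before the script is embedded in the page. A rough sketch of that substitution, assuming replace_map does plain string replacement:

# Sketch of the placeholder substitution that PreviousSessionInput relies on;
# the real work is done by zrb.util.file.read_file's replace_map argument.
def render_template(script: str, replace_map: dict[str, str]) -> str:
    for placeholder, value in replace_map.items():
        script = script.replace(placeholder, value)
    return script

js = 'if (currentInput.name === "CURRENT_INPUT_NAME") { return }'
print(render_template(js, {"CURRENT_INPUT_NAME": "previous-session"}))
# if (currentInput.name === "previous-session") { return }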
zrb/builtin/llm/tool/api.py
ADDED
@@ -0,0 +1,29 @@
+import json
+from typing import Annotated, Literal
+
+import requests
+
+
+def get_current_location() -> (
+    Annotated[str, "JSON string representing latitude and longitude"]
+):  # noqa
+    """Get the user's current location."""
+    return json.dumps(requests.get("http://ip-api.com/json?fields=lat,lon").json())
+
+
+def get_current_weather(
+    latitude: float,
+    longitude: float,
+    temperature_unit: Literal["celsius", "fahrenheit"],
+) -> str:
+    """Get the current weather in a given location."""
+    resp = requests.get(
+        "https://api.open-meteo.com/v1/forecast",
+        params={
+            "latitude": latitude,
+            "longitude": longitude,
+            "temperature_unit": temperature_unit,
+            "current_weather": True,
+        },
+    )
+    return json.dumps(resp.json())
zrb/builtin/todo.py
CHANGED
@@ -294,6 +294,7 @@ def _get_default_stop_work_time_str() -> str:
             description="Todo.txt content",
             prompt="Todo.txt content (will override existing)",
             default_str=lambda _: _get_todo_txt_content(),
+            allow_positional_parsing=False,
         ),
     ],
     description="📝 Edit todo",
zrb/config.py
CHANGED
@@ -78,25 +78,23 @@ WEB_AUTH_REFRESH_TOKEN_EXPIRE_MINUTES = int(
 )
 LLM_MODEL = os.getenv("ZRB_LLM_MODEL", "ollama_chat/llama3.1")
 
-_DEFAULT_PROMPT =
-You are a
-
-
-
-
-
-
-
-If unsure or lacking current data, inform the user and suggest verification.
-Accuracy always takes precedence over completeness.
-""".strip()
+_DEFAULT_PROMPT = (
+    "You are a helpful AI assistant capable of using various tools to answer user queries. When solving a problem:\n"
+    "1. Carefully analyze the user's request and identify what information is needed to provide a complete answer.\n"
+    "2. Determine which available tools can help you gather the necessary information.\n"
+    "3. Call tools strategically and in a logical sequence to collect required data.\n"
+    "4. If a tool provides incomplete information, intelligently decide which additional tool or approach to use.\n"
+    "5. Always aim to provide the most accurate and helpful response possible."
+)
 LLM_SYSTEM_PROMPT = os.getenv("ZRB_LLM_SYSTEM_PROMPT", _DEFAULT_PROMPT)
+LLM_HISTORY_DIR = os.getenv(
+    "ZRB_LLM_HISTORY_DIR", os.path.expanduser(os.path.join("~", ".zrb-llm-history"))
+)
 LLM_HISTORY_FILE = os.getenv(
-    "ZRB_LLM_HISTORY_FILE",
-    os.path.expanduser(os.path.join("~", ".zrb-llm-history.json")),
+    "ZRB_LLM_HISTORY_FILE", os.path.join(LLM_HISTORY_DIR, "history.json")
 )
 LLM_ALLOW_ACCESS_SHELL = to_boolean(os.getenv("ZRB_LLM_ACCESS_FILE", "1"))
-
+LLM_ALLOW_ACCESS_INTERNET = to_boolean(os.getenv("ZRB_LLM_ACCESS_INTERNET", "1"))
 RAG_EMBEDDING_MODEL = os.getenv("ZRB_RAG_EMBEDDING_MODEL", "ollama/nomic-embed-text")
 RAG_CHUNK_SIZE = int(os.getenv("ZRB_RAG_CHUNK_SIZE", "1024"))
 RAG_OVERLAP = int(os.getenv("ZRB_RAG_OVERLAP", "128"))
zrb/input/any_input.py
CHANGED
zrb/input/base_input.py
CHANGED
@@ -16,6 +16,7 @@ class BaseInput(AnyInput):
         default_str: StrAttr = "",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         self._name = name
         self._description = description
@@ -23,6 +24,7 @@ class BaseInput(AnyInput):
         self._default_str = default_str
         self._auto_render = auto_render
         self._allow_empty = allow_empty
+        self._allow_positional_parsing = allow_positional_parsing
 
     def __repr__(self):
         return f"<{self.__class__.__name__} name={self._name}>"
@@ -39,6 +41,10 @@ class BaseInput(AnyInput):
     def prompt_message(self) -> str:
         return self._prompt if self._prompt is not None else self.name
 
+    @property
+    def allow_positional_parsing(self) -> bool:
+        return self._allow_positional_parsing
+
     def to_html(self, ctx: AnySharedContext) -> str:
         name = self.name
         description = self.description
zrb/input/bool_input.py
CHANGED
@@ -13,6 +13,7 @@ class BoolInput(BaseInput):
         default_str: StrAttr = "False",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         super().__init__(
             name=name,
@@ -21,6 +22,7 @@ class BoolInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
 
     def to_html(self, ctx: AnySharedContext) -> str:
zrb/input/float_input.py
CHANGED
@@ -12,6 +12,7 @@ class FloatInput(BaseInput):
         default_str: StrAttr = "0.0",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         super().__init__(
             name=name,
@@ -20,6 +21,7 @@ class FloatInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
 
     def to_html(self, ctx: AnySharedContext) -> str:
zrb/input/int_input.py
CHANGED
@@ -12,6 +12,7 @@ class IntInput(BaseInput):
         default_str: StrAttr = "0",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         super().__init__(
             name=name,
@@ -20,6 +21,7 @@ class IntInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
 
     def to_html(self, ctx: AnySharedContext) -> str:
zrb/input/option_input.py
CHANGED
@@ -14,6 +14,7 @@ class OptionInput(BaseInput):
         default_str: StrAttr = "",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         super().__init__(
             name=name,
@@ -22,6 +23,7 @@ class OptionInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
         self._options = options
 
zrb/input/password_input.py
CHANGED
@@ -14,6 +14,7 @@ class PasswordInput(BaseInput):
         default_str: str | Callable[[AnySharedContext], str] = "",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
     ):
         super().__init__(
             name=name,
@@ -22,6 +23,7 @@ class PasswordInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
         self._is_secret = True
 
zrb/input/text_input.py
CHANGED
@@ -18,6 +18,7 @@ class TextInput(BaseInput):
         default_str: str | Callable[[AnySharedContext], str] = "",
         auto_render: bool = True,
         allow_empty: bool = False,
+        allow_positional_parsing: bool = True,
         editor: str = DEFAULT_EDITOR,
         extension: str = ".txt",
         comment_start: str | None = None,
@@ -30,6 +31,7 @@ class TextInput(BaseInput):
             default_str=default_str,
             auto_render=auto_render,
             allow_empty=allow_empty,
+            allow_positional_parsing=allow_positional_parsing,
         )
         self._editor = editor
         self._extension = extension
zrb/runner/common_util.py
CHANGED
@@ -15,7 +15,7 @@ def get_run_kwargs(
         if task_input.name in str_kwargs:
             # Update shared context for next input default value
             task_input.update_shared_context(shared_ctx, str_kwargs[task_input.name])
-        elif arg_index < len(args):
+        elif arg_index < len(args) and task_input.allow_positional_parsing:
             run_kwargs[task_input.name] = args[arg_index]
             # Update shared context for next input default value
             task_input.update_shared_context(shared_ctx, run_kwargs[task_input.name])
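This is the behavioral half of allow_positional_parsing: inputs that disallow it are skipped when leftover positional arguments are handed out, which is presumably why llm_chat marks model, system-prompt, and start-new as keyword-only, so that a lone positional argument binds to message. A toy model of the assignment rule (the real get_run_kwargs also consults str_kwargs and the shared context):

# Toy model of the assignment rule above; real inputs are AnyInput objects.
class FakeInput:
    def __init__(self, name: str, allow_positional_parsing: bool = True):
        self.name = name
        self.allow_positional_parsing = allow_positional_parsing

def assign_positionals(inputs: list[FakeInput], args: list[str]) -> dict[str, str]:
    run_kwargs, arg_index = {}, 0
    for task_input in inputs:
        if arg_index < len(args) and task_input.allow_positional_parsing:
            run_kwargs[task_input.name] = args[arg_index]
            arg_index += 1
    return run_kwargs

inputs = [FakeInput("model", allow_positional_parsing=False), FakeInput("message")]
print(assign_positionals(inputs, ["hello"]))  # {'message': 'hello'}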
zrb/runner/web_route/static/resources/session/current-session.js
CHANGED
@@ -4,18 +4,22 @@ const CURRENT_SESSION = {
     const logTextarea = document.getElementById("log-textarea");
     const submitTaskForm = document.getElementById("submit-task-form");
     let isFinished = false;
+    let isInputUpdated = false;
     let errorCount = 0;
     while (!isFinished) {
         try {
             const data = await this.getCurrentSession();
             // update inputs
-
-
-            const
-
-
-            input
+            if (!isInputUpdated) {
+                const dataInputs = data.input;
+                for (const inputName in dataInputs) {
+                    const inputValue = dataInputs[inputName];
+                    const input = submitTaskForm.querySelector(`[name="${inputName}"]`);
+                    if (input) {
+                        input.value = inputValue;
+                    }
                 }
+                isInputUpdated = true;
             }
             resultLineCount = data.final_result.split("\n").length;
             resultTextarea.rows = resultLineCount <= 5 ? resultLineCount : 5;
@@ -51,7 +55,6 @@ const CURRENT_SESSION = {
                 "Content-Type": "application/json"
             },
         });
-        console.log("RESPONSE", response);
         return await response.json();
     } catch (error) {
         console.error("Error:", error);
@@ -20,9 +20,9 @@ window.addEventListener("load", async function () {
|
|
20
20
|
|
21
21
|
|
22
22
|
const submitTaskForm = document.getElementById("submit-task-form");
|
23
|
-
|
23
|
+
async function handleInputUpdate(event) {
|
24
24
|
const currentInput = event.target;
|
25
|
-
const inputs = Array.from(submitTaskForm.querySelectorAll("input[name], textarea[name]"));
|
25
|
+
const inputs = Array.from(submitTaskForm.querySelectorAll("input[name], textarea[name], select[name]"));
|
26
26
|
const inputMap = {};
|
27
27
|
const fixedInputNames = [];
|
28
28
|
for (const input of inputs) {
|
@@ -56,6 +56,10 @@ submitTaskForm.addEventListener("change", async function(event) {
|
|
56
56
|
if (input === currentInput) {
|
57
57
|
return;
|
58
58
|
}
|
59
|
+
if (value === "") {
|
60
|
+
return;
|
61
|
+
}
|
62
|
+
console.log(input, data);
|
59
63
|
input.value = value;
|
60
64
|
});
|
61
65
|
} else {
|
@@ -64,6 +68,14 @@ submitTaskForm.addEventListener("change", async function(event) {
|
|
64
68
|
} catch (error) {
|
65
69
|
console.error("Error during fetch:", error);
|
66
70
|
}
|
71
|
+
}
|
72
|
+
|
73
|
+
submitTaskForm.querySelectorAll("input[name], textarea[name]").forEach((element) => {
|
74
|
+
element.addEventListener("input", handleInputUpdate);
|
75
|
+
element.addEventListener("keyup", handleInputUpdate);
|
76
|
+
});
|
77
|
+
submitTaskForm.querySelectorAll("select[name]").forEach((element) => {
|
78
|
+
element.addEventListener("change", handleInputUpdate);
|
67
79
|
});
|
68
80
|
|
69
81
|
|
zrb/task/llm_task.py
CHANGED
@@ -3,7 +3,9 @@ import os
 from collections.abc import Callable
 from typing import Any
 
-from
+from pydantic_ai import Agent, Tool
+from pydantic_ai.messages import ModelMessagesTypeAdapter
+from pydantic_ai.settings import ModelSettings
 
 from zrb.attr.type import StrAttr
 from zrb.config import LLM_MODEL, LLM_SYSTEM_PROMPT
@@ -16,25 +18,10 @@ from zrb.task.base_task import BaseTask
 from zrb.util.attr import get_str_attr
 from zrb.util.cli.style import stylize_faint
 from zrb.util.file import read_file, write_file
-from zrb.util.llm.tool import callable_to_tool_schema
 from zrb.util.run import run_async
 
 ListOfDict = list[dict[str, Any]]
-
-
-class AdditionalTool(BaseModel):
-    fn: Callable
-    name: str | None
-
-
-def scratchpad(thought: str) -> str:
-    """Use this tool to note your thought and planning"""
-    return thought
-
-
-def end_conversation(final_answer: str) -> str:
-    """End conversation with a final answer containing all necessary information"""
-    return final_answer
+ToolOrCallable = Tool | Callable
 
 
 class LLMTask(BaseTask):
@@ -48,17 +35,28 @@
         input: list[AnyInput | None] | AnyInput | None = None,
         env: list[AnyEnv | None] | AnyEnv | None = None,
         model: StrAttr | None = LLM_MODEL,
+        model_settings: (
+            ModelSettings | Callable[[AnySharedContext], ModelSettings] | None
+        ) = None,
         render_model: bool = True,
+        agent: Agent | Callable[[AnySharedContext], Agent] | None = None,
         system_prompt: StrAttr | None = LLM_SYSTEM_PROMPT,
         render_system_prompt: bool = True,
         message: StrAttr | None = None,
-        tools:
-
-
+        tools: (
+            list[ToolOrCallable] | Callable[[AnySharedContext], list[ToolOrCallable]]
+        ) = [],
+        conversation_history: (
+            ListOfDict | Callable[[AnySharedContext], ListOfDict]
+        ) = [],
+        conversation_history_reader: (
+            Callable[[AnySharedContext], ListOfDict] | None
+        ) = None,
+        conversation_history_writer: (
+            Callable[[AnySharedContext, ListOfDict], None] | None
+        ) = None,
+        conversation_history_file: StrAttr | None = None,
         render_history_file: bool = True,
-        model_kwargs: (
-            dict[str, Any] | Callable[[AnySharedContext], dict[str, Any]]
-        ) = {},
         execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
         retries: int = 2,
         retry_period: float = 0,
@@ -68,6 +66,7 @@
         readiness_failure_threshold: int = 1,
         readiness_timeout: int = 60,
         monitor_readiness: bool = False,
+        max_call_iteration: int = 20,
         upstream: list[AnyTask] | AnyTask | None = None,
         fallback: list[AnyTask] | AnyTask | None = None,
         successor: list[AnyTask] | AnyTask | None = None,
@@ -94,166 +93,75 @@
             successor=successor,
         )
         self._model = model
+        self._model_settings = (model_settings,)
+        self._agent = agent
         self._render_model = render_model
-        self._model_kwargs = model_kwargs
         self._system_prompt = system_prompt
         self._render_system_prompt = render_system_prompt
         self._message = message
         self._tools = tools
-        self.
-        self.
+        self._additional_tools: list[ToolOrCallable] = []
+        self._conversation_history = conversation_history
+        self._conversation_history_reader = conversation_history_reader
+        self._conversation_history_writer = conversation_history_writer
+        self._conversation_history_file = conversation_history_file
         self._render_history_file = render_history_file
+        self._max_call_iteration = max_call_iteration
 
-    def add_tool(self, tool:
-        self.
+    def add_tool(self, tool: ToolOrCallable):
+        self._additional_tools.append(tool)
 
     async def _exec_action(self, ctx: AnyContext) -> Any:
-
-
-
-
-
-
-        try:
-            is_function_call_supported = supports_function_calling(model=model)
-        except Exception:
-            is_function_call_supported = False
-            litellm.add_function_to_prompt = True
-        if not is_function_call_supported:
-            ctx.log_warning(f"Model {model} doesn't support function call")
-        available_tools = self._get_available_tools(
-            ctx, include_end_conversation=not is_function_call_supported
+        history = await self._read_conversation_history(ctx)
+        user_prompt = self._get_message(ctx)
+        agent = self._get_agent(ctx)
+        result = await agent.run(
+            user_prompt=user_prompt,
+            message_history=ModelMessagesTypeAdapter.validate_python(history),
         )
-
-
-
-
-
-
-
-
-        while True:
-            llm_response = await self._get_llm_response(
-                model, system_prompt, conversations, model_kwargs
-            )
-            llm_response_dict = llm_response.to_dict()
-            ctx.print(stylize_faint(f"{llm_response_dict}"))
-            conversations.append(llm_response_dict)
-            ctx.log_debug("RESPONSE MESSAGE", llm_response)
-            if is_function_call_supported:
-                if not llm_response.tool_calls:
-                    # No tool call, end conversation
-                    self._save_conversation(history_file, conversations)
-                    return llm_response.content
-                await self._handle_tool_calls(
-                    ctx, available_tools, conversations, llm_response
-                )
-            if not is_function_call_supported:
-                try:
-                    json_payload = json.loads(llm_response.content)
-                    function_name = _get_fallback_function_name(json_payload)
-                    function_kwargs = _get_fallback_function_kwargs(json_payload)
-                    tool_execution_message = (
-                        await self._create_fallback_tool_exec_message(
-                            available_tools, function_name, function_kwargs
-                        )
-                    )
-                    ctx.print(stylize_faint(f"{tool_execution_message}"))
-                    conversations.append(tool_execution_message)
-                    if function_name == "end_conversation":
-                        self._save_conversation(history_file, conversations)
-                        return function_kwargs.get("final_answer", "")
-                except Exception as e:
-                    ctx.log_error(e)
-                    tool_execution_message = self._create_exec_scratchpad_message(
-                        f"{e}"
-                    )
-                    conversations.append(tool_execution_message)
-
-    async def _handle_tool_calls(
-        self,
-        ctx: AnyContext,
-        available_tools: dict[str, Callable],
-        conversations: list[dict[str, Any]],
-        llm_response: Any,
+        new_history = json.loads(result.all_messages_json())
+        for history in new_history:
+            ctx.print(stylize_faint(json.dumps(history)))
+        await self._write_conversation_history(ctx, new_history)
+        return result.data
+
+    async def _write_conversation_history(
+        self, ctx: AnyContext, conversations: list[Any]
     ):
-
-
-
-            available_tools, tool_call
-        )
-        ctx.print(stylize_faint(f"{tool_execution_message}"))
-        conversations.append(tool_execution_message)
-
-    def _save_conversation(self, history_file: str, conversations: list[Any]):
+        if self._conversation_history_writer is not None:
+            await run_async(self._conversation_history_writer(ctx, conversations))
+        history_file = self._get_history_file(ctx)
         if history_file != "":
             write_file(history_file, json.dumps(conversations, indent=2))
 
-
-        self,
-
-
-
-
-
-
-
-
-
-
+    def _get_model_settings(self, ctx: AnyContext) -> ModelSettings | None:
+        if isinstance(self._model_settings, ModelSettings):
+            return self._model_settings
+        if callable(self._model_settings):
+            return self._model_settings(ctx)
+        return None
+
+    def _get_agent(self, ctx: AnyContext) -> Agent:
+        if isinstance(self._agent, Agent):
+            return self._agent
+        if callable(self._agent):
+            return self._agent(ctx)
+        tools_or_callables = list(
+            self._tools(ctx) if callable(self._tools) else self._tools
         )
-
-
-
-
-
-
-
-        return
-
-
-
-                "content": await self._get_exec_tool_result(
-                    available_tools, function_name, function_kwargs
-                ),
-            }
-
-    async def _create_fallback_tool_exec_message(
-        self,
-        available_tools: dict[str, Callable],
-        function_name: str,
-        function_kwargs: dict[str, Any],
-    ) -> dict[str, Any]:
-        result = await self._get_exec_tool_result(
-            available_tools, function_name, function_kwargs
-        )
-        return self._create_exec_scratchpad_message(
-            f"Result of {function_name} call: {result}"
+        tools_or_callables.extend(self._additional_tools)
+        tools = [
+            tool if isinstance(tool, Tool) else Tool(tool, takes_ctx=False)
+            for tool in tools_or_callables
+        ]
+        for tool in tools:
+            print("tool", tool)
+        return Agent(
+            self._get_model(ctx),
+            system_prompt=self._get_system_prompt(ctx),
+            tools=tools,
         )
 
-    def _create_exec_scratchpad_message(self, message: str) -> dict[str, Any]:
-        return {
-            "role": "assistant",
-            "content": json.dumps(
-                {"name": "scratchpad", "arguments": {"thought": message}}
-            ),
-        }
-
-    async def _get_exec_tool_result(
-        self,
-        available_tools: dict[str, Callable],
-        function_name: str,
-        function_kwargs: dict[str, Any],
-    ) -> str:
-        if function_name not in available_tools:
-            return f"[ERROR] Invalid tool: {function_name}"
-        function_to_call = available_tools[function_name]
-        try:
-            return await run_async(function_to_call(**function_kwargs))
-        except Exception as e:
-            return f"[ERROR] {e}"
-
     def _get_model(self, ctx: AnyContext) -> str:
         return get_str_attr(
             ctx, self._model, "ollama_chat/llama3.1", auto_render=self._render_model
@@ -270,62 +178,24 @@
     def _get_message(self, ctx: AnyContext) -> str:
         return get_str_attr(ctx, self._message, "How are you?", auto_render=True)
 
-    def
-        self
-
-
-
-            model_kwargs = self._model_kwargs(ctx)
-        else:
-            model_kwargs = self._model_kwargs
-        model_kwargs["tools"] = [
-            callable_to_tool_schema(tool) for tool in available_tools.values()
-        ]
-        return model_kwargs
-
-    def _get_available_tools(
-        self, ctx: AnyContext, include_end_conversation: bool
-    ) -> dict[str, Callable]:
-        tools = {"scratchpad": scratchpad}
-        if include_end_conversation:
-            tools["end_conversation"] = end_conversation
-        tool_list = self._tools(ctx) if callable(self._tools) else self._tools
-        for tool in tool_list:
-            tools[tool.__name__] = tool
-        return tools
-
-    def _get_history(self, ctx: AnyContext) -> ListOfDict:
-        if callable(self._history):
-            return self._history(ctx)
+    async def _read_conversation_history(self, ctx: AnyContext) -> ListOfDict:
+        if self._conversation_history_reader is not None:
+            return await run_async(self._conversation_history_reader(ctx))
+        if callable(self._conversation_history):
+            return self._conversation_history(ctx)
         history_file = self._get_history_file(ctx)
         if (
-            len(self.
+            len(self._conversation_history) == 0
             and history_file != ""
             and os.path.isfile(history_file)
         ):
             return json.loads(read_file(history_file))
-        return self.
+        return self._conversation_history
 
     def _get_history_file(self, ctx: AnyContext) -> str:
         return get_str_attr(
-            ctx,
+            ctx,
+            self._conversation_history_file,
+            "",
+            auto_render=self._render_history_file,
         )
-
-
-def _get_fallback_function_name(json_payload: dict[str, Any]) -> str:
-    for key in ("name",):
-        if key in json_payload:
-            return json_payload[key]
-    raise ValueError("Function name not provided")
-
-
-def _get_fallback_function_kwargs(json_payload: dict[str, Any]) -> str:
-    for key in (
-        "arguments",
-        "args",
-        "parameters",
-        "params",
-    ):
-        if key in json_payload:
-            return json_payload[key]
-    raise ValueError("Function arguments not provided")
zrb/util/llm/tool.py
CHANGED
@@ -1,6 +1,6 @@
 import inspect
 from collections.abc import Callable
-from typing import Any, get_type_hints
+from typing import Annotated, Any, Literal, get_type_hints
 
 
 def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
@@ -21,10 +21,14 @@ def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
     # Build parameter schema
     param_schema = {"type": "object", "properties": {}, "required": []}
     for param_name, param in sig.parameters.items():
-
-
-
-
+        # Get the type hint or default to str
+        param_type = hints.get(param_name, str)
+
+        # Handle annotated types (e.g., Annotated[str, "description"])
+        json_type, param_metadata = _process_type_annotation(param_type)
+        param_schema["properties"][param_name] = param_metadata
+
+        # Mark required parameters
         if param.default is inspect.Parameter.empty:
             param_schema["required"].append(param_name)
     return {
@@ -37,6 +41,30 @@ def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
     }
 
 
+def _process_type_annotation(py_type: Any) -> tuple[str, dict]:
+    """
+    Process type annotations and return the JSON Schema type and metadata.
+
+    :param py_type: The type annotation.
+    :return: A tuple of (JSON type, parameter metadata).
+    """
+    if hasattr(py_type, "__origin__") and py_type.__origin__ is Literal:
+        # Handle Literal (enum)
+        enum_values = list(py_type.__args__)
+        return "string", {"type": "string", "enum": enum_values}
+
+    if hasattr(py_type, "__origin__") and py_type.__origin__ is Annotated:
+        # Handle Annotated types
+        base_type = py_type.__args__[0]
+        description = py_type.__args__[1]
+        json_type = _python_type_to_json_type(base_type)
+        return json_type, {"type": json_type, "description": description}
+
+    # Fallback to basic type conversion
+    json_type = _python_type_to_json_type(py_type)
+    return json_type, {"type": json_type}
+
+
 def _python_type_to_json_type(py_type):
     """
     Map Python types to JSON Schema types.
{zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.0.0b3
+Version: 1.0.0b5
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -21,8 +21,8 @@ Requires-Dist: chromadb (>=0.5.20,<0.6.0) ; extra == "rag"
 Requires-Dist: fastapi[standard] (>=0.115.6,<0.116.0)
 Requires-Dist: isort (>=5.13.2,<5.14.0)
 Requires-Dist: libcst (>=1.5.0,<2.0.0)
-Requires-Dist: litellm (>=1.52.12,<2.0.0)
 Requires-Dist: pdfplumber (>=0.11.4,<0.12.0) ; extra == "rag"
+Requires-Dist: pydantic-ai (>=0.0.19,<0.0.20)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: python-jose[cryptography] (>=3.3.0,<4.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
{zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/RECORD
CHANGED
@@ -7,7 +7,9 @@ zrb/builtin/base64.py,sha256=1YnSwASp7OEAvQcsnHZGpJEvYoI1Z2zTIJ1bCDHfcPQ,921
 zrb/builtin/git.py,sha256=xHzg0srhp1uOSXWvwA--Fo8idkt0G9010iJ8uIndzg4,5463
 zrb/builtin/git_subtree.py,sha256=GwI8befmvXEoX1xyZ4jkeG8nsyCkuRG1lzPiGss3yqw,3493
 zrb/builtin/group.py,sha256=-phJfVpTX3_gUwS1u8-RbZUHe-X41kxDBSmrVh4rq8E,1682
-zrb/builtin/llm/llm_chat.py,sha256
+zrb/builtin/llm/llm_chat.py,sha256=vUUchYKJuHr-N_HarpKRFsV0EdQDAFZzAfbK4C0vx88,4508
+zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
+zrb/builtin/llm/tool/api.py,sha256=yQ3XV8O7Fx7hHssLSOcmiHDnevPhz9ktWi44HK7zTls,801
 zrb/builtin/llm/tool/cli.py,sha256=to_IjkfrMGs6eLfG0cpVN9oyADWYsJQCtyluUhUdBww,253
 zrb/builtin/llm/tool/rag.py,sha256=jJRLERW6824JeEzEQ_OqLMaaa3mjuNqsRcRWoL1wVx0,5192
 zrb/builtin/llm/tool/web.py,sha256=N2HYuXbKPUpjVAq_UnQMbUrTIE8u0Ut3TeQadZ7_NJc,2217
@@ -162,14 +164,14 @@ zrb/builtin/shell/autocomplete/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
 zrb/builtin/shell/autocomplete/bash.py,sha256=-7YDVV7txgJH9mAYSYN0jmvUEeDIzWFvVNY-cY0myF8,1181
 zrb/builtin/shell/autocomplete/subcmd.py,sha256=WZI6cGWJcn80zSyxOHG7sCMO3Ucix3mZf4xm_xyB_Y0,606
 zrb/builtin/shell/autocomplete/zsh.py,sha256=9hlq0Wt3fhRz326mAQTypEd4_4lZdrbBx_3A-Ti3mvw,1022
-zrb/builtin/todo.py,sha256=
+zrb/builtin/todo.py,sha256=8swb5i9KWqaLfKKcKSQhu5K1QtW3RAYR1vFqsBjw-GY,10840
 zrb/callback/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/callback/any_callback.py,sha256=Yhdv5UWHAZSVzj5K2JdxcVQx8x8VX8aZJEivj3NTfZc,247
 zrb/callback/callback.py,sha256=hKefB_Jd1XGjPSLQdMKDsGLHPzEGO2dqrIArLl_EmD0,848
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config.py,sha256=
+zrb/config.py,sha256=vBBhSZ5xQV5S_20PQS54ShFgV-ko3Zbzxw23wDDJnbQ,4556
 zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850
 zrb/content_transformer/content_transformer.py,sha256=YU6Xr3G_IaCWKQGsf9z9YlCclbiwcJ7ytQv3wKpPIiI,2125
@@ -189,18 +191,18 @@ zrb/group/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/group/any_group.py,sha256=1rNcsi5eu_86JAx_6Jy46SK4BTeppcb89MORynJd-4o,1115
 zrb/group/group.py,sha256=JFmWVEQ9PVy2WCf5pUG74iwL2xcGxXaAjT-NArAeloM,1861
 zrb/input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-zrb/input/any_input.py,sha256=
-zrb/input/base_input.py,sha256=
-zrb/input/bool_input.py,sha256=
-zrb/input/float_input.py,sha256=
-zrb/input/int_input.py,sha256=
-zrb/input/option_input.py,sha256=
-zrb/input/password_input.py,sha256=
+zrb/input/any_input.py,sha256=tvls3uJBVPLW5Oawxof9qWCX-lH1h_RErxjYNu1bwtY,899
+zrb/input/base_input.py,sha256=uHOZeU4OyO05e-kMXDo5MLC76ZWJXzhdhB6akaBzhtU,3383
+zrb/input/bool_input.py,sha256=mqD1c7RT683Aui2IsdJUoNiSo7iU3iYrm53e2SjaOrQ,1493
+zrb/input/float_input.py,sha256=8HhVWZsyqw1WaN3CysIgvGXkHfVsuLChUEggHODxOTk,1125
+zrb/input/int_input.py,sha256=e5F049nwe_Iaiymuh41dZtBOY-DG1AhVP-8gF0IO0n4,1115
+zrb/input/option_input.py,sha256=8K0fn5joYeiAA8GevckVrGghbrIcD9GPShDfhAU67cw,2049
+zrb/input/password_input.py,sha256=6TW3J7K3ilcstv681PIXL_Mgd6pkNDTCMH8exlqDOiM,1421
 zrb/input/str_input.py,sha256=NevZHX9rf1g8eMatPyy-kUX3DglrVAQpzvVpKAzf7bA,81
-zrb/input/text_input.py,sha256=
+zrb/input/text_input.py,sha256=cVmuNBTFl7oulwG6VVWI-QTECLSdP_0xwIaCZpkkaM8,3143
 zrb/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/runner/cli.py,sha256=G_ILZCFzpV-kRE3dm1kq6BorB51TLJ34Qmhgy5SIMlU,6734
-zrb/runner/common_util.py,sha256=
+zrb/runner/common_util.py,sha256=mjEBSmfdY2Sun2U5-8y8gGwF82OiRM8sgiYDOdW9NA4,1338
 zrb/runner/web_app.py,sha256=Ji2AWeFpJu5guXmur7mAAbjMToyjgmPDdfYu8047FFI,2616
 zrb/runner/web_config/config.py,sha256=0wR58KreAmawGGfamm0GLZY344HaXs7qfDgHLavBDwo,3125
 zrb/runner/web_config/config_factory.py,sha256=GNByKviNhQF5qG2ypmC_mV2xglzWHLVQC0x2SQJjrbA,894
@@ -223,7 +225,7 @@ zrb/runner/web_route/node_page/group/view.html,sha256=wISun627ciFZcvGpxANG0pr1zg
 zrb/runner/web_route/node_page/node_page_route.py,sha256=LYi60eZ5ZGgykTIeSQk5Hn9OYjh3ocYgBIAue7Bznvw,2268
 zrb/runner/web_route/node_page/task/partial/input.html,sha256=X2jy0q7TLQGP853exZMed0lqPezL3gzn6mnhB5QKfkc,178
 zrb/runner/web_route/node_page/task/show_task_page.py,sha256=0HIFEuy5DLOKqff4Wib5WaMe5Om0B4C7BH63pPIA-OU,2639
-zrb/runner/web_route/node_page/task/view.html,sha256=
+zrb/runner/web_route/node_page/task/view.html,sha256=T6kXNYKGhHsY5A74vQt_tj68WrULmPRaSuMqI1ygD1o,3605
 zrb/runner/web_route/refresh_token_api_route.py,sha256=JOuzhQUtRA62w3l27mq-jXgpaV7Rbj20jzxpQacssio,1478
 zrb/runner/web_route/static/refresh-token.template.js,sha256=v_nF7nU1AXp-KtsHNNzamhciEi7NCSTPEDT5hCxn29g,735
 zrb/runner/web_route/static/resources/common.css,sha256=u5rGLsPx2943z324iQ2X81krM3z-kc-8e1SkBdYAvKU,157
@@ -232,8 +234,8 @@ zrb/runner/web_route/static/resources/login/event.js,sha256=1-NxaUwU-X7Tu2RAwVkz
 zrb/runner/web_route/static/resources/logout/event.js,sha256=MfZxrTa2yL49Lbh7cCZDdqsIcf9e1q3W8-WjmZXV5pA,692
 zrb/runner/web_route/static/resources/pico.min.css,sha256=_Esfkjs_U_igYn-tXBUaK3AEKb7d4l9DlmaOiw9bXfI,82214
 zrb/runner/web_route/static/resources/session/common-util.js,sha256=t7_s5DXgMyZlT8L8LYZTkzOT6vWVeZvmCKjt-bflQY0,2117
-zrb/runner/web_route/static/resources/session/current-session.js,sha256=
-zrb/runner/web_route/static/resources/session/event.js,sha256=
+zrb/runner/web_route/static/resources/session/current-session.js,sha256=GlRBLwItCwITqVR_hUQFr6W1myD9WRl8R_TTbrzCovw,6739
+zrb/runner/web_route/static/resources/session/event.js,sha256=X5OlSHefK0SDB9VkFCRyBKE_Pb7mqM319mW9jRGoDOk,4716
 zrb/runner/web_route/static/resources/session/past-session.js,sha256=RwGJYKSp75K8NZ-iZP58XppWgdzkiKFaiC5wgcMLxDo,5470
 zrb/runner/web_route/static/static_route.py,sha256=7x069VfACZLkLykY0vLL5t13jIQPgkeEJtkpbfNQfLg,1540
 zrb/runner/web_route/task_input_api_route.py,sha256=xkZ36vmHXMPj0ekp6ocUysO0QUgl1PLaSHt3YL_OfK8,1749
@@ -260,7 +262,7 @@ zrb/task/base_task.py,sha256=ImA0ReyB6neVUfY4nKLnL0h2EMGIJ9wvvNvIAN92-RE,21194
 zrb/task/base_trigger.py,sha256=jC722rDvodaBLeNaFghkTyv1u0QXrK6BLZUUqcmBJ7Q,4581
 zrb/task/cmd_task.py,sha256=JpClYoEmJTqKSxhuCErXd2kHLS3Hk2zXeYnl7entNeU,10378
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
-zrb/task/llm_task.py,sha256=
+zrb/task/llm_task.py,sha256=hU1waf3NFc41YxdIe0k9z1eKrcPDaBUo4imuEq737TU,7782
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=pVVslZ46qgcpU_EKhyTQEQie8kUOMuTsVQdbQG2L-yk,6318
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -291,7 +293,7 @@ zrb/util/file.py,sha256=cBPkIonfcWytoqtG3ScJd6FFK7HVYeCIuLmfAFO1HIQ,791
 zrb/util/git.py,sha256=o_kLF1fySv5uQdLlhY-ztc-z0zLOdcDf0IpuPAl2ciA,4733
 zrb/util/git_subtree.py,sha256=US8oCHUOKgt14Ie6SaEelwTs__jLGLPsDQZvI-1P4KY,2640
 zrb/util/group.py,sha256=Bg7HrSycoK110U5s_Tca6-uUQuZ5CMgb8wxZSrvDQ98,2790
-zrb/util/llm/tool.py,sha256=
+zrb/util/llm/tool.py,sha256=NkENrUlGxcqqU7jzHAH7DBXNcm_ndEo2dFnJ5nhvWmk,2991
 zrb/util/load.py,sha256=i8_83ApWJXlZlbFMNfEptrOzfXdvtaIhAErsd6tU9y8,1649
 zrb/util/run.py,sha256=DGHUP9x1Q8V8UF3FbpmjLGuhVVCCLfjTH2teT8qXlNI,207
 zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -301,7 +303,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
 zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.0.
-zrb-1.0.
-zrb-1.0.
-zrb-1.0.
+zrb-1.0.0b5.dist-info/METADATA,sha256=BhCdzcbWjo2XbxLbBVo7JtQpGUAnQS1CnKyRgTkN-cM,4228
+zrb-1.0.0b5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.0.0b5.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.0.0b5.dist-info/RECORD,,

{zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/WHEEL
File without changes

{zrb-1.0.0b3.dist-info → zrb-1.0.0b5.dist-info}/entry_points.txt
File without changes