zrb 1.0.0b4__py3-none-any.whl → 1.0.0b6__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
zrb/builtin/llm/llm_chat.py CHANGED
@@ -92,8 +92,8 @@ llm_chat: LLMTask = llm_group.add_task(
         ),
         BoolInput(
             "start-new",
-            description="Start new conversation session",
-            prompt="Forget everything and start new conversation session",
+            description="Start new conversation (LLM will forget everything)",
+            prompt="Start new conversation (LLM will forget everything)",
             default_str="false",
             allow_positional_parsing=False,
         ),
zrb/builtin/llm/previous-session.js CHANGED
@@ -1,7 +1,6 @@
-let hasUpdateCurrentPascalInputName = false;
-document.getElementById("submit-task-form").addEventListener("change", async function(event) {
+async function updatePreviousSession(event) {
     const currentInput = event.target;
-    if (hasUpdateCurrentPascalInputName || currentInput.name === "CURRENT_INPUT_NAME") {
+    if (currentInput.name === "CURRENT_INPUT_NAME") {
         return
     }
     const previousSessionInput = submitTaskForm.querySelector('[name="CURRENT_INPUT_NAME"]');
@@ -9,5 +8,14 @@ document.getElementById("submit-task-form").addEventListener("change", async fun
         const currentSessionName = cfg.SESSION_NAME
         previousSessionInput.value = currentSessionName;
     }
-    hasUpdateCurrentPascalInputName = true;
+}
+
+document.getElementById("submit-task-form").querySelectorAll("input[name], textarea[name]").forEach((element) => {
+    element.addEventListener("input", updatePreviousSession);
+    element.addEventListener("keyup", updatePreviousSession);
 });
+
+document.getElementById("submit-task-form").querySelectorAll("select[name]").forEach((element) => {
+    element.addEventListener("change", updatePreviousSession);
+});
+
zrb/builtin/llm/tool/rag.py CHANGED
@@ -3,7 +3,6 @@ import json
 import os
 import sys
 
-import litellm
 import ulid
 
 from zrb.config import (
@@ -30,7 +29,9 @@ def create_rag_from_directory(
     async def retrieve(query: str) -> str:
        from chromadb import PersistentClient
        from chromadb.config import Settings
+       from fastembed import TextEmbedding
 
+       embedding_model = TextEmbedding(model_name=model)
        client = PersistentClient(
            path=vector_db_path, settings=Settings(allow_reset=True)
        )
@@ -75,10 +76,8 @@ def create_rag_from_directory(
                        ),
                        file=sys.stderr,
                    )
-                   response = await litellm.aembedding(
-                       model=model, input=[chunk]
-                   )
-                   vector = response["data"][0]["embedding"]
+                   embedding_result = list(embedding_model.embed([chunk]))
+                   vector = embedding_result[0]
                    collection.upsert(
                        ids=[chunk_id],
                        embeddings=[vector],
@@ -102,9 +101,8 @@ def create_rag_from_directory(
        )
 
        print(stylize_faint("Vectorizing query"), file=sys.stderr)
-       query_response = await litellm.aembedding(model=model, input=[query])
-       query_vector = query_response["data"][0]["embedding"]
-
+       embedding_result = list(embedding_model.embed([query]))
+       query_vector = embedding_result[0]
        print(stylize_faint("Searching documents"), file=sys.stderr)
        results = collection.query(
            query_embeddings=query_vector,
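
Note: the retrieval tool now embeds locally through fastembed instead of calling litellm. A minimal sketch of the fastembed API used above, assuming the new default model from zrb/config.py (the printed dimensionality is an assumption that depends on the model):

    from fastembed import TextEmbedding

    # Downloads model weights on first use, then embeds locally (no API calls).
    embedding_model = TextEmbedding(model_name="nomic-ai/nomic-embed-text-v1.5-Q")
    # embed() returns a generator of numpy arrays, one vector per input text.
    vectors = list(embedding_model.embed(["What is zrb?", "zrb is an automation tool"]))
    print(len(vectors), vectors[0].shape)  # e.g. 2 (768,)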
zrb/config.py CHANGED
@@ -78,22 +78,14 @@ WEB_AUTH_REFRESH_TOKEN_EXPIRE_MINUTES = int(
 )
 LLM_MODEL = os.getenv("ZRB_LLM_MODEL", "ollama_chat/llama3.1")
 
-_DEFAULT_PROMPT = """
-You are a helpful assistant and you have access to several tools.
-Your goal is to provide a final answer by executing a series of planning, actions, reasoning, and evaluations.
-
-Breakdown user request into several actionable tasks. For example, when user ask about current weather on current location, you should get the current location first.
-
-DO NOT TRY TO SIMULATE TOOL OUTPUT.
-
-ERROR HANDLING
-1. If you receive an error, read the error message carefully and identify the specific issue.
-2. Adjust your response accordingly and perform the review process again before resubmitting.
-
-REMINDER:
-- ALWAYS double-check your response format and function arguments before submitting.
-- DON'T make up answers.
-""".strip()
+_DEFAULT_PROMPT = (
+    "You are a helpful AI assistant capable of using various tools to answer user queries. When solving a problem:\n"
+    "1. Carefully analyze the user's request and identify what information is needed to provide a complete answer.\n"
+    "2. Determine which available tools can help you gather the necessary information.\n"
+    "3. Call tools strategically and in a logical sequence to collect required data.\n"
+    "4. If a tool provides incomplete information, intelligently decide which additional tool or approach to use.\n"
+    "5. Always aim to provide the most accurate and helpful response possible."
+)
 LLM_SYSTEM_PROMPT = os.getenv("ZRB_LLM_SYSTEM_PROMPT", _DEFAULT_PROMPT)
 LLM_HISTORY_DIR = os.getenv(
     "ZRB_LLM_HISTORY_DIR", os.path.expanduser(os.path.join("~", ".zrb-llm-history"))
@@ -103,7 +95,10 @@ LLM_HISTORY_FILE = os.getenv(
 )
 LLM_ALLOW_ACCESS_SHELL = to_boolean(os.getenv("ZRB_LLM_ACCESS_FILE", "1"))
 LLM_ALLOW_ACCESS_INTERNET = to_boolean(os.getenv("ZRB_LLM_ACCESS_INTERNET", "1"))
-RAG_EMBEDDING_MODEL = os.getenv("ZRB_RAG_EMBEDDING_MODEL", "ollama/nomic-embed-text")
+# noqa See: https://qdrant.github.io/fastembed/examples/Supported_Models/#supported-text-embedding-models
+RAG_EMBEDDING_MODEL = os.getenv(
+    "ZRB_RAG_EMBEDDING_MODEL", "nomic-ai/nomic-embed-text-v1.5-Q"
+)
 RAG_CHUNK_SIZE = int(os.getenv("ZRB_RAG_CHUNK_SIZE", "1024"))
 RAG_OVERLAP = int(os.getenv("ZRB_RAG_OVERLAP", "128"))
 RAG_MAX_RESULT_COUNT = int(os.getenv("ZRB_RAG_MAX_RESULT_COUNT", "5"))
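
Note: these values are read once at import time, so overrides must be set before zrb.config is imported. A minimal sketch, assuming BAAI/bge-small-en-v1.5 as a substitute fastembed-supported model:

    import os

    # Set overrides before importing zrb.config; values are read at import time.
    os.environ["ZRB_RAG_EMBEDDING_MODEL"] = "BAAI/bge-small-en-v1.5"
    os.environ["ZRB_RAG_CHUNK_SIZE"] = "512"
    os.environ["ZRB_RAG_OVERLAP"] = "64"

    from zrb.config import RAG_CHUNK_SIZE, RAG_EMBEDDING_MODEL, RAG_OVERLAP

    print(RAG_EMBEDDING_MODEL, RAG_CHUNK_SIZE, RAG_OVERLAP)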
zrb/runner/web_route/node_page/task/view.html CHANGED
@@ -63,7 +63,7 @@
     <article>
         <form id="submit-task-form" onsubmit="submitNewSessionForm(event)">
             {task_inputs}
-            <button>🚀 Run</button>
+            <button>🚀 Run New Session</button>
         </form>
     </article>
     <article>
zrb/runner/web_route/static/resources/session/current-session.js CHANGED
@@ -4,18 +4,22 @@ const CURRENT_SESSION = {
     const logTextarea = document.getElementById("log-textarea");
     const submitTaskForm = document.getElementById("submit-task-form");
     let isFinished = false;
+    let isInputUpdated = false;
     let errorCount = 0;
     while (!isFinished) {
         try {
             const data = await this.getCurrentSession();
             // update inputs
-            const dataInputs = data.input;
-            for (const inputName in dataInputs) {
-                const inputValue = dataInputs[inputName];
-                const input = submitTaskForm.querySelector(`[name="${inputName}"]`);
-                if (input) {
-                    input.value = inputValue;
+            if (!isInputUpdated) {
+                const dataInputs = data.input;
+                for (const inputName in dataInputs) {
+                    const inputValue = dataInputs[inputName];
+                    const input = submitTaskForm.querySelector(`[name="${inputName}"]`);
+                    if (input) {
+                        input.value = inputValue;
+                    }
                 }
+                isInputUpdated = true;
             }
             resultLineCount = data.final_result.split("\n").length;
             resultTextarea.rows = resultLineCount <= 5 ? resultLineCount : 5;
zrb/runner/web_route/static/resources/session/event.js CHANGED
@@ -20,7 +20,7 @@ window.addEventListener("load", async function () {
 
 
 const submitTaskForm = document.getElementById("submit-task-form");
-submitTaskForm.addEventListener("change", async function(event) {
+async function handleInputUpdate(event) {
     const currentInput = event.target;
     const inputs = Array.from(submitTaskForm.querySelectorAll("input[name], textarea[name], select[name]"));
     const inputMap = {};
@@ -59,6 +59,7 @@ submitTaskForm.addEventListener("change", async function(event) {
             if (value === "") {
                 return;
             }
+            console.log(input, data);
             input.value = value;
         });
     } else {
@@ -67,6 +68,14 @@ submitTaskForm.addEventListener("change", async function(event) {
     } catch (error) {
         console.error("Error during fetch:", error);
     }
+}
+
+submitTaskForm.querySelectorAll("input[name], textarea[name]").forEach((element) => {
+    element.addEventListener("input", handleInputUpdate);
+    element.addEventListener("keyup", handleInputUpdate);
+});
+submitTaskForm.querySelectorAll("select[name]").forEach((element) => {
+    element.addEventListener("change", handleInputUpdate);
 });
 
 
zrb/task/llm_task.py CHANGED
@@ -3,7 +3,9 @@ import os
 from collections.abc import Callable
 from typing import Any
 
-from pydantic import BaseModel
+from pydantic_ai import Agent, Tool
+from pydantic_ai.messages import ModelMessagesTypeAdapter
+from pydantic_ai.settings import ModelSettings
 
 from zrb.attr.type import StrAttr
 from zrb.config import LLM_MODEL, LLM_SYSTEM_PROMPT
@@ -16,25 +18,10 @@ from zrb.task.base_task import BaseTask
 from zrb.util.attr import get_str_attr
 from zrb.util.cli.style import stylize_faint
 from zrb.util.file import read_file, write_file
-from zrb.util.llm.tool import callable_to_tool_schema
 from zrb.util.run import run_async
 
 ListOfDict = list[dict[str, Any]]
-
-
-class AdditionalTool(BaseModel):
-    fn: Callable
-    name: str | None
-
-
-def scratchpad(thought: str) -> str:
-    """Write your thought, analysis, reasoning, and evaluation here."""
-    return thought
-
-
-def end_conversation(final_answer: str) -> str:
-    """End conversation with a final answer containing all necessary information"""
-    return final_answer
+ToolOrCallable = Tool | Callable
 
 
 class LLMTask(BaseTask):
@@ -48,11 +35,17 @@
         input: list[AnyInput | None] | AnyInput | None = None,
         env: list[AnyEnv | None] | AnyEnv | None = None,
         model: StrAttr | None = LLM_MODEL,
+        model_settings: (
+            ModelSettings | Callable[[AnySharedContext], ModelSettings] | None
+        ) = None,
         render_model: bool = True,
+        agent: Agent | Callable[[AnySharedContext], Agent] | None = None,
         system_prompt: StrAttr | None = LLM_SYSTEM_PROMPT,
         render_system_prompt: bool = True,
         message: StrAttr | None = None,
-        tools: list[Callable] | Callable[[AnySharedContext], list[Callable]] = [],
+        tools: (
+            list[ToolOrCallable] | Callable[[AnySharedContext], list[ToolOrCallable]]
+        ) = [],
         conversation_history: (
             ListOfDict | Callable[[AnySharedContext], ListOfDict]
         ) = [],
@@ -64,9 +57,6 @@
         ) = None,
         conversation_history_file: StrAttr | None = None,
         render_history_file: bool = True,
-        model_kwargs: (
-            dict[str, Any] | Callable[[AnySharedContext], dict[str, Any]]
-        ) = {},
         execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
         retries: int = 2,
         retry_period: float = 0,
@@ -76,6 +66,7 @@
         readiness_failure_threshold: int = 1,
         readiness_timeout: int = 60,
         monitor_readiness: bool = False,
+        max_call_iteration: int = 20,
         upstream: list[AnyTask] | AnyTask | None = None,
         fallback: list[AnyTask] | AnyTask | None = None,
         successor: list[AnyTask] | AnyTask | None = None,
@@ -102,97 +93,37 @@
             successor=successor,
         )
         self._model = model
+        self._model_settings = (model_settings,)
+        self._agent = agent
         self._render_model = render_model
-        self._model_kwargs = model_kwargs
         self._system_prompt = system_prompt
         self._render_system_prompt = render_system_prompt
         self._message = message
         self._tools = tools
+        self._additional_tools: list[ToolOrCallable] = []
         self._conversation_history = conversation_history
         self._conversation_history_reader = conversation_history_reader
         self._conversation_history_writer = conversation_history_writer
         self._conversation_history_file = conversation_history_file
         self._render_history_file = render_history_file
+        self._max_call_iteration = max_call_iteration
 
-    def add_tool(self, tool: Callable):
-        self._tools.append(tool)
+    def add_tool(self, tool: ToolOrCallable):
+        self._additional_tools.append(tool)
 
     async def _exec_action(self, ctx: AnyContext) -> Any:
-        import litellm
-        from litellm.utils import supports_function_calling
-
-        user_message = {"role": "user", "content": self._get_message(ctx)}
-        ctx.print(stylize_faint(f"{user_message}"))
-        model = self._get_model(ctx)
-        try:
-            is_function_call_supported = supports_function_calling(model=model)
-        except Exception:
-            is_function_call_supported = False
-            litellm.add_function_to_prompt = True
-        if not is_function_call_supported:
-            ctx.log_warning(f"Model {model} doesn't support function call")
-        available_tools = self._get_available_tools(
-            ctx, include_end_conversation=not is_function_call_supported
-        )
-        model_kwargs = self._get_model_kwargs(ctx, available_tools)
-        ctx.log_debug("MODEL KWARGS", model_kwargs)
-        system_prompt = self._get_system_prompt(ctx)
-        ctx.log_debug("SYSTEM PROMPT", system_prompt)
         history = await self._read_conversation_history(ctx)
-        ctx.log_debug("HISTORY PROMPT", history)
-        conversations = history + [user_message]
-        while True:
-            llm_response = await self._get_llm_response(
-                model, system_prompt, conversations, model_kwargs
-            )
-            llm_response_dict = llm_response.to_dict()
-            ctx.print(stylize_faint(f"{llm_response_dict}"))
-            conversations.append(llm_response_dict)
-            ctx.log_debug("RESPONSE MESSAGE", llm_response)
-            if is_function_call_supported:
-                if not llm_response.tool_calls:
-                    # No tool call, end conversation
-                    await self._write_conversation_history(ctx, conversations)
-                    return llm_response.content
-                await self._handle_tool_calls(
-                    ctx, available_tools, conversations, llm_response
-                )
-            if not is_function_call_supported:
-                try:
-                    json_payload = json.loads(llm_response.content)
-                    function_name = _get_fallback_function_name(json_payload)
-                    function_kwargs = _get_fallback_function_kwargs(json_payload)
-                    tool_execution_message = (
-                        await self._create_fallback_tool_exec_message(
-                            available_tools, function_name, function_kwargs
-                        )
-                    )
-                    ctx.print(stylize_faint(f"{tool_execution_message}"))
-                    conversations.append(tool_execution_message)
-                    if function_name == "end_conversation":
-                        await self._write_conversation_history(ctx, conversations)
-                        return function_kwargs.get("final_answer", "")
-                except Exception as e:
-                    ctx.log_error(e)
-                    tool_execution_message = self._create_exec_scratchpad_message(
-                        f"{e}"
-                    )
-                    conversations.append(tool_execution_message)
-
-    async def _handle_tool_calls(
-        self,
-        ctx: AnyContext,
-        available_tools: dict[str, Callable],
-        conversations: list[dict[str, Any]],
-        llm_response: Any,
-    ):
-        # noqa Reference: https://docs.litellm.ai/docs/completion/function_call#full-code---parallel-function-calling-with-gpt-35-turbo-1106
-        for tool_call in llm_response.tool_calls:
-            tool_execution_message = await self._create_tool_exec_message(
-                available_tools, tool_call
-            )
-            ctx.print(stylize_faint(f"{tool_execution_message}"))
-            conversations.append(tool_execution_message)
+        user_prompt = self._get_message(ctx)
+        agent = self._get_agent(ctx)
+        result = await agent.run(
+            user_prompt=user_prompt,
+            message_history=ModelMessagesTypeAdapter.validate_python(history),
+        )
+        new_history = json.loads(result.all_messages_json())
+        for history in new_history:
+            ctx.print(stylize_faint(json.dumps(history)))
+        await self._write_conversation_history(ctx, new_history)
+        return result.data
 
     async def _write_conversation_history(
         self, ctx: AnyContext, conversations: list[Any]
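
Note: _exec_action now delegates the entire tool-calling loop to pydantic-ai. A self-contained sketch of that flow, with an illustrative model identifier and tool (pydantic-ai accepts strings such as "openai:gpt-4o"; zrb resolves its own model via _get_model):

    import asyncio
    import json

    from pydantic_ai import Agent, Tool
    from pydantic_ai.messages import ModelMessagesTypeAdapter

    def get_weather(city: str) -> str:
        """Return a dummy weather report for a city."""
        return f"It is sunny in {city}"

    agent = Agent(
        "openai:gpt-4o",  # assumed model identifier, not a zrb default
        system_prompt="You are a helpful AI assistant.",
        tools=[Tool(get_weather, takes_ctx=False)],
    )

    async def main() -> None:
        history: list = []  # previously persisted conversation, if any
        result = await agent.run(
            user_prompt="How is the weather in Tokyo?",
            message_history=ModelMessagesTypeAdapter.validate_python(history),
        )
        # all_messages_json() yields a JSON-serializable history, ready to persist.
        new_history = json.loads(result.all_messages_json())
        print(result.data, len(new_history))

    asyncio.run(main())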
@@ -203,71 +134,32 @@
         if history_file != "":
             write_file(history_file, json.dumps(conversations, indent=2))
 
-    async def _get_llm_response(
-        self,
-        model: str,
-        system_prompt: str,
-        conversations: list[Any],
-        model_kwargs: dict[str, Any],
-    ) -> Any:
-        from litellm import acompletion
-
-        llm_response = await acompletion(
-            model=model,
-            messages=[{"role": "system", "content": system_prompt}] + conversations,
-            **model_kwargs,
+    def _get_model_settings(self, ctx: AnyContext) -> ModelSettings | None:
+        if isinstance(self._model_settings, ModelSettings):
+            return self._model_settings
+        if callable(self._model_settings):
+            return self._model_settings(ctx)
+        return None
+
+    def _get_agent(self, ctx: AnyContext) -> Agent:
+        if isinstance(self._agent, Agent):
+            return self._agent
+        if callable(self._agent):
+            return self._agent(ctx)
+        tools_or_callables = list(
+            self._tools(ctx) if callable(self._tools) else self._tools
         )
-        return llm_response.choices[0].message
-
-    async def _create_tool_exec_message(
-        self, available_tools: dict[str, Callable], tool_call: Any
-    ) -> dict[str, Any]:
-        function_name = tool_call.function.name
-        function_kwargs = json.loads(tool_call.function.arguments)
-        return {
-            "tool_call_id": tool_call.id,
-            "role": "tool",
-            "name": function_name,
-            "content": await self._get_exec_tool_result(
-                available_tools, function_name, function_kwargs
-            ),
-        }
-
-    async def _create_fallback_tool_exec_message(
-        self,
-        available_tools: dict[str, Callable],
-        function_name: str,
-        function_kwargs: dict[str, Any],
-    ) -> dict[str, Any]:
-        result = await self._get_exec_tool_result(
-            available_tools, function_name, function_kwargs
-        )
-        return self._create_exec_scratchpad_message(
-            f"Result of {function_name} call: {result}"
+        tools_or_callables.extend(self._additional_tools)
+        tools = [
+            tool if isinstance(tool, Tool) else Tool(tool, takes_ctx=False)
+            for tool in tools_or_callables
+        ]
+        return Agent(
+            self._get_model(ctx),
+            system_prompt=self._get_system_prompt(ctx),
+            tools=tools,
         )
 
-    def _create_exec_scratchpad_message(self, message: str) -> dict[str, Any]:
-        return {
-            "role": "assistant",
-            "content": json.dumps(
-                {"name": "scratchpad", "arguments": {"thought": message}}
-            ),
-        }
-
-    async def _get_exec_tool_result(
-        self,
-        available_tools: dict[str, Callable],
-        function_name: str,
-        function_kwargs: dict[str, Any],
-    ) -> str:
-        if function_name not in available_tools:
-            return f"[ERROR] Invalid tool: {function_name}"
-        function_to_call = available_tools[function_name]
-        try:
-            return await run_async(function_to_call(**function_kwargs))
-        except Exception as e:
-            return f"[ERROR] {e}"
-
     def _get_model(self, ctx: AnyContext) -> str:
         return get_str_attr(
             ctx, self._model, "ollama_chat/llama3.1", auto_render=self._render_model
@@ -284,30 +176,6 @@
     def _get_message(self, ctx: AnyContext) -> str:
         return get_str_attr(ctx, self._message, "How are you?", auto_render=True)
 
-    def _get_model_kwargs(
-        self, ctx: AnyContext, available_tools: dict[str, Callable]
-    ) -> dict[str, Any]:
-        model_kwargs = {}
-        if callable(self._model_kwargs):
-            model_kwargs = self._model_kwargs(ctx)
-        else:
-            model_kwargs = self._model_kwargs
-        model_kwargs["tools"] = [
-            callable_to_tool_schema(tool) for tool in available_tools.values()
-        ]
-        return model_kwargs
-
-    def _get_available_tools(
-        self, ctx: AnyContext, include_end_conversation: bool
-    ) -> dict[str, Callable]:
-        tools = {"scratchpad": scratchpad}
-        if include_end_conversation:
-            tools["end_conversation"] = end_conversation
-        tool_list = self._tools(ctx) if callable(self._tools) else self._tools
-        for tool in tool_list:
-            tools[tool.__name__] = tool
-        return tools
-
     async def _read_conversation_history(self, ctx: AnyContext) -> ListOfDict:
         if self._conversation_history_reader is not None:
             return await run_async(self._conversation_history_reader(ctx))
@@ -329,22 +197,3 @@
             "",
             auto_render=self._render_history_file,
         )
-
-
-def _get_fallback_function_name(json_payload: dict[str, Any]) -> str:
-    for key in ("name",):
-        if key in json_payload:
-            return json_payload[key]
-    raise ValueError("Function name not provided")
-
-
-def _get_fallback_function_kwargs(json_payload: dict[str, Any]) -> str:
-    for key in (
-        "arguments",
-        "args",
-        "parameters",
-        "params",
-    ):
-        if key in json_payload:
-            return json_payload[key]
-    raise ValueError("Function arguments not provided")
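
Note: tools passed to the constructor and tools registered later via add_tool() are merged into the same pydantic-ai Agent by _get_agent(). A hedged sketch of that path; the task name and tool bodies are illustrative assumptions:

    from zrb.task.llm_task import LLMTask

    def get_weather(city: str) -> str:
        """Return a dummy weather report for a city."""
        return f"It is sunny in {city}"

    def get_time() -> str:
        """Return a placeholder current time."""
        return "12:00"

    # Plain callables are wrapped as Tool(tool, takes_ctx=False) inside _get_agent().
    ask = LLMTask(
        name="ask",
        message="How is the weather in Tokyo, and what time is it?",
        tools=[get_weather],
    )
    ask.add_tool(get_time)  # lands in _additional_tools and is merged at run time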
zrb/util/llm/tool.py CHANGED
@@ -1,6 +1,6 @@
 import inspect
 from collections.abc import Callable
-from typing import Any, get_type_hints
+from typing import Annotated, Any, Literal, get_type_hints
 
 
 def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
@@ -21,10 +21,14 @@ def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
     # Build parameter schema
     param_schema = {"type": "object", "properties": {}, "required": []}
     for param_name, param in sig.parameters.items():
-        param_type = hints.get(param_name, str)  # Default type is string
-        param_schema["properties"][param_name] = {
-            "type": _python_type_to_json_type(param_type)
-        }
+        # Get the type hint or default to str
+        param_type = hints.get(param_name, str)
+
+        # Handle annotated types (e.g., Annotated[str, "description"])
+        json_type, param_metadata = _process_type_annotation(param_type)
+        param_schema["properties"][param_name] = param_metadata
+
+        # Mark required parameters
         if param.default is inspect.Parameter.empty:
             param_schema["required"].append(param_name)
     return {
@@ -37,6 +41,30 @@ def callable_to_tool_schema(callable_obj: Callable) -> dict[str, Any]:
     }
 
 
+def _process_type_annotation(py_type: Any) -> tuple[str, dict]:
+    """
+    Process type annotations and return the JSON Schema type and metadata.
+
+    :param py_type: The type annotation.
+    :return: A tuple of (JSON type, parameter metadata).
+    """
+    if hasattr(py_type, "__origin__") and py_type.__origin__ is Literal:
+        # Handle Literal (enum)
+        enum_values = list(py_type.__args__)
+        return "string", {"type": "string", "enum": enum_values}
+
+    if hasattr(py_type, "__origin__") and py_type.__origin__ is Annotated:
+        # Handle Annotated types
+        base_type = py_type.__args__[0]
+        description = py_type.__args__[1]
+        json_type = _python_type_to_json_type(base_type)
+        return json_type, {"type": json_type, "description": description}
+
+    # Fallback to basic type conversion
+    json_type = _python_type_to_json_type(py_type)
+    return json_type, {"type": json_type}
+
+
 def _python_type_to_json_type(py_type):
     """
     Map Python types to JSON Schema types.
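
Note: a hedged sketch of exercising the enriched schema generation; the function below is illustrative, and the commented output covers only the parameter portion (and assumes the hints are resolved with Annotated metadata preserved), since the full return shape is not visible in this hunk:

    from typing import Annotated, Literal

    from zrb.util.llm.tool import callable_to_tool_schema

    def get_forecast(
        city: Annotated[str, "Name of the city"],
        unit: Literal["celsius", "fahrenheit"] = "celsius",
    ) -> str:
        """Get a weather forecast for a city."""
        return f"Forecast for {city} in {unit}"

    schema = callable_to_tool_schema(get_forecast)
    print(schema)
    # Expected parameter portion:
    #   "city": {"type": "string", "description": "Name of the city"}   (via Annotated)
    #   "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}   (via Literal)
    # Only "city" is required, because "unit" has a default value.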
zrb-1.0.0b4.dist-info/METADATA → zrb-1.0.0b6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: zrb
-Version: 1.0.0b4
+Version: 1.0.0b6
 Summary: Your Automation Powerhouse
 Home-page: https://github.com/state-alchemists/zrb
 License: AGPL-3.0-or-later
@@ -19,10 +19,11 @@ Requires-Dist: beautifulsoup4 (>=4.12.3,<5.0.0)
 Requires-Dist: black (>=24.10.0,<24.11.0)
 Requires-Dist: chromadb (>=0.5.20,<0.6.0) ; extra == "rag"
 Requires-Dist: fastapi[standard] (>=0.115.6,<0.116.0)
+Requires-Dist: fastembed (>=0.5.1,<0.6.0)
 Requires-Dist: isort (>=5.13.2,<5.14.0)
 Requires-Dist: libcst (>=1.5.0,<2.0.0)
-Requires-Dist: litellm (>=1.52.12,<2.0.0)
 Requires-Dist: pdfplumber (>=0.11.4,<0.12.0) ; extra == "rag"
+Requires-Dist: pydantic-ai (>=0.0.19,<0.0.20)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: python-jose[cryptography] (>=3.3.0,<4.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
zrb-1.0.0b4.dist-info/RECORD → zrb-1.0.0b6.dist-info/RECORD CHANGED
@@ -7,11 +7,11 @@ zrb/builtin/base64.py,sha256=1YnSwASp7OEAvQcsnHZGpJEvYoI1Z2zTIJ1bCDHfcPQ,921
 zrb/builtin/git.py,sha256=xHzg0srhp1uOSXWvwA--Fo8idkt0G9010iJ8uIndzg4,5463
 zrb/builtin/git_subtree.py,sha256=GwI8befmvXEoX1xyZ4jkeG8nsyCkuRG1lzPiGss3yqw,3493
 zrb/builtin/group.py,sha256=-phJfVpTX3_gUwS1u8-RbZUHe-X41kxDBSmrVh4rq8E,1682
-zrb/builtin/llm/llm_chat.py,sha256=zrUzVjHBTOJS81--npePIJQo7zzmeERUVRMon91yI3c,4488
-zrb/builtin/llm/previous-session.js,sha256=lZcQIdxr_5Qygm9U2tkMlOg_-gz7G-r6YEdQqz0_RnE,578
+zrb/builtin/llm/llm_chat.py,sha256=vUUchYKJuHr-N_HarpKRFsV0EdQDAFZzAfbK4C0vx88,4508
+zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/api.py,sha256=yQ3XV8O7Fx7hHssLSOcmiHDnevPhz9ktWi44HK7zTls,801
 zrb/builtin/llm/tool/cli.py,sha256=to_IjkfrMGs6eLfG0cpVN9oyADWYsJQCtyluUhUdBww,253
-zrb/builtin/llm/tool/rag.py,sha256=jJRLERW6824JeEzEQ_OqLMaaa3mjuNqsRcRWoL1wVx0,5192
+zrb/builtin/llm/tool/rag.py,sha256=PawaLZL-ThctxtBtsQuP3XsgTxQKyCGFqrudCANPJKk,5162
 zrb/builtin/llm/tool/web.py,sha256=N2HYuXbKPUpjVAq_UnQMbUrTIE8u0Ut3TeQadZ7_NJc,2217
 zrb/builtin/md5.py,sha256=0pNlrfZA0wlZlHvFHLgyqN0JZJWGKQIF5oXxO44_OJk,949
 zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -171,7 +171,7 @@ zrb/callback/callback.py,sha256=hKefB_Jd1XGjPSLQdMKDsGLHPzEGO2dqrIArLl_EmD0,848
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config.py,sha256=tFPjLqgcb6cH33trooDgJRsoJBSneB03ccnzx90iH9M,4664
+zrb/config.py,sha256=RB0ikM82RnCDZaarJOEBfD7GeYPfcq6jLb6F359il1s,4677
 zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850
 zrb/content_transformer/content_transformer.py,sha256=YU6Xr3G_IaCWKQGsf9z9YlCclbiwcJ7ytQv3wKpPIiI,2125
@@ -225,7 +225,7 @@ zrb/runner/web_route/node_page/group/view.html,sha256=wISun627ciFZcvGpxANG0pr1zg
 zrb/runner/web_route/node_page/node_page_route.py,sha256=LYi60eZ5ZGgykTIeSQk5Hn9OYjh3ocYgBIAue7Bznvw,2268
 zrb/runner/web_route/node_page/task/partial/input.html,sha256=X2jy0q7TLQGP853exZMed0lqPezL3gzn6mnhB5QKfkc,178
 zrb/runner/web_route/node_page/task/show_task_page.py,sha256=0HIFEuy5DLOKqff4Wib5WaMe5Om0B4C7BH63pPIA-OU,2639
-zrb/runner/web_route/node_page/task/view.html,sha256=CwrIiPJAwHEEIVNp_wkTzW7kQJjTh41FSDamCswI3S8,3593
+zrb/runner/web_route/node_page/task/view.html,sha256=T6kXNYKGhHsY5A74vQt_tj68WrULmPRaSuMqI1ygD1o,3605
 zrb/runner/web_route/refresh_token_api_route.py,sha256=JOuzhQUtRA62w3l27mq-jXgpaV7Rbj20jzxpQacssio,1478
 zrb/runner/web_route/static/refresh-token.template.js,sha256=v_nF7nU1AXp-KtsHNNzamhciEi7NCSTPEDT5hCxn29g,735
 zrb/runner/web_route/static/resources/common.css,sha256=u5rGLsPx2943z324iQ2X81krM3z-kc-8e1SkBdYAvKU,157
@@ -234,8 +234,8 @@ zrb/runner/web_route/static/resources/login/event.js,sha256=1-NxaUwU-X7Tu2RAwVkz
 zrb/runner/web_route/static/resources/logout/event.js,sha256=MfZxrTa2yL49Lbh7cCZDdqsIcf9e1q3W8-WjmZXV5pA,692
 zrb/runner/web_route/static/resources/pico.min.css,sha256=_Esfkjs_U_igYn-tXBUaK3AEKb7d4l9DlmaOiw9bXfI,82214
 zrb/runner/web_route/static/resources/session/common-util.js,sha256=t7_s5DXgMyZlT8L8LYZTkzOT6vWVeZvmCKjt-bflQY0,2117
-zrb/runner/web_route/static/resources/session/current-session.js,sha256=Stewc8SOpv14ARJMlWJW4hegGMFCKwcnnNQZ2wKKW6I,6571
-zrb/runner/web_route/static/resources/session/event.js,sha256=1WujNriQupSX-zE-uqXlrzgZn6jchDKzWTATvDuZjhA,4359
+zrb/runner/web_route/static/resources/session/current-session.js,sha256=GlRBLwItCwITqVR_hUQFr6W1myD9WRl8R_TTbrzCovw,6739
+zrb/runner/web_route/static/resources/session/event.js,sha256=X5OlSHefK0SDB9VkFCRyBKE_Pb7mqM319mW9jRGoDOk,4716
 zrb/runner/web_route/static/resources/session/past-session.js,sha256=RwGJYKSp75K8NZ-iZP58XppWgdzkiKFaiC5wgcMLxDo,5470
 zrb/runner/web_route/static/static_route.py,sha256=7x069VfACZLkLykY0vLL5t13jIQPgkeEJtkpbfNQfLg,1540
 zrb/runner/web_route/task_input_api_route.py,sha256=xkZ36vmHXMPj0ekp6ocUysO0QUgl1PLaSHt3YL_OfK8,1749
@@ -262,7 +262,7 @@ zrb/task/base_task.py,sha256=ImA0ReyB6neVUfY4nKLnL0h2EMGIJ9wvvNvIAN92-RE,21194
 zrb/task/base_trigger.py,sha256=jC722rDvodaBLeNaFghkTyv1u0QXrK6BLZUUqcmBJ7Q,4581
 zrb/task/cmd_task.py,sha256=JpClYoEmJTqKSxhuCErXd2kHLS3Hk2zXeYnl7entNeU,10378
 zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
-zrb/task/llm_task.py,sha256=Hxu2kaAMHPquCE96bBt5MFh9A2EF_-bYeAMf0fOqSz8,13466
+zrb/task/llm_task.py,sha256=ptXC3x9Dwn7-4JrGQyEtzOXZ4dNQATDgCeowkvwAu9U,7723
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
 zrb/task/rsync_task.py,sha256=pVVslZ46qgcpU_EKhyTQEQie8kUOMuTsVQdbQG2L-yk,6318
 zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -293,7 +293,7 @@ zrb/util/file.py,sha256=cBPkIonfcWytoqtG3ScJd6FFK7HVYeCIuLmfAFO1HIQ,791
 zrb/util/git.py,sha256=o_kLF1fySv5uQdLlhY-ztc-z0zLOdcDf0IpuPAl2ciA,4733
 zrb/util/git_subtree.py,sha256=US8oCHUOKgt14Ie6SaEelwTs__jLGLPsDQZvI-1P4KY,2640
 zrb/util/group.py,sha256=Bg7HrSycoK110U5s_Tca6-uUQuZ5CMgb8wxZSrvDQ98,2790
-zrb/util/llm/tool.py,sha256=Ux58wYSymVCXxgIWDUObsluZzYzmfyB0uwgKagL0bVg,1898
+zrb/util/llm/tool.py,sha256=NkENrUlGxcqqU7jzHAH7DBXNcm_ndEo2dFnJ5nhvWmk,2991
 zrb/util/load.py,sha256=i8_83ApWJXlZlbFMNfEptrOzfXdvtaIhAErsd6tU9y8,1649
 zrb/util/run.py,sha256=DGHUP9x1Q8V8UF3FbpmjLGuhVVCCLfjTH2teT8qXlNI,207
 zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -303,7 +303,7 @@ zrb/util/string/name.py,sha256=8picJfUBXNpdh64GNaHv3om23QHhUZux7DguFLrXHp8,1163
 zrb/util/todo.py,sha256=1nDdwPc22oFoK_1ZTXyf3638Bg6sqE2yp_U4_-frHoc,16015
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.0.0b4.dist-info/METADATA,sha256=G-QIlz6Gls_B72tCDApTqI-7B0minjgSYXZJDBAqp8Y,4224
-zrb-1.0.0b4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-zrb-1.0.0b4.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
-zrb-1.0.0b4.dist-info/RECORD,,
+zrb-1.0.0b6.dist-info/METADATA,sha256=ZGOBMXjA82fN6lsJ3yNmJTGqwoZWJcNEKCxXtTouk68,4270
+zrb-1.0.0b6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+zrb-1.0.0b6.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.0.0b6.dist-info/RECORD,,