versionhq 1.1.9.0__py3-none-any.whl → 1.1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -18,7 +18,7 @@ from versionhq.tool.model import Tool
  from versionhq.tool.composio import Composio


- __version__ = "1.1.9.0"
+ __version__ = "1.1.9.1"
  __all__ = [
  "Agent",
  "Customer",
versionhq/_utils/cache_handler.py CHANGED
@@ -6,8 +6,8 @@ from pydantic import BaseModel, PrivateAttr
  class CacheHandler(BaseModel):
  _cache: Dict[str, Any] = PrivateAttr(default_factory=dict)

- def add(self, tool, input, output):
+ def add(self, tool: str, input: str, output: Any) -> None:
  self._cache[f"{tool}-{input}"] = output

- def read(self, tool, input) -> Optional[str]:
+ def read(self, tool: str, input: str) -> Optional[str]:
  return self._cache.get(f"{tool}-{input}")
versionhq/_utils/rpm_controller.py CHANGED
@@ -1,6 +1,7 @@
  import threading
  import time
  from typing import Optional
+ from typing_extensions import Self

  from pydantic import BaseModel, Field, PrivateAttr, model_validator

@@ -16,14 +17,14 @@ class RPMController(BaseModel):
  _shutdown_flag: bool = PrivateAttr(default=False)

  @model_validator(mode="after")
- def reset_counter(self):
+ def reset_counter(self) -> Self:
  if self.max_rpm is not None:
  if not self._shutdown_flag:
  self._lock = threading.Lock()
  self._reset_request_count()
  return self

- def check_or_wait(self):
+ def check_or_wait(self) -> bool:
  if self.max_rpm is None:
  return True

@@ -46,16 +47,16 @@ class RPMController(BaseModel):
  else:
  return _check_and_increment()

- def stop_rpm_counter(self):
+ def stop_rpm_counter(self) -> None:
  if self._timer:
  self._timer.cancel()
  self._timer = None

- def _wait_for_next_minute(self):
+ def _wait_for_next_minute(self) -> None:
  time.sleep(60)
  self._current_rpm = 0

- def _reset_request_count(self):
+ def _reset_request_count(self) -> None:
  def _reset():
  self._current_rpm = 0
  if not self._shutdown_flag:
versionhq/_utils/usage_metrics.py CHANGED
@@ -12,7 +12,7 @@ class UsageMetrics(BaseModel):
  completion_tokens: int = Field(default=0, description="Number of tokens used in completions")
  successful_requests: int = Field(default=0, description="Number of successful requests made")

- def add_usage_metrics(self, usage_metrics: "UsageMetrics"):
+ def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
  """
  Add the usage metrics from another UsageMetrics object.
  """
versionhq/agent/model.py CHANGED
@@ -1,7 +1,7 @@
  import os
  import uuid
- from abc import ABC
  from typing import Any, Dict, List, Optional, TypeVar
+ from typing_extensions import Self
  from dotenv import load_dotenv
  from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
  from pydantic_core import PydanticCustomError
@@ -16,7 +16,6 @@ from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW
  from versionhq.task import TaskOutputFormat
  from versionhq.task.model import ResponseField
  from versionhq.tool.model import Tool, ToolSet
- from versionhq.tool.tool_handler import ToolHandler

  load_dotenv(override=True)
  T = TypeVar("T", bound="Agent")
@@ -51,18 +50,18 @@ class TokenProcess:
  completion_tokens: int = 0
  successful_requests: int = 0

- def sum_prompt_tokens(self, tokens: int):
+ def sum_prompt_tokens(self, tokens: int) -> None:
  self.prompt_tokens = self.prompt_tokens + tokens
  self.total_tokens = self.total_tokens + tokens

- def sum_completion_tokens(self, tokens: int):
+ def sum_completion_tokens(self, tokens: int) -> None:
  self.completion_tokens = self.completion_tokens + tokens
  self.total_tokens = self.total_tokens + tokens

- def sum_cached_prompt_tokens(self, tokens: int):
+ def sum_cached_prompt_tokens(self, tokens: int) -> None:
  self.cached_prompt_tokens = self.cached_prompt_tokens + tokens

- def sum_successful_requests(self, requests: int):
+ def sum_successful_requests(self, requests: int) -> None:
  self.successful_requests = self.successful_requests + requests

  def get_summary(self) -> UsageMetrics:
@@ -76,7 +75,7 @@ class TokenProcess:


  # @track_agent()
- class Agent(ABC, BaseModel):
+ class Agent(BaseModel):
  """
  Agent class that run on LLM.
  Agents execute tasks alone or in the team, using RAG tools and knowledge base if any.
@@ -97,10 +96,7 @@ class Agent(ABC, BaseModel):
  backstory: Optional[str] = Field(default=None, description="system context passed to the LLM")
  knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
  skillsets: Optional[List[str]] = Field(default_factory=list)
-
- # tools
- tools: Optional[List[Any]] = Field(default_factory=list)
- tool_handler: InstanceOf[ToolHandler] = Field(default=None, description="handle tool cache and last used tool")
+ tools: Optional[List[Tool | Any]] = Field(default_factory=list)

  # team, task execution rules
  team: Optional[List[Any]] = Field(default=None, description="Team to which the agent belongs")
@@ -146,7 +142,7 @@ class Agent(ABC, BaseModel):


  @model_validator(mode="after")
- def validate_required_fields(self):
+ def validate_required_fields(self) -> Self:
  required_fields = ["role", "goal"]
  for field in required_fields:
  if getattr(self, field) is None:
@@ -155,7 +151,7 @@ class Agent(ABC, BaseModel):


  @model_validator(mode="after")
- def set_up_llm(self):
+ def set_up_llm(self) -> Self:
  """
  Set up the base model and function calling model (if any) using the LLM class.
  Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `step_callback`,`respect_context_window` to the LLM class.
@@ -270,31 +266,42 @@ class Agent(ABC, BaseModel):


  @model_validator(mode="after")
- def set_up_tools(self):
+ def set_up_tools(self) -> Self:
  """
  Similar to the LLM set up, when the agent has tools, we will declare them using the Tool class.
  """
-
  if not self.tools:
  pass

  else:
- tools_in_class_format = []
- for tool in self.tools:
- if isinstance(tool, Tool):
- tools_in_class_format.append(tool)
- elif isinstance(tool, str):
- tool_to_add = Tool(name=tool)
- tools_in_class_format.append(tool_to_add)
+ tool_list = []
+ def empty_func():
+ return "empty function"
+
+ for item in self.tools:
+ if isinstance(item, Tool):
+ tool_list.append(item)
+
+ elif isinstance(item, dict):
+ if "function" not in item:
+ setattr(item, "function", empty_func)
+ tool = Tool(**item)
+ tool_list.append(tool)
+
+ elif isinstance(item, str):
+ tool = Tool(name=item, function=empty_func)
+ tool_list.append(tool)
+
  else:
- pass
- self.tools = tools_in_class_format
+ tool_list.append(item) # address custom tool
+
+ self.tools = tool_list

  return self


  @model_validator(mode="after")
- def set_up_backstory(self):
+ def set_up_backstory(self) -> Self:
  """
  Set up the backstory using a templated BACKSTORY when the backstory is None
  """
@@ -305,7 +312,7 @@ class Agent(ABC, BaseModel):
  role=self.role,
  knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
  skillsets=", ".join([item for item in self.skillsets]),
- rag_tool_overview=", ".join([item.name for item in self.tools]),
+ rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
  goal=self.goal,
  )
  self.backstory = backstory
@@ -313,9 +320,7 @@ class Agent(ABC, BaseModel):
  return self


- def invoke(
- self, prompts: str, output_formats: List[TaskOutputFormat], response_fields: List[ResponseField], **kwargs
- ) -> Dict[str, Any]:
+ def invoke(self, prompts: str, output_formats: List[TaskOutputFormat], response_fields: List[ResponseField], **kwargs) -> Dict[str, Any]:
  """
  Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
  Then call the base model.
@@ -358,11 +363,10 @@ class Agent(ABC, BaseModel):
  return {"output": response.output if hasattr(response, "output") else response}


- def execute_task(self, task, context: Optional[str] = None, tools: Optional[str] = None) -> str:
+ def execute_task(self, task, context: Optional[str] = None) -> str:
  """
- Execute the task and return the output in string.
- To simplify, the tools are cascaded from the `tools_called` under the `task` Task instance if any.
- When the tools are given, the agent must use them.
+ Execute the task and return the response in string.
+ The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
  The agent must consider the context to excute the task as well when it is given.
  """

@@ -371,13 +375,29 @@ class Agent(ABC, BaseModel):
  task_prompt += context

  tool_results = []
- if task.tools_called:
- for tool_called in task.tools_called:
- tool_result = tool_called.tool.run()
+ if task.tools:
+ for item in task.tools:
+ if isinstance(item, ToolSet):
+ tool_result = item.tool.run(**item.kwargs)
+ tool_results.append(tool_result)
+ elif isinstance(item, Tool):
+ tool_result = item.run()
+ tool_results.append(tool_result)
+ else:
+ try:
+ item.run()
+ except:
+ pass
+
+ if task.can_use_agent_tools is True and self.tools:
+ for tool in self.tools:
+ tool_result = tool.run()
  tool_results.append(tool_result)

- if task.take_tool_res_as_final:
- return tool_results
+ if task.take_tool_res_as_final:
+ return tool_results
+
+

  # if self.team and self.team._train:
  # task_prompt = self._training_handler(task_prompt=task_prompt)
@@ -395,9 +415,7 @@ class Agent(ABC, BaseModel):
  self._times_executed += 1
  if self._times_executed > self.max_retry_limit:
  raise e
- result = self.execute_task(
- task, context, [tool_called.tool for tool_called in task.tools_called]
- )
+ result = self.execute_task(task, context)

  if self.max_rpm and self._rpm_controller:
  self._rpm_controller.stop_rpm_counter()
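
Illustrative sketch (not part of the published diff): based on the reworked `set_up_tools` validator above, `Agent.tools` appears to accept ready-made `Tool` instances as well as plain strings, which are wrapped into `Tool` objects with a placeholder function. A minimal usage sketch, assuming an LLM key is configured in the environment; the role, goal, and tool names below are made up for illustration:

from versionhq.agent.model import Agent
from versionhq.tool.model import Tool

def lookup_docs(query: str) -> str:
    # toy callable standing in for a real RAG tool
    return f"docs for {query}"

agent = Agent(
    role="researcher",                                  # required by validate_required_fields
    goal="answer questions using the doc lookup tool",  # required by validate_required_fields
    tools=[
        Tool(name="doc_lookup", function=lookup_docs),  # Tool instance: kept as-is
        "scratchpad",                                   # str: wrapped as Tool(name="scratchpad", function=empty_func)
    ],
)
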
versionhq/agent/parser.py CHANGED
@@ -73,9 +73,7 @@ class AgentParser:
  def parse(self, text: str) -> AgentAction | AgentFinish:
  thought = self._extract_thought(text)
  includes_answer = FINAL_ANSWER_ACTION in text
- regex = (
- r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
- )
+ regex = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
  action_match = re.search(regex, text, re.DOTALL)
  if action_match:
  if includes_answer:
@@ -127,18 +125,9 @@ class AgentParser:
  def _safe_repair_json(self, tool_input: str) -> str:
  UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]

- # Skip repair if the input starts and ends with square brackets
- # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
- # These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
- # might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
- # the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
- # square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
  if tool_input.startswith("[") and tool_input.endswith("]"):
  return tool_input

- # Before repair, handle common LLM issues:
- # 1. Replace """ with " to avoid JSON parser errors
-
  tool_input = tool_input.replace('"""', '"')

  result = repair_json(tool_input)
versionhq/clients/customer/model.py CHANGED
@@ -1,16 +1,7 @@
  import uuid
  from abc import ABC
  from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
- from pydantic import (
- UUID4,
- InstanceOf,
- BaseModel,
- ConfigDict,
- Field,
- create_model,
- field_validator,
- model_validator,
- )
+ from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
  from pydantic_core import PydanticCustomError

  from versionhq.clients.product.model import Product, ProductProvider
@@ -23,18 +14,11 @@ class Customer(ABC, BaseModel):

  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
  name: Optional[str] = Field(default=None, description="customer's name if any")
- product_list: Optional[List[Product]] = Field(
- default=list, description="store products that the customer is associated with"
- )
- analysis: str = Field(
- default=None, description="store the latest analysis results on the customer"
- )
- on_workflow: bool = Field(
- default=False, description="`True` if they are on some messaging workflows"
- )
- on: Optional[str] = Field(
- default=None, description="destination service for this customer if any"
- )
+ product_list: Optional[List[Product]] = Field(default=list, description="store products that the customer is associated with")
+ analysis: str = Field(default=None, description="store the latest analysis results on the customer")
+ on_workflow: bool = Field(default=False, description="`True` if they are on some messaging workflows")
+ on: Optional[str] = Field(default=None, description="destination service for this customer if any")
+

  @field_validator("id", mode="before")
  @classmethod
versionhq/clients/workflow/model.py CHANGED
@@ -2,6 +2,7 @@ import uuid
  from abc import ABC
  from datetime import date, datetime, time, timedelta
  from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
+ from typing_extensions import Self
  from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator
  from pydantic_core import PydanticCustomError

@@ -63,7 +64,7 @@ class MessagingComponent(ABC, BaseModel):
  condition: str = Field(default=None, max_length=128, description="condition to execute the next messaging component")


- def store_scoring_result(self, scoring_subject: str, score_raw: int | Score | ScoreFormat = None):
+ def store_scoring_result(self, scoring_subject: str, score_raw: int | Score | ScoreFormat = None) -> Self:
  """
  Set up the `score` field
  """
@@ -154,7 +155,7 @@ class MessagingWorkflow(ABC, BaseModel):


  @property
- def name(self):
+ def name(self) -> str:
  if self.customer.id:
  return f"Workflow ID: {self.id} - on {self.product.id} for {self.customer.id}"
  else:
versionhq/task/model.py CHANGED
@@ -4,7 +4,7 @@ import uuid
  from concurrent.futures import Future
  from hashlib import md5
  from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type
- from typing_extensions import Annotated
+ from typing_extensions import Annotated, Self

  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf
  from pydantic_core import PydanticCustomError
@@ -72,6 +72,7 @@ class TaskOutput(BaseModel):
  raw: str = Field(default="", description="Raw output of the task")
  json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
  pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
+ tool_output: Optional[Any] = Field(default=None, description="store tool result when the task takes tool output as its final output")

  def __str__(self) -> str:
  return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
@@ -125,6 +126,7 @@ class Task(BaseModel):
  _original_description: str = PrivateAttr(default=None)
  _logger: Logger = PrivateAttr()
  _task_output_handler = TaskOutputStorageHandler()
+ config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the agent")

  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
  name: Optional[str] = Field(default=None)
@@ -134,33 +136,35 @@ class Task(BaseModel):
  expected_output_json: bool = Field(default=True)
  expected_output_pydantic: bool = Field(default=False)
  output_field_list: List[ResponseField] = Field(
- default=[ResponseField(title="output", type=str, required=False)],
+ default_factory=list,
  description="provide output key and data type. this will be cascaded to the agent via task.prompt()"
  )
  output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

  # task setup
  context: Optional[List["Task"]] = Field(default=None, description="other tasks whose outputs should be used as context")
- tools_called: Optional[List[ToolSet]] = Field(default_factory=list, description="tools that the agent can use for this task")
+ prompt_context: Optional[str] = Field(default=None)
+
+ # tool usage
+ tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
+ can_use_agent_tools: bool = Field(default=False, description="whether the agent can use their own tools when executing the task")
  take_tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
- allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")

- prompt_context: Optional[str] = Field(default=None)
+ # execution rules
+ allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
  async_execution: bool = Field(default=False,description="whether the task should be executed asynchronously or not")
- config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the agent")
  callback: Optional[Any] = Field(default=None, description="callback to be executed after the task is completed.")
  callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")

  # recording
  processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
- used_tools: int = 0
  tools_errors: int = 0
  delegations: int = 0


  @model_validator(mode="before")
  @classmethod
- def process_model_config(cls, values: Dict[str, Any]):
+ def process_model_config(cls, values: Dict[str, Any]) -> None:
  return process_config(values_to_update=values, model_class=cls)


@@ -172,7 +176,7 @@ class Task(BaseModel):


  @model_validator(mode="after")
- def validate_required_fields(self):
+ def validate_required_fields(self) -> Self:
  required_fields = ["description",]
  for field in required_fields:
  if getattr(self, field) is None:
@@ -181,7 +185,7 @@ class Task(BaseModel):


  @model_validator(mode="after")
- def set_attributes_based_on_config(self) -> "Task":
+ def set_attributes_based_on_config(self) -> Self:
  """
  Set attributes based on the agent configuration.
  """
@@ -192,12 +196,21 @@ class Task(BaseModel):
  return self


- ## comment out as we set raw as the default TaskOutputFormat
- # @model_validator(mode="after")
- # def validate_output_format(self):
- # if self.expected_output_json == False and self.expected_output_pydantic == False:
- # raise PydanticCustomError("Need to choose at least one output format.")
- # return self
+ @model_validator(mode="after")
+ def set_up_tools(self) -> Self:
+ if not self.tools:
+ pass
+ else:
+ tool_list = []
+ for item in self.tools:
+ if isinstance(item, Tool) or isinstance(item, ToolSet):
+ tool_list.append(item)
+ elif (isinstance(item, dict) and "function" not in item) or isinstance(item, str):
+ pass
+ else:
+ tool_list.append(item) # address custom tool
+ self.tools = tool_list
+ return self


  @model_validator(mode="after")
@@ -261,7 +274,6 @@ class Task(BaseModel):
  return output_json_dict


-
  def create_pydantic_output(self, output_json_dict: Dict[str, Any], raw_result: Any = None) -> Optional[Any]:
  """
  Create pydantic output from the `raw` result.
@@ -302,11 +314,10 @@ class Task(BaseModel):
  """
  if inputs:
  self.description = self._original_description.format(**inputs)
- # self.expected_output = self._original_expected_output.format(**inputs)


  # task execution
- def execute_sync(self, agent, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> TaskOutput:
+ def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
  """
  Execute the task synchronously.
  When the task has context, make sure we have executed all the tasks in the context first.
@@ -320,26 +331,26 @@ class Task(BaseModel):
  return self._execute_core(agent, context)


- def execute_async(self, agent, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> Future[TaskOutput]:
+ def execute_async(self, agent, context: Optional[str] = None) -> Future[TaskOutput]:
  """
  Execute the task asynchronously.
  """

  future: Future[TaskOutput] = Future()
- threading.Thread(daemon=True, target=self._execute_task_async, args=(agent, context, tools, future)).start()
+ threading.Thread(daemon=True, target=self._execute_task_async, args=(agent, context, future)).start()
  return future


- def _execute_task_async(self, agent, context: Optional[str], tools: Optional[List[Any]], future: Future[TaskOutput]) -> None:
+ def _execute_task_async(self, agent, context: Optional[str], future: Future[TaskOutput]) -> None:
  """
  Execute the task asynchronously with context handling.
  """

- result = self._execute_core(agent, context, tools)
+ result = self._execute_core(agent, context)
  future.set_result(result)


- def _execute_core(self, agent, context: Optional[str], tools: Optional[List[Any]] = []) -> TaskOutput:
+ def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
  """
  Run the core execution logic of the task.
  To speed up the process, when the format is not expected to return, we will skip the conversion process.
@@ -349,13 +360,14 @@ class Task(BaseModel):
  from versionhq.team.model import Team

  self.prompt_context = context
+ task_output: InstanceOf[TaskOutput] = None

  if self.allow_delegation:
  agent_to_delegate = None

  if hasattr(agent, "team") and isinstance(agent.team, Team):
  if agent.team.managers:
- idling_manager_agents = [manager.agent for manager in agent.team.managers if manager.task is None]
+ idling_manager_agents = [manager.agent for manager in agent.team.managers if manager.is_idling]
  agent_to_delegate = idling_manager_agents[0] if idling_manager_agents else agent.team.managers[0]
  else:
  peers = [member.agent for member in agent.team.members if member.is_manager == False and member.agent.id is not agent.id]
@@ -367,20 +379,26 @@ class Task(BaseModel):
  agent = agent_to_delegate
  self.delegations += 1

- output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context, tools=tools), None, None
+ if self.take_tool_res_as_final is True:
+ output = agent.execute_task(task=self, context=context)
+ task_output = TaskOutput(task_id=self.id, tool_output=output)

- if self.expected_output_json:
- output_json_dict = self.create_json_output(raw_result=output_raw)
+ else:
+ output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context), None, None

- if self.expected_output_pydantic:
- output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
+ if self.expected_output_json:
+ output_json_dict = self.create_json_output(raw_result=output_raw)
+
+ if self.expected_output_pydantic:
+ output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
+
+ task_output = TaskOutput(
+ task_id=self.id,
+ raw=output_raw,
+ pydantic=output_pydantic,
+ json_dict=output_json_dict
+ )

- task_output = TaskOutput(
- task_id=self.id,
- raw=output_raw,
- pydantic=output_pydantic,
- json_dict=output_json_dict
- )
  self.output = task_output
  self.processed_by_agents.add(agent.role)

@@ -400,6 +418,7 @@ class Task(BaseModel):
  # else pydantic_output.model_dump_json() if pydantic_output else result
  # )
  # self._save_file(content)
+
  return task_output


@@ -463,7 +482,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
  Task ID: {str(self.id)}
  "Description": {self.description}
  "Prompt": {self.output_prompt}
- "Tools": {", ".join([tool_called.tool.name for tool_called in self.tools_called])}
+ "Tools": {", ".join([tool.name for tool in self.tools])}
  """

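Illustrative sketch (not part of the published diff): the `Task` model above replaces `tools_called` with `tools` (accepting `ToolSet`, `Tool`, or custom objects) and adds `can_use_agent_tools`, while `TaskOutput` gains `tool_output`. A minimal sketch of the new fields, assuming the import paths shown in this diff; the description, tool, and `some_agent` below are hypothetical:

from versionhq.task.model import Task
from versionhq.tool.model import Tool, ToolSet

def fetch_rate(currency: str) -> str:
    # toy callable standing in for a real data-fetching tool
    return f"rate for {currency}"

task = Task(
    description="Fetch the USD rate and return the tool result as-is.",
    tools=[ToolSet(tool=Tool(name="rate_fetcher", function=fetch_rate), kwargs={"currency": "USD"})],
    can_use_agent_tools=False,    # do not fall back to the agent's own tools
    take_tool_res_as_final=True,  # store the tool result in TaskOutput.tool_output
)
# result = task.execute_sync(agent=some_agent)  # note: execute_sync no longer takes a `tools` argument
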
versionhq/team/model.py CHANGED
@@ -1,7 +1,6 @@
  import uuid
  import warnings
  import json
- from abc import ABC
  from enum import Enum
  from dotenv import load_dotenv
  from concurrent.futures import Future
@@ -108,13 +107,14 @@ class TeamOutput(BaseModel):
  return res


- class TeamMember(ABC, BaseModel):
+ class TeamMember(BaseModel):
  agent: Agent | None = Field(default=None, description="store the agent to be a member")
  is_manager: bool = Field(default=False)
  task: Optional[Task] = Field(default=None)

- def update(self, task: Task):
- self.task = task
+ @property
+ def is_idling(self):
+ return bool(self.task is None)


  class Team(BaseModel):
@@ -253,8 +253,8 @@ class Team(BaseModel):
  """

  team_planner = TeamPlanner(tasks=self.tasks, planner_llm=self.planning_llm)
- idling_managers: List[TeamMember] = [member for member in self.members if member.task is None and member.is_manager is True]
- idling_members: List[TeamMember] = [member for member in self.members if member.task is None and member.is_manager is False]
+ idling_managers: List[TeamMember] = [member for member in self.members if member.is_idling and member.is_manager is True]
+ idling_members: List[TeamMember] = [member for member in self.members if member.is_idling and member.is_manager is False]
  unassigned_tasks: List[Task] = self.member_tasks_without_agent
  new_team_members: List[TeamMember] = []

@@ -380,16 +380,15 @@ class Team(BaseModel):
  if skipped_task_output:
  continue

- # self._prepare_agent_tools(task)
  # self._log_task_start(task, responsible_agent)

  if task.async_execution:
  context = create_raw_outputs(tasks=[task, ], task_outputs=([last_sync_output,] if last_sync_output else []))
- future = task.execute_async(agent=responsible_agent, context=context, tools=responsible_agent.tools)
+ future = task.execute_async(agent=responsible_agent, context=context)
  futures.append((task, future, task_index))
  else:
  context = create_raw_outputs(tasks=[task,], task_outputs=([last_sync_output,] if last_sync_output else [] ))
- task_output = task.execute_sync(agent=responsible_agent, context=context, tools=responsible_agent.tools)
+ task_output = task.execute_sync(agent=responsible_agent, context=context)
  if self.managers and responsible_agent in [manager.agent for manager in self.managers]:
  lead_task_output = task_output

versionhq/tool/model.py CHANGED
@@ -1,20 +1,21 @@
  from abc import ABC, abstractmethod
  from inspect import signature
  from typing import Any, Dict, Callable, Type, Optional, get_args, get_origin
+ from typing_extensions import Self
  from pydantic import InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator

  from versionhq._utils.cache_handler import CacheHandler


  class BaseTool(ABC, BaseModel):
+ """
+ Abstract class for Tool class.
+ """

  class _ArgsSchemaPlaceholder(BaseModel):
  pass

- name: str = Field(default=None)
- goal: str = Field(default=None)
  args_schema: Type[BaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
- cache_function: Callable = lambda _args=None, _result=None: True


  @field_validator("args_schema", mode="before")
@@ -26,11 +27,7 @@ class BaseTool(ABC, BaseModel):
  return type(
  f"{cls.__name__}Schema",
  (BaseModel,),
- {
- "__annotations__": {
- k: v for k, v in cls._run.__annotations__.items() if k != "return"
- },
- },
+ { "__annotations__": { k: v for k, v in cls._run.__annotations__.items() if k != "return" }},
  )


@@ -39,8 +36,32 @@ class BaseTool(ABC, BaseModel):
  """any handling"""


- def run(self, *args: Any, **kwargs: Any) -> Any:
- return self._run(*args, **kwargs)
+
+ class Tool(BaseTool):
+ name: str = Field(default=None)
+ goal: str = Field(default=None)
+ function: Callable = Field(default=None)
+ cache_function: Callable = lambda _args=None, _result=None: True
+ tool_handler: Optional[Dict[str, Any]] = Field(
+ default=None,
+ description="store tool_handler to record the usage of this tool. to avoid circular import, set as Dict format",
+ )
+
+ @model_validator(mode="after")
+ def set_up_tool_handler(self) -> Self:
+ from versionhq.tool.tool_handler import ToolHandler
+
+ if self.tool_handler:
+ ToolHandler(**self.tool_handler)
+ return self
+
+
+ @model_validator(mode="after")
+ def set_up_function(self) -> Self:
+ if self.function is None:
+ self.function = self._run
+ self._set_args_schema_from_func()
+ return self


  @staticmethod
@@ -85,51 +106,15 @@ class BaseTool(ABC, BaseModel):
  self.args_schema = type(
  class_name,
  (BaseModel,),
- { "__annotations__": { k: v for k, v in self._run.__annotations__.items() if k != "return" } },
+ { "__annotations__": {
+ k: v for k, v in self._run.__annotations__.items() if k != "return"
+ } },
  )
  return self


- @property
- def description(self) -> str:
- args_schema = {
- name: {
- "description": field.description,
- "type": self._get_arg_annotations(field.annotation),
- }
- for name, field in self.args_schema.model_fields.items()
- }
-
- return f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nGoal: {self.goal}"
-
-
-
- class Tool(BaseTool):
-
- function: Callable = Field(default=None)
- tool_handler: Optional[Dict[str, Any]] = Field(
- default=None,
- description="store tool_handler to record the usage of this tool. to avoid circular import, set as Dict format",
- )
-
- @model_validator(mode="after")
- def set_up_tool_handler(self):
- from versionhq.tool.tool_handler import ToolHandler
-
- if self.tool_handler:
- ToolHandler(**self.tool_handler)
- return self
-
- @model_validator(mode="after")
- def set_up_function(self):
- if self.function is None:
- self.function = self._run
- self._set_args_schema_from_func()
- return self
-
-
  def _run(self, *args: Any, **kwargs: Any) -> Any:
- return self.function(*args, **kwargs)
+ return self.run(*args, **kwargs)


  def run(self, *args, **kwargs) -> Any:
@@ -138,35 +123,47 @@ class Tool(BaseTool):
  """
  from versionhq.tool.tool_handler import ToolHandler

+ if self.function:
+ return self.function(*args, **kwargs)
+
  result = None
+ acceptable_args = self.args_schema.model_json_schema()["properties"].keys()
+ acceptable_kwargs = { k: v for k, v in kwargs.items() if k in acceptable_args }
+ tool_called = ToolSet(tool=self, kwargs=acceptable_kwargs)
+
+ if self.tool_handler:
+ if self.tool_handler.has_called_before(tool_called):
+ self.tool_handler.error = "Agent execution error"

- if self.function is not None:
- result = self.function(*args, **kwargs)
+ elif self.tool_handler.cache:
+ result = self.tools_handler.cache.read(tool=tool_called.tool.name, input=tool_called.kwargs)
+ if result is None:
+ parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
+ result = self.function(**parsed_kwargs) if self.function else None

  else:
- acceptable_args = self.args_schema.model_json_schema()["properties"].keys()
- acceptable_kwargs = { k: v for k, v in kwargs.items() if k in acceptable_args }
- tool_called = ToolSet(tool=self, kwargs=acceptable_kwargs)
-
- if self.tool_handler:
- if self.tool_handler.has_called_before(tool_called):
- self.tool_handler.error = "Agent execution error"
-
- elif self.tool_handler.cache:
- result = self.tools_handler.cache.read(tool=tool_called.tool.name, input=tool_called.kwargs)
- if result is None:
- parsed_kwargs = self._parse_args(input=acceptable_kwargs)
- result = self.function(**parsed_kwargs)
-
- else:
- tool_handler = ToolHandler(last_used_tool=tool_called, cache_handler=CacheHandler())
- self.tool_handler = tool_handler
- parsed_kwargs = self._parse_args(input=acceptable_kwargs)
- result = self.function(**parsed_kwargs)
+ tool_handler = ToolHandler(last_used_tool=tool_called, cache_handler=CacheHandler())
+ self.tool_handler = tool_handler
+ parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
+ result = self.function(**parsed_kwargs) if self.function else None

  return result


+ @property
+ def description(self) -> str:
+ args_schema = {
+ name: {
+ "description": field.description,
+ "type": self._get_arg_annotations(field.annotation),
+ }
+ for name, field in self.args_schema.model_fields.items()
+ }
+
+ return f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nGoal: {self.goal}"
+
+
+
  # @classmethod
  # def from_composio(
  # cls, func: Callable = None, tool_name: str = "Composio tool"
@@ -204,12 +201,12 @@ class ToolSet(BaseModel):
  """
  Store the tool called and any kwargs used.
  """
- tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+ tool: InstanceOf[Tool] | Any = Field(..., description="store the tool instance to be called.")
  kwargs: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")


  class InstructorToolSet(BaseModel):
- tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+ tool: InstanceOf[Tool] | Any = Field(..., description="store the tool instance to be called.")
  kwargs: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")

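Illustrative sketch (not part of the published diff): with `name`, `goal`, and `function` now declared on `Tool` (moved down from `BaseTool`), `run()` returns `self.function(*args, **kwargs)` whenever a callable is set. A minimal sketch under those assumptions; the tool name and callable below are made up:

from versionhq.tool.model import Tool

def add_numbers(a: int, b: int) -> int:
    # toy callable used as the tool body
    return a + b

adder = Tool(name="adder", goal="add two integers", function=add_numbers)
print(adder.run(2, 3))  # 5: run() delegates to the stored function when one is set
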
versionhq/tool/tool_handler.py CHANGED
@@ -1,6 +1,7 @@
- from typing import Any, Optional
+ from typing import Optional, Any
  from pydantic import InstanceOf
- from versionhq.tool.model import ToolSet, InstructorToolSet, CacheTool
+
+ from versionhq.tool.model import ToolSet, InstructorToolSet
  from versionhq._utils.cache_handler import CacheHandler


@@ -14,9 +15,9 @@ class ToolHandler:
  error: Optional[str]

  def __init__(
- self,
- last_used_tool: InstanceOf[ToolSet] | InstanceOf[InstructorToolSet] = None,
- cache_handler: Optional[CacheHandler] = None
+ self,
+ last_used_tool: InstanceOf[ToolSet] | InstanceOf[InstructorToolSet] = None,
+ cache_handler: Optional[CacheHandler] = None
  ):
  self.cache = cache_handler
  self.last_used_tool = last_used_tool
@@ -31,8 +32,9 @@ class ToolHandler:

  self.last_used_tool = last_used_tool

- if self.cache and should_cache and last_used_tool.tool_name != CacheTool().name:
- self.cache.add(last_used_tool.tool_name, input.last_used_tool.arguments, output=output)
+ from versionhq.tool.model import CacheTool
+ if self.cache and should_cache and last_used_tool.tool.name != CacheTool().name:
+ self.cache.add(tool=last_used_tool.tool.name, input=last_used_tool.kwargs, output=output)


  def has_called_before(self, tool_called: ToolSet = None) -> bool:
@@ -40,4 +42,4 @@ class ToolHandler:
  return False

  if tool_called := self.last_used_tool:
- return bool((tool_called.tool.name == self.last_used_tool.tool.name) and (tool_called.arguments == self.last_used_tool.arguments))
+ return bool((tool_called.tool.name == self.last_used_tool.tool.name) and (tool_called.kwargs == self.last_used_tool.kwargs))
versionhq-1.1.9.0.dist-info/METADATA → versionhq-1.1.9.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: versionhq
- Version: 1.1.9.0
+ Version: 1.1.9.1
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
versionhq-1.1.9.0.dist-info/RECORD → versionhq-1.1.9.1.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
- versionhq/__init__.py,sha256=rtE16vRgpvQ78kBeWEl5kWQgGtGW0G5vHOgwC5hlu1w,931
+ versionhq/__init__.py,sha256=HIOFr73z0KeJs9qFVvW1DjRFj7JyiJcRGGxylbQT_bQ,931
  versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/_utils/cache_handler.py,sha256=zDQKzIn7vp-M2-uepHFxgJstjfftZS5mzXKL_-4uVvI,370
+ versionhq/_utils/cache_handler.py,sha256=3-lw_5ZMWC8hnPAkSQULJ2V1FvZZ-wg9mQaUJGSOjI8,403
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
  versionhq/_utils/logger.py,sha256=lqRYH45KHMQ4mwE1woa5xNmngYu4O749AYECsnWWpmA,1851
  versionhq/_utils/process_config.py,sha256=UqoWD5IR4VLxEDGxIyVUylw_ppXwk8Wx1ynVuD-pUSg,822
- versionhq/_utils/rpm_controller.py,sha256=T7waIGeblu5K58erY4lqVLcPsWM7W9UFdU3DG9Dsk0w,2214
- versionhq/_utils/usage_metrics.py,sha256=c33a_28y8ECUgflsKN3mkNm0fNkWgZmXwybMwIqoKXA,1098
+ versionhq/_utils/rpm_controller.py,sha256=dUgFd6JtdjiLLTRmrjsBHdTaLn73XFuKpLbJh7thf2A,2289
+ versionhq/_utils/usage_metrics.py,sha256=hhq1OCW8Z4V93vwW2O2j528EyjOlF8wlTsX5IL-7asA,1106
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent/model.py,sha256=Sx4NJf3rElndcVzmPdgDgUFM3ATuvtXBwdM4AxB84UQ,18595
- versionhq/agent/parser.py,sha256=db5kfk-lR1Ph9-rsTvSeW1NjR6GJ00iaqTNYxJy3N8o,5487
+ versionhq/agent/model.py,sha256=8QtZfbeys9cCujc4whKfXdoP0aQKMuvL2qN8WQmugew,19152
+ versionhq/agent/parser.py,sha256=Z_swUPO3piJQuYU8oVYwXWeR2zjmNb4PxbXZeR-GlIg,4694
  versionhq/agent/TEMPLATES/Backstory.py,sha256=cdngBx1GEv7nroR46FEhnysnBJ9mEVL763_9np6Skkc,395
  versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/customer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/clients/customer/model.py,sha256=rQnCv_wdCdrYAsUjyB6X6ULiuWfqcBBoarXcQT5kj88,1762
+ versionhq/clients/customer/model.py,sha256=ruxqSvjBHrSJnNq9Jj5Ko1CW6l8RLiPzhbC_F7tbOnM,1670
  versionhq/clients/product/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/product/model.py,sha256=HxiSv8zq5L0H210jXWfjX_Yg1oyWhi2YASR68JEtmDY,2408
  versionhq/clients/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/clients/workflow/model.py,sha256=Suaih9odeAZOvNJ_wEy_iGAi8KnNJ1f_jIaXuhZTeAY,5833
+ versionhq/clients/workflow/model.py,sha256=LPet39sK7vUpBQ1mymdNX1xUPseGWv_5W7je_7sif_M,5883
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/llm/llm_vars.py,sha256=YZoXqFBW7XpclUZ14_AAz7WOjoyCXnGcI959GSpX2q0,5343
  versionhq/llm/model.py,sha256=mXzSuf1s6MebGT7_yqgNppde0NIlAF8bjIXAp2MZ9Uw,8247
@@ -27,17 +27,17 @@ versionhq/storage/task_output_storage.py,sha256=xoBJHeqUyQt6iJoR1WQTghP-fyxXL66q
  versionhq/task/__init__.py,sha256=g4mCATnn1mUXxsfQ5p6IpPawr8O421wVIT8kMKEcxQw,180
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
  versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
- versionhq/task/model.py,sha256=Q2PqyepTLLeoAm7htHHtbc2j2ZXY0lrP2BVbhBCuPq4,19373
+ versionhq/task/model.py,sha256=EbgYHLNq8l1zfRDnF-yEcuSZ0aNvzbRmHYgfVyJq84c,19910
  versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/team/model.py,sha256=gkw0rLkEcyAQqYk_fLA83oSvcl7p9dZSocWU0cKeHV4,20301
+ versionhq/team/model.py,sha256=E52OUVzUtvR--51SFRJos3JdYKri1t2jbvvzoOvShQc,20181
  versionhq/team/team_planner.py,sha256=uzX2yed7A7gNSs6qH5jIq2zXMVF5BwQQ4HPATsB9DSQ,3675
  versionhq/tool/__init__.py,sha256=oU2Y84b7vywWq1xmFaBdXdH8Y9lGv7dmk2LEcj4dL-s,1692
  versionhq/tool/composio.py,sha256=e-Vfr-eFm0ipiOerB_zAC1Sl90A39OD_k4QqgszWXWQ,5779
  versionhq/tool/decorator.py,sha256=W_WjzZy8y43AoiFjHLPUQfNipmpOPe-wQknCWloPwmY,1195
- versionhq/tool/model.py,sha256=ovoI3PaEuz9UvxLUKLidvSRdXDmaYxTMsZqpQPRugi0,7557
- versionhq/tool/tool_handler.py,sha256=r30Fwp32IX716pcERm67Z7kHWQ7eUa6EYBNSyWrPcjo,1485
- versionhq-1.1.9.0.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
- versionhq-1.1.9.0.dist-info/METADATA,sha256=qZaCrHOVr6y8nzrSNHtvDo0iHQSYtHqzE4LwKmZIpR8,15955
- versionhq-1.1.9.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- versionhq-1.1.9.0.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.1.9.0.dist-info/RECORD,,
+ versionhq/tool/model.py,sha256=8A1x8gEdTuP5teUf6o3VqJhrPI5m-XBoBXxgHiWCKcI,7499
+ versionhq/tool/tool_handler.py,sha256=rmm8snegwug4jl0Sbi_CteFajkbPAZ5koTQWDMwcIrQ,1510
+ versionhq-1.1.9.1.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
+ versionhq-1.1.9.1.dist-info/METADATA,sha256=-mFMCnD5q2XGVgaNyE8ddBpX8tgAR4sqlJ-Cmm0hBQA,15955
+ versionhq-1.1.9.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ versionhq-1.1.9.1.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.1.9.1.dist-info/RECORD,,