versionhq 1.1.8.1__py3-none-any.whl → 1.1.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -15,9 +15,10 @@ from versionhq.llm.model import LLM
 from versionhq.task.model import Task, TaskOutput
 from versionhq.team.model import Team, TeamOutput
 from versionhq.tool.model import Tool
+from versionhq.tool.composio import Composio


-__version__ = "1.1.8.1"
+__version__ = "1.1.9.1"
 __all__ = [
     "Agent",
     "Customer",
@@ -33,4 +34,5 @@ __all__ = [
     "Team",
     "TeamOutput",
     "Tool",
+    "Composio"
 ]
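Composio is now re-exported at the package root alongside the existing public classes, so downstream code can import it the same way as Tool (its own configuration lives in versionhq.tool.composio and is not part of this hunk). A minimal import sketch:

from versionhq import Agent, Task, Tool, Composio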
@@ -6,8 +6,8 @@ from pydantic import BaseModel, PrivateAttr
 class CacheHandler(BaseModel):
     _cache: Dict[str, Any] = PrivateAttr(default_factory=dict)

-    def add(self, tool, input, output):
+    def add(self, tool: str, input: str, output: Any) -> None:
         self._cache[f"{tool}-{input}"] = output

-    def read(self, tool, input) -> Optional[str]:
+    def read(self, tool: str, input: str) -> Optional[str]:
         return self._cache.get(f"{tool}-{input}")
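The new annotations make the cache contract explicit: entries are keyed by f"{tool}-{input}" and add() only mutates state. A minimal usage sketch of the class shown above:

cache = CacheHandler()
cache.add(tool="web_search", input="versionhq release", output="1.1.9.1 is out")

cache.read(tool="web_search", input="versionhq release")   # -> "1.1.9.1 is out"
cache.read(tool="web_search", input="something else")      # -> None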
@@ -1,6 +1,7 @@
 import threading
 import time
 from typing import Optional
+from typing_extensions import Self

 from pydantic import BaseModel, Field, PrivateAttr, model_validator

@@ -16,14 +17,14 @@ class RPMController(BaseModel):
     _shutdown_flag: bool = PrivateAttr(default=False)

     @model_validator(mode="after")
-    def reset_counter(self):
+    def reset_counter(self) -> Self:
         if self.max_rpm is not None:
             if not self._shutdown_flag:
                 self._lock = threading.Lock()
                 self._reset_request_count()
         return self

-    def check_or_wait(self):
+    def check_or_wait(self) -> bool:
         if self.max_rpm is None:
             return True

@@ -46,16 +47,16 @@ class RPMController(BaseModel):
         else:
             return _check_and_increment()

-    def stop_rpm_counter(self):
+    def stop_rpm_counter(self) -> None:
         if self._timer:
             self._timer.cancel()
             self._timer = None

-    def _wait_for_next_minute(self):
+    def _wait_for_next_minute(self) -> None:
         time.sleep(60)
         self._current_rpm = 0

-    def _reset_request_count(self):
+    def _reset_request_count(self) -> None:
         def _reset():
             self._current_rpm = 0
             if not self._shutdown_flag:
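The return-type annotations document the controller's contract: check_or_wait() returns a boolean (always True when no max_rpm is set) and otherwise sleeps into the next minute once the per-minute budget is used up, while stop_rpm_counter() cancels the background reset timer. A rough usage sketch, assuming max_rpm is the public field implied by the validator above:

controller = RPMController(max_rpm=10)   # field name assumed from `self.max_rpm` above

for _ in range(25):
    controller.check_or_wait()           # blocks into the next minute after 10 requests
    ...                                  # issue the LLM / tool request here

controller.stop_rpm_counter()            # cancel the background timer on shutdown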
@@ -12,7 +12,7 @@ class UsageMetrics(BaseModel):
     completion_tokens: int = Field(default=0, description="Number of tokens used in completions")
     successful_requests: int = Field(default=0, description="Number of successful requests made")

-    def add_usage_metrics(self, usage_metrics: "UsageMetrics"):
+    def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
         """
         Add the usage metrics from another UsageMetrics object.
         """
versionhq/agent/model.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import uuid
-from abc import ABC
 from typing import Any, Dict, List, Optional, TypeVar
+from typing_extensions import Self
 from dotenv import load_dotenv
 from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
 from pydantic_core import PydanticCustomError
@@ -15,8 +15,7 @@ from versionhq.llm.llm_vars import LLM_VARS
 from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW
 from versionhq.task import TaskOutputFormat
 from versionhq.task.model import ResponseField
-from versionhq.tool.model import Tool, ToolCalled
-from versionhq.tool.tool_handler import ToolHandler
+from versionhq.tool.model import Tool, ToolSet

 load_dotenv(override=True)
 T = TypeVar("T", bound="Agent")
@@ -51,18 +50,18 @@ class TokenProcess:
     completion_tokens: int = 0
     successful_requests: int = 0

-    def sum_prompt_tokens(self, tokens: int):
+    def sum_prompt_tokens(self, tokens: int) -> None:
         self.prompt_tokens = self.prompt_tokens + tokens
         self.total_tokens = self.total_tokens + tokens

-    def sum_completion_tokens(self, tokens: int):
+    def sum_completion_tokens(self, tokens: int) -> None:
         self.completion_tokens = self.completion_tokens + tokens
         self.total_tokens = self.total_tokens + tokens

-    def sum_cached_prompt_tokens(self, tokens: int):
+    def sum_cached_prompt_tokens(self, tokens: int) -> None:
         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens

-    def sum_successful_requests(self, requests: int):
+    def sum_successful_requests(self, requests: int) -> None:
         self.successful_requests = self.successful_requests + requests

     def get_summary(self) -> UsageMetrics:
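TokenProcess is the running counter an agent updates after each call and then flattens into a UsageMetrics via get_summary(); the new -> None annotations underline that the sum_* helpers only mutate state. A sketch of that flow:

process = TokenProcess()
process.sum_prompt_tokens(1200)        # bumps prompt_tokens and total_tokens
process.sum_completion_tokens(300)     # bumps completion_tokens and total_tokens
process.sum_successful_requests(1)

summary = process.get_summary()        # -> UsageMetrics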
@@ -76,7 +75,7 @@ class TokenProcess:


 # @track_agent()
-class Agent(ABC, BaseModel):
+class Agent(BaseModel):
     """
     Agent class that run on LLM.
     Agents execute tasks alone or in the team, using RAG tools and knowledge base if any.
@@ -97,10 +96,7 @@ class Agent(ABC, BaseModel):
     backstory: Optional[str] = Field(default=None, description="system context passed to the LLM")
     knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
     skillsets: Optional[List[str]] = Field(default_factory=list)
-
-    # tools
-    tools: Optional[List[Any]] = Field(default_factory=list)
-    tool_handler: InstanceOf[ToolHandler] = Field(default=None, description="handle tool cache and last used tool")
+    tools: Optional[List[Tool | Any]] = Field(default_factory=list)

     # team, task execution rules
     team: Optional[List[Any]] = Field(default=None, description="Team to which the agent belongs")
@@ -146,7 +142,7 @@ class Agent(ABC, BaseModel):


     @model_validator(mode="after")
-    def validate_required_fields(self):
+    def validate_required_fields(self) -> Self:
         required_fields = ["role", "goal"]
         for field in required_fields:
             if getattr(self, field) is None:
@@ -155,7 +151,7 @@ class Agent(ABC, BaseModel):


     @model_validator(mode="after")
-    def set_up_llm(self):
+    def set_up_llm(self) -> Self:
         """
         Set up the base model and function calling model (if any) using the LLM class.
         Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `step_callback`,`respect_context_window` to the LLM class.
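Per the docstring, set_up_llm reads the agent-level config (`llm`, `max_tokens`, `max_execution_time`, `step_callback`, `respect_context_window`) and forwards it to the LLM class, so a caller only declares those values on the Agent. A hedged construction sketch; the model name and the assumption that these config params are plain Agent fields are illustrative, not confirmed by this diff:

agent = Agent(
    role="market analyst",               # required; enforced by validate_required_fields
    goal="summarize weekly sales data",  # required
    llm="gpt-4o-mini",                   # forwarded to the LLM class by set_up_llm (model name is illustrative)
    max_tokens=2000,                     # docstring-listed config param, assumed to be an Agent field
)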
@@ -270,31 +266,42 @@ class Agent(ABC, BaseModel):


     @model_validator(mode="after")
-    def set_up_tools(self):
+    def set_up_tools(self) -> Self:
         """
         Similar to the LLM set up, when the agent has tools, we will declare them using the Tool class.
         """
-
         if not self.tools:
             pass

         else:
-            tools_in_class_format = []
-            for tool in self.tools:
-                if isinstance(tool, Tool):
-                    tools_in_class_format.append(tool)
-                elif isinstance(tool, str):
-                    tool_to_add = Tool(name=tool)
-                    tools_in_class_format.append(tool_to_add)
+            tool_list = []
+            def empty_func():
+                return "empty function"
+
+            for item in self.tools:
+                if isinstance(item, Tool):
+                    tool_list.append(item)
+
+                elif isinstance(item, dict):
+                    if "function" not in item:
+                        setattr(item, "function", empty_func)
+                    tool = Tool(**item)
+                    tool_list.append(tool)
+
+                elif isinstance(item, str):
+                    tool = Tool(name=item, function=empty_func)
+                    tool_list.append(tool)
+
                 else:
-                    pass
-            self.tools = tools_in_class_format
+                    tool_list.append(item) # address custom tool
+
+            self.tools = tool_list

         return self


     @model_validator(mode="after")
-    def set_up_backstory(self):
+    def set_up_backstory(self) -> Self:
         """
         Set up the backstory using a templated BACKSTORY when the backstory is None
         """
@@ -305,7 +312,7 @@ class Agent(ABC, BaseModel):
                 role=self.role,
                 knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
                 skillsets=", ".join([item for item in self.skillsets]),
-                rag_tool_overview=", ".join([item.name for item in self.tools]),
+                rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
                 goal=self.goal,
             )
             self.backstory = backstory
@@ -313,9 +320,7 @@ class Agent(ABC, BaseModel):
         return self


-    def invoke(
-        self, prompts: str, output_formats: List[TaskOutputFormat], response_fields: List[ResponseField], **kwargs
-    ) -> Dict[str, Any]:
+    def invoke(self, prompts: str, output_formats: List[TaskOutputFormat], response_fields: List[ResponseField], **kwargs) -> Dict[str, Any]:
         """
         Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
         Then call the base model.
@@ -358,11 +363,10 @@ class Agent(ABC, BaseModel):
         return {"output": response.output if hasattr(response, "output") else response}


-    def execute_task(self, task, context: Optional[str] = None, tools: Optional[str] = None) -> str:
+    def execute_task(self, task, context: Optional[str] = None) -> str:
         """
-        Execute the task and return the output in string.
-        To simplify, the tools are cascaded from the `tools_called` under the `task` Task instance if any.
-        When the tools are given, the agent must use them.
+        Execute the task and return the response in string.
+        The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
         The agent must consider the context to excute the task as well when it is given.
         """

@@ -371,13 +375,29 @@ class Agent(ABC, BaseModel):
             task_prompt += context

         tool_results = []
-        if task.tools_called:
-            for tool_called in task.tools_called:
-                tool_result = tool_called.tool.run()
+        if task.tools:
+            for item in task.tools:
+                if isinstance(item, ToolSet):
+                    tool_result = item.tool.run(**item.kwargs)
+                    tool_results.append(tool_result)
+                elif isinstance(item, Tool):
+                    tool_result = item.run()
+                    tool_results.append(tool_result)
+                else:
+                    try:
+                        item.run()
+                    except:
+                        pass
+
+        if task.can_use_agent_tools is True and self.tools:
+            for tool in self.tools:
+                tool_result = tool.run()
                 tool_results.append(tool_result)

-            if task.take_tool_res_as_final:
-                return tool_results
+        if task.take_tool_res_as_final:
+            return tool_results
+
+

         # if self.team and self.team._train:
         #     task_prompt = self._training_handler(task_prompt=task_prompt)
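execute_task now sources tools from the task itself: ToolSet entries run with their stored kwargs, bare Tool entries run with no arguments, and when task.can_use_agent_tools is set the agent's own tools run as well; take_tool_res_as_final short-circuits and returns the tool results directly. A hedged wiring sketch; ToolSet(tool=..., kwargs=...) and the Task fields are inferred from the branches above, and the description field is purely illustrative:

def lookup_weather(city: str) -> str:
    return f"sunny in {city}"

weather_tool = Tool(name="weather", function=lookup_weather)

task = Task(
    description="report today's weather",                          # illustrative field
    tools=[ToolSet(tool=weather_tool, kwargs={"city": "Tokyo"})],
    can_use_agent_tools=False,
    take_tool_res_as_final=True,                                    # return the tool results directly
)

result = agent.execute_task(task)   # runs weather_tool.run(city="Tokyo") and returns its output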
@@ -395,9 +415,7 @@ class Agent(ABC, BaseModel):
             self._times_executed += 1
             if self._times_executed > self.max_retry_limit:
                 raise e
-            result = self.execute_task(
-                task, context, [tool_called.tool for tool_called in task.tools_called]
-            )
+            result = self.execute_task(task, context)

         if self.max_rpm and self._rpm_controller:
             self._rpm_controller.stop_rpm_counter()
versionhq/agent/parser.py CHANGED
@@ -73,9 +73,7 @@ class AgentParser:
     def parse(self, text: str) -> AgentAction | AgentFinish:
         thought = self._extract_thought(text)
         includes_answer = FINAL_ANSWER_ACTION in text
-        regex = (
-            r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
-        )
+        regex = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
         action_match = re.search(regex, text, re.DOTALL)
         if action_match:
             if includes_answer:
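The collapsed regex is unchanged in behavior and still captures the tool name and its input from a ReAct-style block, for example:

import re

regex = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
text = 'Thought: I need data\nAction: web_search\nAction Input: {"query": "versionhq"}'

match = re.search(regex, text, re.DOTALL)
match.group(1)   # 'web_search'
match.group(2)   # '{"query": "versionhq"}'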
@@ -127,18 +125,9 @@ class AgentParser:
     def _safe_repair_json(self, tool_input: str) -> str:
         UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]

-        # Skip repair if the input starts and ends with square brackets
-        # Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
-        # These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
-        # might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
-        # the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
-        # square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
         if tool_input.startswith("[") and tool_input.endswith("]"):
             return tool_input

-        # Before repair, handle common LLM issues:
-        # 1. Replace """ with " to avoid JSON parser errors
-
         tool_input = tool_input.replace('"""', '"')

         result = repair_json(tool_input)
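Only the explanatory comments were removed; the behavior is unchanged: inputs already wrapped in square brackets are returned untouched, and stray triple quotes are collapsed before the string is handed to repair_json (an imported helper not shown in this hunk). Roughly:

tool_input = '["already", "valid", "json"]'
tool_input.startswith("[") and tool_input.endswith("]")   # True -> returned as-is, no repair

tool_input = '{"query": """quoted"""}'
tool_input.replace('"""', '"')                            # -> '{"query": "quoted"}'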
@@ -1,16 +1,7 @@
 import uuid
 from abc import ABC
 from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
-from pydantic import (
-    UUID4,
-    InstanceOf,
-    BaseModel,
-    ConfigDict,
-    Field,
-    create_model,
-    field_validator,
-    model_validator,
-)
+from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
 from pydantic_core import PydanticCustomError

 from versionhq.clients.product.model import Product, ProductProvider
@@ -23,18 +14,11 @@ class Customer(ABC, BaseModel):

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     name: Optional[str] = Field(default=None, description="customer's name if any")
-    product_list: Optional[List[Product]] = Field(
-        default=list, description="store products that the customer is associated with"
-    )
-    analysis: str = Field(
-        default=None, description="store the latest analysis results on the customer"
-    )
-    on_workflow: bool = Field(
-        default=False, description="`True` if they are on some messaging workflows"
-    )
-    on: Optional[str] = Field(
-        default=None, description="destination service for this customer if any"
-    )
+    product_list: Optional[List[Product]] = Field(default=list, description="store products that the customer is associated with")
+    analysis: str = Field(default=None, description="store the latest analysis results on the customer")
+    on_workflow: bool = Field(default=False, description="`True` if they are on some messaging workflows")
+    on: Optional[str] = Field(default=None, description="destination service for this customer if any")
+

     @field_validator("id", mode="before")
     @classmethod
@@ -2,6 +2,7 @@ import uuid
 from abc import ABC
 from datetime import date, datetime, time, timedelta
 from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
+from typing_extensions import Self
 from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator
 from pydantic_core import PydanticCustomError

@@ -63,7 +64,7 @@ class MessagingComponent(ABC, BaseModel):
     condition: str = Field(default=None, max_length=128, description="condition to execute the next messaging component")


-    def store_scoring_result(self, scoring_subject: str, score_raw: int | Score | ScoreFormat = None):
+    def store_scoring_result(self, scoring_subject: str, score_raw: int | Score | ScoreFormat = None) -> Self:
         """
         Set up the `score` field
         """
@@ -117,15 +118,6 @@ class MessagingWorkflow(ABC, BaseModel):
         description="store metrics that used to predict and track the performance of this workflow."
     )

-
-    @property
-    def name(self):
-        if self.customer.id:
-            return f"Workflow ID: {self.id} - on {self.product.id} for {self.customer.id}"
-        else:
-            return f"Workflow ID: {self.id} - on {self.product.id}"
-
-
     @field_validator("id", mode="before")
     @classmethod
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -160,3 +152,11 @@ class MessagingWorkflow(ABC, BaseModel):
         self.agents = agents
         self.team = team
         self.updated_at = datetime.datetime.now()
+
+
+    @property
+    def name(self) -> str:
+        if self.customer.id:
+            return f"Workflow ID: {self.id} - on {self.product.id} for {self.customer.id}"
+        else:
+            return f"Workflow ID: {self.id} - on {self.product.id}"