versionhq 1.2.4.2__py3-none-any.whl → 1.2.4.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
  from versionhq.task_graph.draft import workflow


- __version__ = "1.2.4.2"
+ __version__ = "1.2.4.5"
  __all__ = [
  "Agent",

versionhq/_prompt/auto_feedback.py CHANGED
@@ -67,7 +67,7 @@ class PromptFeedbackGraph(TaskGraph):
  if not agents:
  return None

- self.concl_template = base_task.pydantic_output if base_task.pydantic_output else base_task.response_fields if base_task.response_fields else None
+ self.concl_response_schema = base_task.response_schema
  base_agent.callbacks.append(self._reflect)
  init_node = Node(task=base_task, assigned_to=base_agent)
  self.add_node(init_node)
versionhq/_prompt/model.py CHANGED
@@ -2,7 +2,7 @@
  from typing import Dict, List, Tuple, Any
  from textwrap import dedent

- from pydantic import InstanceOf
+ from pydantic import InstanceOf, BaseModel

  from versionhq._utils import is_valid_url

@@ -25,34 +25,26 @@ class Prompt:


  def _draft_output_prompt(self) -> str:
- """Drafts prompt for output either from `pydantic_output` or `response_fields`"""
+ """Drafts prompt for output format using `response_schema`."""

- from versionhq.llm.model import DEFAULT_MODEL_PROVIDER_NAME
+ from versionhq.task.model import ResponseField

  output_prompt = ""
- model_provider = self.agent.llm.provider if self.agent else DEFAULT_MODEL_PROVIDER_NAME
+ output_formats_to_follow = dict()

- if self.task.pydantic_output:
- output_prompt, output_formats_to_follow = "", dict()
- response_format = str(self.task._structure_response_format(model_provider=model_provider))
- for k, v in self.task.pydantic_output.model_fields.items():
- output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
+ if self.task.response_schema:
+ if isinstance(self.task.response_schema, list):
+ for item in self.task.response_schema:
+ if isinstance(item, ResponseField):
+ output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"

- output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
- Response format: {response_format}
- Ref. Output image: {output_formats_to_follow}
- """
- elif self.task.response_fields:
- output_prompt, output_formats_to_follow = "", dict()
- response_format = str(self.task._structure_response_format(model_provider=model_provider))
- for item in self.task.response_fields:
- if item:
- output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+ elif issubclass(self.task.response_schema, BaseModel):
+ for k, v in self.task.response_schema.model_fields.items():
+ output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"

  output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
- Response format: {response_format}
  Ref. Output image: {output_formats_to_follow}
- """
+ """
  else:
  output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."

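For reference, a minimal sketch of how the two `response_schema` shapes are turned into the format hint above; the `Plan` model and `draft_hint` helper are illustrative only, not part of the package, and the list branch assumes ResponseField-like objects with `title` and `data_type` attributes as in the diff:

from typing import Any
from pydantic import BaseModel

class Plan(BaseModel):          # hypothetical Pydantic schema used only for this sketch
    goal: str
    steps: list[str]

def draft_hint(response_schema: Any) -> dict:
    hints = {}
    if isinstance(response_schema, list):
        # list of ResponseField-like items, each exposing .title and .data_type
        for field in response_schema:
            hints[field.title] = f"<Return your answer in {field.data_type.__name__}>"
    elif isinstance(response_schema, type) and issubclass(response_schema, BaseModel):
        # Pydantic class: hints are derived from model_fields annotations
        for name, info in response_schema.model_fields.items():
            hints[name] = f"<Return your answer in {info.annotation}>"
    return hints

print(draft_hint(Plan))   # e.g. {'goal': "<Return your answer in <class 'str'>>", 'steps': '<Return your answer in list[str]>'}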
versionhq/_utils/__init__.py CHANGED
@@ -2,3 +2,4 @@ from versionhq._utils.logger import Logger
  from versionhq._utils.process_config import process_config
  from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
  from versionhq._utils.is_valid_url import is_valid_url
+ from versionhq._utils.usage_metrics import UsageMetrics, ErrorType
versionhq/_utils/usage_metrics.py CHANGED
@@ -1,55 +1,72 @@
- from pydantic import BaseModel, Field
+ import uuid
+ import enum
+ import datetime
+ from typing import Dict, List
+ from typing_extensions import Self
+
+ from pydantic import BaseModel, UUID4, InstanceOf
+
+
+ class ErrorType(enum.Enum):
+ FORMAT = 1
+ TOOL = 2
+ API = 3
+ OVERFITTING = 4
+ HUMAN_INTERACTION = 5


  class UsageMetrics(BaseModel):
- """
- Model to track usage
- """
-
- total_tokens: int = Field(default=0, description="total number of tokens used")
- prompt_tokens: int = Field(default=0, description="number of tokens used in prompts")
- cached_prompt_tokens: int = Field(default=0, description="number of cached prompt tokens used")
- completion_tokens: int = Field(default=0, description="number of tokens used in completions")
- successful_requests: int = Field(default=0, description="number of successful requests made")
-
- def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
- """
- Add the usage metrics from another UsageMetrics object.
- """
- self.total_tokens += usage_metrics.total_tokens
- self.prompt_tokens += usage_metrics.prompt_tokens
- self.cached_prompt_tokens += usage_metrics.cached_prompt_tokens
- self.completion_tokens += usage_metrics.completion_tokens
- self.successful_requests += usage_metrics.successful_requests
-
-
-
- # class TokenProcess:
- # total_tokens: int = 0
- # prompt_tokens: int = 0
- # cached_prompt_tokens: int = 0
- # completion_tokens: int = 0
- # successful_requests: int = 0
-
- # def sum_prompt_tokens(self, tokens: int) -> None:
- # self.prompt_tokens = self.prompt_tokens + tokens
- # self.total_tokens = self.total_tokens + tokens
-
- # def sum_completion_tokens(self, tokens: int) -> None:
- # self.completion_tokens = self.completion_tokens + tokens
- # self.total_tokens = self.total_tokens + tokens
-
- # def sum_cached_prompt_tokens(self, tokens: int) -> None:
- # self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
-
- # def sum_successful_requests(self, requests: int) -> None:
- # self.successful_requests = self.successful_requests + requests
-
- # def get_summary(self) -> UsageMetrics:
- # return UsageMetrics(
- # total_tokens=self.total_tokens,
- # prompt_tokens=self.prompt_tokens,
- # cached_prompt_tokens=self.cached_prompt_tokens,
- # completion_tokens=self.completion_tokens,
- # successful_requests=self.successful_requests,
- # )
+ """A Pydantic model to manage token usage, errors, job latency."""
+
+ id: UUID4 = uuid.uuid4() # stores task id or task graph id
+ total_tokens: int = 0
+ prompt_tokens: int = 0
+ completion_tokens: int = 0
+ successful_requests: int = 0
+ total_errors: int = 0
+ error_breakdown: Dict[ErrorType, int] = dict()
+ latency: float = 0.0 # in ms
+
+ def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
+ """Records usage metrics from the raw response of the model."""
+
+ if token_usages:
+ for item in token_usages:
+ self.total_tokens += int(item["total_tokens"]) if "total_tokens" in item else 0
+ self.completion_tokens += int(item["completion_tokens"]) if "completion_tokens" in item else 0
+ self.prompt_tokens += int(item["prompt_tokens"]) if "prompt_tokens" in item else 0
+
+
+ def record_errors(self, type: ErrorType = None) -> None:
+ self.total_errors += 1
+ if type:
+ if type in self.error_breakdown:
+ self.error_breakdown[type] += 1
+ else:
+ self.error_breakdown[type] = 1
+
+
+ def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
+ self.latency += round((end_dt - start_dt).total_seconds() * 1000, 3)
+
+
+ def aggregate(self, metrics: InstanceOf["UsageMetrics"]) -> Self:
+ if not metrics:
+ return self
+
+ self.total_tokens += metrics.total_tokens if metrics.total_tokens else 0
+ self.prompt_tokens += metrics.prompt_tokens if metrics.prompt_tokens else 0
+ self.completion_tokens += metrics.completion_tokens if metrics.completion_tokens else 0
+ self.successful_requests += metrics.successful_requests if metrics.successful_requests else 0
+ self.total_errors += metrics.total_errors if metrics.total_errors else 0
+ self.latency += metrics.latency if metrics.latency else 0.0
+ self.latency = round(self.latency, 3)
+
+ if metrics.error_breakdown:
+ for k, v in metrics.error_breakdown.items():
+ if self.error_breakdown and k in self.error_breakdown:
+ self.error_breakdown[k] += int(v)
+ else:
+ self.error_breakdown.update({ k: v })
+
+ return self
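A minimal sketch of how the new UsageMetrics API is meant to be driven, based on the class definition above; the token payload and numbers below are made up for illustration:

import datetime
from versionhq._utils.usage_metrics import UsageMetrics, ErrorType

usage = UsageMetrics()                     # id defaults to a random UUID
usage.record_token_usage(token_usages=[{"total_tokens": 120, "prompt_tokens": 90, "completion_tokens": 30}])
usage.record_errors(type=ErrorType.FORMAT)  # bumps total_errors and the per-type breakdown
start = datetime.datetime.now()
# ... a model call would happen here ...
usage.record_latency(start_dt=start, end_dt=datetime.datetime.now())

other = UsageMetrics(total_tokens=50, latency=12.5)
usage.aggregate(metrics=other)               # merges counters, errors, and latency in place
print(usage.total_tokens, usage.total_errors, usage.latency)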
versionhq/agent/inhouse_agents.py CHANGED
@@ -30,7 +30,7 @@ vhq_task_evaluator = Agent(
  vhq_formation_planner = Agent(
  role="vhq-Formation Planner",
  goal="Plan a formation of agents based on the given task descirption.",
- llm="gemini/gemini-2.0-flash-exp",
+ llm="gemini/gemini-2.0-flash",
  llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
  maxit=1,
  max_retry_limit=1,
@@ -46,7 +46,7 @@ vhq_formation_planner = Agent(
  vhq_agent_creator = Agent(
  role="vhq-Agent Creator",
  goal="build an agent that can handle the given task",
- llm="gemini/gemini-2.0-flash-exp",
+ llm="gemini/gemini-2.0-flash",
  maxit=1,
  max_retry_limit=1,
  )
versionhq/agent/model.py CHANGED
@@ -1,6 +1,6 @@
  import os
  import uuid
- from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
+ from typing import Any, Dict, List, Optional, TypeVar, Callable, Type, Tuple
  from typing_extensions import Self
  from dotenv import load_dotenv

@@ -11,7 +11,7 @@ from versionhq.agent.rpm_controller import RPMController
  from versionhq.tool.model import Tool, ToolSet, BaseTool
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
- from versionhq._utils import Logger, process_config, is_valid_url
+ from versionhq._utils import Logger, process_config, is_valid_url, ErrorType


  load_dotenv(override=True)
@@ -373,16 +373,17 @@ class Agent(BaseModel):

  if tool_res_as_final:
  raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
- task._tokens = self.func_calling_llm._tokens
+ task._usage.record_token_usage(token_usages=self.func_calling_llm._usages)
  else:
  raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
- task._tokens = self.llm._tokens
+ task._usage.record_token_usage(token_usages=self.llm._usages)

  task_execution_counter += 1
  Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
  return raw_response

  except Exception as e:
+ task._usage.record_errors(type=ErrorType.API)
  Logger(**self._logger_config, filename=self.key).log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")

  while not raw_response and task_execution_counter <= self.max_retry_limit:
@@ -474,9 +475,16 @@ class Agent(BaseModel):
  return self


- def start(self, context: Any = None, tool_res_as_final: bool = False, image: str = None, file: str = None, audio: str = None) -> Any | None:
+ def start(
+ self,
+ context: Any = None,
+ tool_res_as_final: bool = False,
+ image: str = None,
+ file: str = None,
+ audio: str = None
+ ) -> Tuple[Any | None, Any | None]:
  """
- Defines and executes a task when it is not given and returns TaskOutput object.
+ Defines and executes a task, then returns TaskOutput object with the generated task.
  """

  if not self.role:
@@ -490,14 +498,14 @@ class Agent(BaseModel):

  task = Task(
  description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
- pydantic_output=Output,
+ response_schema=Output,
  tool_res_as_final=tool_res_as_final,
  image=image, #REFINEME - query memory/knowledge or self create
  file=file,
  audio=audio,
  )
  res = task.execute(agent=self, context=context)
- return res
+ return res, task


  def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
@@ -526,6 +534,8 @@ class Agent(BaseModel):
  tool_res_as_final=task.tool_res_as_final,
  task=task
  )
+ if raw_response:
+ task._usage.successful_requests += 1

  except Exception as e:
  self._times_executed += 1
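With this change, a sketch of the caller side: start() now hands back both the output and the auto-generated Task, so the usage recorded on task._usage stays reachable. The agent role and goal below are illustrative only:

import versionhq as vhq

agent = vhq.Agent(role="Researcher", goal="Summarize a topic in one sentence")
res, task = agent.start()             # previously start() returned only the output

if res:
    print(res.raw)                    # raw model response stored on the TaskOutput
    print(task._usage.total_tokens)   # tokens recorded via record_token_usage during the call
    print(task._usage.latency)        # latency in ms, recorded by the task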
versionhq/agent_network/formation.py CHANGED
@@ -74,8 +74,8 @@ def form_agent_network(
  leader_agent: str

  vhq_task = Task(
- description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
- pydantic_output=Outcome
+ description=f"Design a team of specialized agents to fully automate the following task and deliver the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+ response_schema=Outcome
  )

  if agents:
@@ -93,10 +93,11 @@ def form_agent_network(

  network_tasks = []
  members = []
- leader = str(res.pydantic.leader_agent) if res.pydantic and hasattr(res.pydantic, "leader_agent") else str(res.json_dict["leader_agent"]) if "leader_agent" in res.json_dict else None
-
- agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
- created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
+ leader = res._fetch_value_of(key="leader_agent")
+ agent_roles = res._fetch_value_of(key="agent_roles")
+ created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles] if agent_roles else []
+ task_descriptions = res._fetch_value_of(key="task_descriptions")
+ task_outcomes = res._fetch_value_of(key="task_outcomes")

  if agents:
  for i, agent in enumerate(created_agents):
@@ -108,9 +109,9 @@ def form_agent_network(

  created_tasks = []

- if res.pydantic:
- for i, item in enumerate(res.pydantic.task_outcomes):
- if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+ if task_outcomes:
+ for i, item in enumerate(task_outcomes):
+ if len(task_descriptions) > i and task_descriptions[i]:
  fields = {}
  for ob in item:
  try:
@@ -119,24 +120,9 @@ def form_agent_network(
  except:
  pass
  output = create_model("Output", **fields) if fields else None
- _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+ _task = Task(description=task_descriptions[i], response_schema=output)
  created_tasks.append(_task)

- elif res.json_dict:
- for i, item in enumerate(res["task_outcomes"]):
- if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
- fields = {}
- for ob in item:
- try:
- field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
- fields[field_name] = (str, Field(default=None))
- except:
- pass
- output = create_model("Output", **fields) if fields else None
- _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
- created_tasks.append(_task)
-
-
  if len(created_tasks) <= len(created_agents):
  for i in range(len(created_tasks)):
  is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
@@ -159,7 +145,6 @@ def form_agent_network(

  network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])

-
  if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
  role = leader if leader else "Leader"
  manager = Member(agent=Agent(role=role), is_manager=True)
versionhq/agent_network/model.py CHANGED
@@ -90,7 +90,6 @@ class AgentNetwork(BaseModel):

  cache: bool = Field(default=True)
  execution_logs: List[Dict[str, Any]] = Field(default_factory=list, description="list of execution logs of the tasks handled by members")
- # usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")


  def __name__(self) -> str:
@@ -207,7 +206,7 @@ class AgentNetwork(BaseModel):
  for unassgined_task in unassigned_tasks:
  task = Task(
  description=f"Based on the following task summary, draft an agent's role and goal in concise manner. Task summary: {unassgined_task.summary}",
- response_fields=[
+ response_schema=[
  ResponseField(title="goal", data_type=str, required=True),
  ResponseField(title="role", data_type=str, required=True),
  ],
versionhq/llm/llm_vars.py CHANGED
@@ -27,8 +27,9 @@ MODELS = {
  "o1-preview",
  ],
  "gemini": [
- "gemini/gemini-1.5-flash",
- "gemini/gemini-1.5-pro",
+ "gemini/gemini-2.0-flash",
+ "gemini/gemini-2.0-flash-thinking-exp",
+ "gemini/gemini-2.0-flash-lite-preview-02-05",
  "gemini/gemini-2.0-flash-exp",
  ],
  "anthropic": [
@@ -75,6 +76,7 @@ ENV_VARS = {
  "huggingface": ["HUGGINGFACE_API_KEY", ],
  "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
  "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+ "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
  }


@@ -90,9 +92,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
  "o1-preview": 128000,
  "o1-mini": 128000,

- "gemini/gemini-1.5-flash": 1048576,
- "gemini/gemini-1.5-pro": 2097152,
  "gemini/gemini-2.0-flash-exp": 1048576,
+ "gemini/gemini-2.0-flash": 1048576,
+ "gemini/gemini-2.0-flash-thinking-exp": 1048576,
+ "gemini/gemini-2.0-flash-lite-preview-02-05": 1048576,

  "claude-3-7-sonnet-latest": 200000,
  "claude-3-5-haiku-latest": 200000,
versionhq/llm/model.py CHANGED
@@ -69,7 +69,7 @@ class LLM(BaseModel):

  _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
  _init_model_name: str = PrivateAttr(default=None)
- _tokens: int = PrivateAttr(default=0) # aggregate number of tokens consumed
+ _usages: list[Dict[str, int]] = PrivateAttr(default_factory=list)

  model: str = Field(default=None)
  provider: Optional[str] = Field(default=None, description="model provider")
@@ -181,8 +181,6 @@ class LLM(BaseModel):
  """
  litellm.drop_params = True

- self._tokens = 0
-
  if self.callbacks:
  self._set_callbacks(self.callbacks)

@@ -230,6 +228,9 @@ class LLM(BaseModel):
  if self.context_window_size and valid_config["max_tokens"] > self.context_window_size:
  valid_config["max_tokens"] = self.context_window_size

+ if "model" in valid_config:
+ self.model = valid_config.pop("model")
+
  self.llm_config = valid_config
  return valid_config

@@ -319,7 +320,7 @@ class LLM(BaseModel):
  if not tools:
  params = self._create_valid_params(config=config)
  res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
- self._tokens += int(res["usage"]["total_tokens"])
+ self._usages.append(res["usage"])
  return res["choices"][0]["message"]["content"]

  else:
@@ -384,15 +385,13 @@ class LLM(BaseModel):
  else:
  pass

-
  if tool_res_as_final:
  return tool_res
  else:
  res = litellm.completion(model=self.model, messages=messages, **params, **cred)
- self._tokens += int(res["usage"]["total_tokens"])
+ self._usages.append(res["usage"])
  return res.choices[0].message.content

-
  except JSONSchemaValidationError as e:
  self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
  raise e
versionhq/storage/task_output_storage.py CHANGED
@@ -147,8 +147,8 @@ class TaskOutputStorageHandler:
  description=str(task.description),
  raw=str(task.output.raw),
  responsible_agents=str(task.processed_agents),
- tokens=task.output._tokens,
- latency=task.output.latency,
+ tokens=task._usage.total_tokens,
+ latency=task._usage.latency,
  score=task.output.aggregate_score if task.output.aggregate_score else "None",
  )
  self.storage.add(task=task, output=output_to_store, inputs=inputs)
versionhq/task/model.py CHANGED
@@ -6,7 +6,7 @@ import inspect
  import enum
  from concurrent.futures import Future
  from hashlib import md5
- from typing import Any, Dict, List, Set, Optional, Callable, Type, Tuple
+ from typing import Any, Dict, List, Set, Optional, Callable, Type
  from typing_extensions import Annotated, Self

  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
@@ -15,7 +15,7 @@ from pydantic_core import PydanticCustomError
  import versionhq as vhq
  from versionhq.task.evaluation import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
- from versionhq._utils import process_config, Logger
+ from versionhq._utils import process_config, Logger, UsageMetrics, ErrorType


  class TaskExecutionType(enum.Enum):
@@ -175,18 +175,31 @@ class TaskOutput(BaseModel):
  A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
  """

- _tokens: int = PrivateAttr(default=0)
-
  task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
  raw: str = Field(default="", description="Raw output of the task")
  json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
  pydantic: Optional[Any] = Field(default=None)
  tool_output: Optional[Any] = Field(default=None, description="stores tool result when the task takes tool output as its final output")
  callback_output: Optional[Any] = Field(default=None, description="stores task or agent callback outcome")
- latency: float = Field(default=None, description="job latency in ms")
  evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")


+ def _fetch_value_of(self, key: str = None) -> Any:
+ """Returns a value to the given key."""
+
+ if not key:
+ return None
+
+ if self.pydantic and hasattr(self.pydantic, key):
+ return getattr(self.pydantic, key)
+
+ elif self.json_dict and key in self.json_dict:
+ return self.json_dict[key]
+
+ else:
+ return None
+
+
  def _to_context_prompt(self) -> str:
  """Formats prompt context in text formats from the final response."""

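A small sketch of the intent behind _fetch_value_of: one accessor that works whether the structured result landed in `pydantic` or in `json_dict`. The output object below is constructed by hand purely for illustration:

from versionhq.task.model import TaskOutput

out = TaskOutput(raw='{"leader_agent": "Leader"}', json_dict={"leader_agent": "Leader"})
print(out._fetch_value_of(key="leader_agent"))   # "Leader", read from json_dict
print(out._fetch_value_of(key="missing"))        # None when the key exists nowhere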
@@ -223,9 +236,8 @@ class TaskOutput(BaseModel):
  description = EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item))
  description = description + fsl_prompt if fsl_prompt else description

- task_eval = Task(description=description, pydantic_output=EvaluationItem)
+ task_eval = Task(description=description, response_schema=EvaluationItem)
  res = task_eval.execute(agent=self.evaluation.eval_by)
- self._tokens += task_eval._tokens

  if res.pydantic:
  item = EvaluationItem(
@@ -296,11 +308,7 @@ class Task(BaseModel):
  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
  name: Optional[str] = Field(default=None)
  description: str = Field(description="Description of the actual task")
-
- # response format
- response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None)
- pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
- response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")
+ response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None, description="stores response format")

  # tool usage
  tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
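Both shapes that the consolidated `response_schema` field accepts, in one short sketch; the model and field names are examples (they mirror the README snippets later in this diff), not package fixtures:

import versionhq as vhq
from pydantic import BaseModel

class CustomOutput(BaseModel):        # Pydantic form
    test1: str

task_a = vhq.Task(description="Amazing task", response_schema=CustomOutput)

task_b = vhq.Task(                    # ResponseField form
    description="Analyze the client's business model.",
    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True)],
)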
@@ -328,9 +336,7 @@ class Task(BaseModel):
  fsls: Optional[list[str]] = Field(default=None, description="stores ideal/weak responses")

  # recording
- _tokens: int = 0
- _tool_errors: int = 0
- _format_errors: int = 0
+ _usage: UsageMetrics = PrivateAttr(default=None)
  _delegations: int = 0
  processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
  output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")
@@ -355,6 +361,8 @@ class Task(BaseModel):
  for field in required_fields:
  if getattr(self, field) is None:
  raise ValueError( f"{field} must be provided either directly or through config")
+
+ self._usage = UsageMetrics(id=self.id)
  return self


@@ -376,7 +384,7 @@ class Task(BaseModel):


  def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
- """Structures `response_fields` or `pydantic_output` to a LLM response format."""
+ """Structures `response_schema` into the LLM response format."""

  from versionhq.task.structured_response import StructuredOutput

@@ -386,28 +394,29 @@ class Task(BaseModel):
  return response_format

  else:
- if self.response_fields:
- properties, required_fields = {}, []
- for i, item in enumerate(self.response_fields):
- if item:
- properties.update(item._format_props())
- required_fields.append(item.title)
-
- response_schema = {
- "type": data_type,
- "properties": properties,
- "required": required_fields,
- "additionalProperties": False,
- }
- response_format = {
- "type": "json_schema",
- "json_schema": { "name": "outcome", "schema": response_schema }
- }
+ if self.response_schema:
+ if isinstance(self.response_schema, list):
+ properties, required_fields = {}, []
+ for i, item in enumerate(self.response_schema):
+ if isinstance(item, ResponseField):
+ properties.update(item._format_props())
+ required_fields.append(item.title)
+
+ response_schema = {
+ "type": data_type,
+ "properties": properties,
+ "required": required_fields,
+ "additionalProperties": False,
+ }
+ response_format = {
+ "type": "json_schema",
+ "json_schema": { "name": "outcome", "schema": response_schema }
+ }

- elif self.pydantic_output:
- response_format = StructuredOutput(response_format=self.pydantic_output, provider=model_provider)._format()
+ elif issubclass(self.response_schema, BaseModel):
+ response_format = StructuredOutput(response_format=self.response_schema, provider=model_provider)._format()

- return response_format
+ return response_format


  def _sanitize_raw_output(self, raw: str) -> Dict[str, str]:
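For orientation, the JSON-schema style payload assembled by the ResponseField branch above looks roughly like this; the property name is illustrative and the inner property shape produced by ResponseField._format_props() is approximated, not quoted from the package:

response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "outcome",
        "schema": {
            "type": "object",
            "properties": {"test1": {"type": "string"}},   # shape approximated
            "required": ["test1"],
            "additionalProperties": False,
        },
    },
}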
@@ -423,6 +432,7 @@ class Task(BaseModel):
  r = re.sub(r"'\b", '"', r)
  r = r.strip()
  r = r.replace(" ", "")
+
  try:
  output = json.loads(r)
  except:
@@ -433,14 +443,14 @@ class Task(BaseModel):
  output = json.loads(j)

  if isinstance(output, dict):
- return output
+ return output["json_schema"] if "json_schema" in output else output
  else:
  try:
  output = ast.literal_eval(j)
  except:
  output = ast.literal_eval(r)

- return output if isinstance(output, dict) else { "output": str(r) }
+ return output["json_schema"] if isinstance(output, dict) and "json_schema" in output else output if isinstance(output, dict) else { "output": str(r) }


  def _create_json_output(self, raw: str) -> Dict[str, Any]:
@@ -456,31 +466,34 @@ class Task(BaseModel):
  try:
  output = json.loads(raw)
  if isinstance(output, dict):
- return output
+ return output["json_schema"] if "json_schema" in output else output
  else:
- output = self._sanitize_raw_output(raw=raw)
- return output
+ output = self._sanitize_raw_output(raw=raw)
+ return output
  except:
  output = self._sanitize_raw_output(raw=raw)
+ self._usage.record_errors(type=ErrorType.FORMAT)
  return output


- def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
+ def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel] | None:
  """
  Create pydantic output from raw or json_dict output.
  """

- output_pydantic = self.pydantic_output
-
- try:
- json_dict = json_dict if json_dict else self._create_json_output(raw=raw)
+ if self.response_schema and not isinstance(self.response_schema, list):
+ output_pydantic = self.response_schema
+ try:
+ json_dict = json_dict if json_dict else self._create_json_output(raw=raw)

- for k, v in json_dict.items():
- setattr(output_pydantic, k, v)
- except:
- pass
+ for k, v in json_dict.items():
+ setattr(output_pydantic, k, v)
+ except:
+ pass
+ return output_pydantic

- return output_pydantic
+ else:
+ return None


  def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
@@ -592,13 +605,6 @@ class Task(BaseModel):
  res = self._test_time_computation(agent=agent, context=context)
  return res

- # if self._pfg:
- # res, all_outputs = self.pfg.activate()
- # tokens, latency = self.pfg.usage
- # self._tokens = tokens
- # res.latency = latency
- # return res
-
  match type:
  case TaskExecutionType.SYNC:
  res = self._execute_sync(agent=agent, context=context)
@@ -629,11 +635,11 @@ class Task(BaseModel):
  def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
  """A core method to execute a single task."""

+ start_dt = datetime.datetime.now()
  task_output: InstanceOf[TaskOutput] = None
  raw_output: str = None
  tool_output: str | list = None
  task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
- started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()
  user_prompt, dev_prompt = None, None

  if self.tools:
@@ -647,22 +653,19 @@ class Task(BaseModel):
  self._delegations += 1

  if self.tool_res_as_final == True:
- started_at = datetime.datetime.now()
  user_prompt, dev_prompt, tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
  raw_output = str(tool_output) if tool_output else ""
- ended_at = datetime.datetime.now()
+ if not raw_output:
+ self._usage.record_errors(type=ErrorType.TOOL)
  task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=raw_output)

  else:
- started_at = datetime.datetime.now()
  user_prompt, dev_prompt, raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
- ended_at = datetime.datetime.now()
-
  json_dict_output = self._create_json_output(raw=raw_output)
  if "outcome" in json_dict_output:
  json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))

- pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output) if self.pydantic_output else None
+ pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output)

  task_output = TaskOutput(
  task_id=self.id,
@@ -671,8 +674,6 @@ class Task(BaseModel):
  json_dict=json_dict_output,
  )

- task_output.latency = round((ended_at - started_at).total_seconds() * 1000, 3)
- task_output._tokens = self._tokens
  self.output = task_output
  self.processed_agents.add(agent.key)

@@ -706,6 +707,8 @@ class Task(BaseModel):
  self.output = task_output
  self._store_logs()

+ end_dt = datetime.datetime.now()
+ self._usage.record_latency(start_dt=start_dt, end_dt=end_dt)
  return task_output


@@ -716,27 +719,27 @@ class Task(BaseModel):
  from versionhq._prompt.model import Prompt
  from versionhq._prompt.auto_feedback import PromptFeedbackGraph

+ # self._usage = None
  prompt = Prompt(task=self, agent=agent, context=context)
  pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
  pfg = pfg.set_up_graph()
  self._pfg = pfg

- # try:
- if self._pfg and self.output is None:
- res, _ = self._pfg.activate()
- tokens, latency = self._pfg.usage
- self._tokens = tokens
- res.latency = latency
- return res
+ try:
+ if self._pfg and self.output is None:
+ res, all_outputs = self._pfg.activate()
+ if all_outputs: self._usage = self._pfg._usage
+ return res

- # except:
- # Logger().log(level="error", message="Failed to execute the task.", color="red")
- # return None, None
+ except:
+ self._usage.record_errors(type=ErrorType.API)
+ Logger().log(level="error", message="Failed to execute the task.", color="red")
+ return None


  @property
  def key(self) -> str:
- output_format = "json" if self.response_fields else "pydantic" if self.pydantic_output is not None else "raw"
+ output_format = "json" if self.response_schema else "raw"
  source = [self.description, output_format]
  return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

versionhq/task_graph/draft.py CHANGED
@@ -41,7 +41,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
  "https://www.geeksforgeeks.org/graph-and-its-representations/",
  ", ".join([k for k in DependencyType._member_map_.keys()]),
  ],
- llm="gemini-2.0",
+ llm="gemini/gemini-2.0-flash",
  with_memory=with_memory,
  maxit=1,
  max_retry_limit=1,
@@ -49,7 +49,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

  task = Task(
  description=dedent(f"Design a resource-efficient workflow to achieve the following goal: {final_output_prompt}. The workflow should consist of a list of detailed tasks that represent decision making points, each with the following information:\nname: A concise name of the task\ndescription: A concise description of the task.\nconnections: A list of target tasks that this task connects to.\ndependency_types: The type of dependency between this task and each of its connected task. \noutput: key output from the task in a word.\n\nUse the following dependency types: {dep_type_prompt}.\n\nPrioritize minimizing resource consumption (computation, memory, and data transfer) when defining tasks, connections, and dependencies. Consider how data is passed between tasks and aim to reduce unnecessary data duplication or transfer. Explain any design choices made to optimize resource usage."),
- response_fields=[
+ response_schema=[
  ResponseField(title="tasks", data_type=list, items=dict, properties=[
  ResponseField(title="name", data_type=str),
  ResponseField(title="description", data_type=str),
@@ -73,13 +73,13 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

  for item in task_items:
  key = item["output"].lower().replace(" ", "_") if item["output"] else "output"
- task = Task(name=item["name"], description=item["description"], response_fields=[ResponseField(title=key, data_type=str)])
+ task = Task(name=item["name"], description=item["description"], response_schema=[ResponseField(title=key, data_type=str)])
  tasks.append(task)
  nodes.append(Node(task=task))

  task_graph = TaskGraph(
  nodes={node.identifier: node for node in nodes},
- concl_format=final_output,
+ concl_response_schema=final_output,
  concl=None,
  should_reform=human,
  reform_trigger_event=ReformTriggerEvent.USER_INPUT if human else None,
@@ -96,6 +96,6 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
  task_graph.add_dependency(
  source=source.identifier, target=target.identifier, dependency_type=dependency_type)

- task_graph.visualize()
+ # task_graph.visualize()

  return task_graph
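A hedged usage sketch for the renamed keyword: the graph drafted by workflow() now carries the final schema under concl_response_schema. The Pydantic model and context string below are illustrative only:

import versionhq as vhq
from pydantic import BaseModel

class Report(BaseModel):
    summary: str

task_graph = vhq.workflow(final_output=Report, context="Draft a market report", human=False)
print(task_graph.concl_response_schema)   # Report, as passed through to TaskGraph(...)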
versionhq/task_graph/model.py CHANGED
@@ -14,8 +14,8 @@ from pydantic import BaseModel, InstanceOf, Field, UUID4, field_validator, model
  from pydantic_core import PydanticCustomError

  from versionhq.agent.model import Agent
- from versionhq.task.model import Task, TaskOutput, Evaluation
- from versionhq._utils import Logger
+ from versionhq.task.model import Task, TaskOutput, Evaluation, ResponseField
+ from versionhq._utils import Logger, UsageMetrics, ErrorType


  class ReformTriggerEvent(enum.Enum):
@@ -129,7 +129,7 @@ class Node(BaseModel):
  else:
  self.status = TaskStatus.IN_PROGRESS
  agent = agent if agent else self.assigned_to
- self.task.pydantic_output = self.task.pydantic_output if self.task.pydantic_output else response_format if type(response_format) == BaseModel else None
+ self.task.response_schema = self.task.response_schema if self.task.response_schema else response_format if type(response_format) == BaseModel or isinstance(response_format, list) else None
  res = self.task.execute(agent=agent, context=context)

  if isinstance(res, Future): # activate async
@@ -393,11 +393,13 @@ class Graph(ABC, BaseModel):


  class TaskGraph(Graph):
+ _usage: Optional[UsageMetrics] = None
+
  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
  should_reform: bool = False
  reform_trigger_event: Optional[ReformTriggerEvent] = None
  outputs: Dict[str, TaskOutput] = Field(default_factory=dict, description="stores node identifier and TaskOutput")
- concl_template: Optional[Dict[str, Any] | Type[BaseModel]] = Field(default=None, description="stores final response format in Pydantic class or JSON dict")
+ concl_response_schema: Optional[List[ResponseField] | Type[BaseModel]] = Field(default=None, description="stores final response schema in Pydantic class or response fields")
  concl: Optional[TaskOutput] = Field(default=None, description="stores the final or latest conclusion of the entire task graph")


@@ -418,6 +420,40 @@ class TaskGraph(Graph):
  Logger().log(level="error", message=f"Failed to save the graph {str(self.id)}: {str(e)}", color="red")


+ def _handle_usage(self) -> None:
+ """Returns total tokens and latency spended for the graph execution."""
+ if not self.nodes:
+ return None
+
+ self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+
+ for node in self.nodes.values():
+ if node.task and node.task._usage:
+ self._usage.aggregate(metrics=node.task._usage)
+
+
+ def _handle_human_input(self) -> str | None:
+ """Handles input from human."""
+ request = None
+
+ print('Proceed? Y/n:')
+ x = input()
+
+ if x.lower() == "y":
+ Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
+
+ else:
+ request = input("Request?")
+ if request:
+ Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
+ else:
+ Logger().log(message="Cannot recognize your request.", level="error", color="red")
+ self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+ self._usage.record_errors(type=ErrorType.HUMAN_INTERACTION)
+
+ return request
+
+
  def add_task(self, task: Node | Task) -> Node:
  """Convert `task` to a Node object and add it to G"""

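A small sketch of what _handle_usage amounts to once the graph's nodes have run: per-task UsageMetrics are folded into one graph-level object via aggregate(). The numbers are invented for illustration:

from versionhq._utils import UsageMetrics

graph_usage = UsageMetrics()
node_usages = [UsageMetrics(total_tokens=300, latency=850.0), UsageMetrics(total_tokens=120, latency=410.5)]

for m in node_usages:            # mirrors the loop above over node.task._usage
    graph_usage.aggregate(metrics=m)

print(graph_usage.total_tokens)  # 420
print(graph_usage.latency)       # 1260.5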
@@ -633,8 +669,9 @@ class TaskGraph(Graph):
  res, _ = self.handle_reform(target=target)

  self.concl = res
- self.concl_template = self.concl_template if self.concl_template else res.pydantic.__class__ if res.pydantic else None
+ self.concl_response_schema = self.concl_response_schema if self.concl_response_schema else res.pydantic.__class__ if res.pydantic else None
  # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
+ self._handle_usage()
  return res, self.outputs


@@ -657,27 +694,6 @@ class TaskGraph(Graph):
  return eval


- def _handle_human_input(self) -> str | None:
- """Handles input from human."""
- request = None
-
- print('Proceed? Y/n:')
- x = input()
-
- if x.lower() == "y":
- Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
-
- else:
- request = input("Request?")
-
- if request:
- Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
- else:
- Logger().log(message="Cannot recognize your request.", level="error", color="red")
-
- return request
-
-
  def handle_reform(self, target: str = None) -> Self:
  task_description = "Improve the given output: "
  if target:
@@ -693,15 +709,3 @@ class TaskGraph(Graph):
  self.add_node(node=new_node)
  self.add_dependency(source=target, target=new_node.identifier)
  return self.activate(target=new_node.identifier)
-
-
- @property
- def usage(self) -> Tuple[int, float]:
- """Returns aggregate number of consumed tokens and job latency in ms during the activation"""
-
- tokens, latency = 0, 0
- for v in self.outputs.values():
- tokens += v._tokens
- latency += v.latency
-
- return tokens, latency
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.4.2
+ Version: 1.2.4.5
  Summary: Autonomous agent networks for task automation with multi-step reasoning.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -295,7 +295,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:

  task = vhq.Task(
  description="Amazing task",
- pydantic_output=CustomOutput,
+ response_schema=CustomOutput,
  callback=dummy_func,
  callback_kwargs=dict(message="Hi! Here is the result: ")
  )
@@ -317,13 +317,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")

  task_1 = vhq.Task(
  description="Analyze the client's business model.",
- response_fields=[vhq.ResponseField(title="test1", data_type=str, required=True),],
+ response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
  allow_delegation=True
  )

  task_2 = vhq.Task(
  description="Define a cohort.",
- response_fields=[vhq.ResponseField(title="test1", data_type=int, required=True),],
+ response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
  allow_delegation=False
  )

@@ -1,25 +1,25 @@
- versionhq/__init__.py,sha256=m36KgpiM94kUCZJm-JA7gPWjb7gNgMHHXUAbxzKlIOs,3026
- versionhq/_prompt/auto_feedback.py,sha256=iIa3ReiFqs-JA2Q4Y_VnLV-DbXPelEVSMHTw3tICVTE,3892
+ versionhq/__init__.py,sha256=8XLNCUY-3bjs8eI6mMUDBZLeY_Gj39QTut0tpzzyE3c,3026
+ versionhq/_prompt/auto_feedback.py,sha256=QDYd8mKlluDaOjHIfL9B8Tr5tg40tsDVyBXqCuUdTCU,3800
  versionhq/_prompt/constants.py,sha256=DOwUFnVVObEFqgnaMCDnW8fnw1oPMgS8JAqOiTuqleI,932
- versionhq/_prompt/model.py,sha256=GQPaC_Vj1wQ69ZHlzXWdtdif8UeF6WK1jN3JrFEcCt0,8662
- versionhq/_utils/__init__.py,sha256=llXOcGFlR9YF5iMI5uFb4twvM9wo-vmoMw8y1KzQVVc,233
+ versionhq/_prompt/model.py,sha256=kokH7axDDPEZKPBmrOKi5L0aZLtApdVOJDKsMjkJmvw,8020
+ versionhq/_utils/__init__.py,sha256=UggL2r-idlWDh0cIPFLyJ7AvO17NLzhjheW4IBFLBj4,300
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
  versionhq/_utils/is_valid_url.py,sha256=m8Mswvb-90FJtx1Heq6hPFDbwGgrv_R3wSbZQmEPM9Q,379
  versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
  versionhq/_utils/logger.py,sha256=iHxGjm3BvUo5dHKLU88_pc0Z45wzSHOjyJGQkb7OADk,3255
  versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
- versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7B3JE,2246
+ versionhq/_utils/usage_metrics.py,sha256=zaoH6xjWX69UrQJmViBiX3sEUpnwSoHaapCPfWU2oM8,2632
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent/inhouse_agents.py,sha256=BPkvEyMH8VnZWsMeCwsGplDT_kLwlIejeRcr-6ItGqQ,2637
- versionhq/agent/model.py,sha256=Zom5G0ubq4TWGWF6PK631-6puCjcdHYKj0R355GS9T8,24480
+ versionhq/agent/inhouse_agents.py,sha256=D2WAiXCYsnQK3_Fe7CbbtvXsHWOaN6vde6m_QoW7fH4,2629
+ versionhq/agent/model.py,sha256=auIrSWqt4NnANgA2YOCS2l9kxPCWkZG7YUD22NjrkAA,24810
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
  versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
  versionhq/agent/TEMPLATES/Backstory.py,sha256=dkfuATUQ2g2WoUKkmgAIch-RB--bektGoQaUlsDOn0g,529
  versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent_network/formation.py,sha256=WkAbmE2-Oqw8KWDcqqHU_s98QzGJWNQ_YCAls6ZuQbg,8260
- versionhq/agent_network/model.py,sha256=-vLBqPCtfLxTf17toJkE7Gkxg1SwlrA-Frf2Pc_uB50,16021
+ versionhq/agent_network/formation.py,sha256=jXCc9dS9gOgjNoPNoqKaqKPrHqmcFIvR2JwKYTgiQW0,7505
+ versionhq/agent_network/model.py,sha256=CYkIU1W1Ijh5DQD18vRcD1g8myZWmqAYNS0PlRnEX-o,15899
  versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -36,8 +36,8 @@ versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08
  versionhq/knowledge/source_docling.py,sha256=XpavmLvh4dLcuTikj8MCE9KG52oQMafy7_wBneliMK0,4994
  versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/llm/llm_vars.py,sha256=qSG-_pYeWksdMmwASXpjQqf97fMovsY4lNTSCHQF88k,5694
- versionhq/llm/model.py,sha256=P4J6ZU0vY5HU7XHLelz7oznPmeEElHPFsAo-2Vd8DQ0,17255
+ versionhq/llm/llm_vars.py,sha256=fl9MOjPzBEIymp99BNLD39VmB7rQ9L4YDnfLNXbUZws,5896
+ versionhq/llm/model.py,sha256=m4OaFgGWKCZjmfN-OY3KoqG1K4T31UF7QVkYUcnpjdg,17273
  versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
  versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -46,18 +46,18 @@ versionhq/storage/base.py,sha256=p-Jas0fXQan_qotnRD6seQxrT2lj-uw9-SmHQhdppcs,355
  versionhq/storage/ltm_sqlite_storage.py,sha256=LeJE4ZPUWjyY1E5nNCHoKujTHFDR2BO_LAMvAOX-WHg,3957
  versionhq/storage/mem0_storage.py,sha256=ZY8MELBWaINRv9YuRW5MxH7dj2cII-L0i3xSD6o1-2M,3781
  versionhq/storage/rag_storage.py,sha256=bS2eE874obarYl-4hT6ZWYWTRsqtfuGpKgKzERmM6Uo,7433
- versionhq/storage/task_output_storage.py,sha256=M8vInLJ5idGAq17w1juHKXtyPyF-B-rK_P8UcqD-Px8,5357
+ versionhq/storage/task_output_storage.py,sha256=nkBNmBbrQeEgds3lAyKl4ugDWLtWRoCQUO6KiOmCIMU,5362
  versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,748
  versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/task/evaluation.py,sha256=qQSA5ZWTWA3he54ystsYpTKXJWv68gBL6DCq8ZW1bl8,3813
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
- versionhq/task/model.py,sha256=YsuxbohcXLwE1qB8-lPnl_sFWQJ7CPqKDbYbf8CeZww,28879
+ versionhq/task/model.py,sha256=C5QttgJrfhx0UL82845ZBIhhc8oLiQkAWz2i6UkskMM,29023
  versionhq/task/structured_response.py,sha256=tqOHpch8CVmMj0aZXjdDWtPNcVmBW8DVZnBvPBwS4PM,5053
  versionhq/task/TEMPLATES/Description.py,sha256=hKhpbz0ztbkUMXz9KiL-P40fis9OB5ICOdL9jCtgAhU,864
  versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
- versionhq/task_graph/draft.py,sha256=0zDRx1-PXIwuB_RkdKQTHgqjm-5VrJHhljBIlYRX6EM,4919
- versionhq/task_graph/model.py,sha256=T7-Rj05q9gKIXPNwPdr1cOjnxNQdNicEFg2WIApO9Og,28877
+ versionhq/task_graph/draft.py,sha256=mVhCHDH-7N-SQRssE50KGIAgd9gdvdeWjt8ofm-SYI4,4943
+ versionhq/task_graph/model.py,sha256=l4Alvdtdl-fwYG7eMo655HF0zx1HkKRiPiST_Ra7hzg,29305
  versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
  versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
@@ -66,8 +66,8 @@ versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1
  versionhq/tool/model.py,sha256=ve9C4WyiRjQigOU0hRWVxtSUWAQNntlmeW-_DL0_lJY,12328
  versionhq/tool/rag_tool.py,sha256=dW5o-83V4bMFFJEj3PUm7XjblwrYJGmZVBlCpPj6CeM,3852
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
- versionhq-1.2.4.2.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
- versionhq-1.2.4.2.dist-info/METADATA,sha256=qQ2-yJwPEuN62-SfvREXpqf3dDJc3xkwMrACWx5JYm8,21146
- versionhq-1.2.4.2.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
- versionhq-1.2.4.2.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.2.4.2.dist-info/RECORD,,
+ versionhq-1.2.4.5.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+ versionhq-1.2.4.5.dist-info/METADATA,sha256=gwFpoEsZC3Jfo5k2bvpZBPix_pWZCJ0h_UDDgRfrsBw,21146
+ versionhq-1.2.4.5.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+ versionhq-1.2.4.5.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.2.4.5.dist-info/RECORD,,