versionhq 1.2.4.2__py3-none-any.whl → 1.2.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.4.2"
+__version__ = "1.2.4.3"
 __all__ = [
     "Agent",
 
versionhq/_utils/__init__.py CHANGED
@@ -2,3 +2,4 @@ from versionhq._utils.logger import Logger
 from versionhq._utils.process_config import process_config
 from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
 from versionhq._utils.is_valid_url import is_valid_url
+from versionhq._utils.usage_metrics import UsageMetrics, ErrorType
versionhq/_utils/usage_metrics.py CHANGED
@@ -1,55 +1,72 @@
-from pydantic import BaseModel, Field
+import uuid
+import enum
+import datetime
+from typing import Dict, List
+from typing_extensions import Self
+
+from pydantic import BaseModel, UUID4, InstanceOf
+
+
+class ErrorType(enum.Enum):
+    FORMAT = 1
+    TOOL = 2
+    API = 3
+    OVERFITTING = 4
+    HUMAN_INTERACTION = 5
 
 
 class UsageMetrics(BaseModel):
-    """
-    Model to track usage
-    """
-
-    total_tokens: int = Field(default=0, description="total number of tokens used")
-    prompt_tokens: int = Field(default=0, description="number of tokens used in prompts")
-    cached_prompt_tokens: int = Field(default=0, description="number of cached prompt tokens used")
-    completion_tokens: int = Field(default=0, description="number of tokens used in completions")
-    successful_requests: int = Field(default=0, description="number of successful requests made")
-
-    def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
-        """
-        Add the usage metrics from another UsageMetrics object.
-        """
-        self.total_tokens += usage_metrics.total_tokens
-        self.prompt_tokens += usage_metrics.prompt_tokens
-        self.cached_prompt_tokens += usage_metrics.cached_prompt_tokens
-        self.completion_tokens += usage_metrics.completion_tokens
-        self.successful_requests += usage_metrics.successful_requests
-
-
-
-# class TokenProcess:
-#     total_tokens: int = 0
-#     prompt_tokens: int = 0
-#     cached_prompt_tokens: int = 0
-#     completion_tokens: int = 0
-#     successful_requests: int = 0
-
-#     def sum_prompt_tokens(self, tokens: int) -> None:
-#         self.prompt_tokens = self.prompt_tokens + tokens
-#         self.total_tokens = self.total_tokens + tokens
-
-#     def sum_completion_tokens(self, tokens: int) -> None:
-#         self.completion_tokens = self.completion_tokens + tokens
-#         self.total_tokens = self.total_tokens + tokens
-
-#     def sum_cached_prompt_tokens(self, tokens: int) -> None:
-#         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
-
-#     def sum_successful_requests(self, requests: int) -> None:
-#         self.successful_requests = self.successful_requests + requests
-
-#     def get_summary(self) -> UsageMetrics:
-#         return UsageMetrics(
-#             total_tokens=self.total_tokens,
-#             prompt_tokens=self.prompt_tokens,
-#             cached_prompt_tokens=self.cached_prompt_tokens,
-#             completion_tokens=self.completion_tokens,
-#             successful_requests=self.successful_requests,
-#         )
+    """A Pydantic model to manage token usage, errors, job latency."""
+
+    id: UUID4 = uuid.uuid4() # stores task id or task graph id
+    total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    successful_requests: int = 0
+    total_errors: int = 0
+    error_breakdown: Dict[ErrorType, int] = dict()
+    latency: float = 0.0 # in ms
+
+    def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
+        """Records usage metrics from the raw response of the model."""
+
+        if token_usages:
+            for item in token_usages:
+                self.total_tokens += int(item["total_tokens"]) if "total_tokens" in item else 0
+                self.completion_tokens += int(item["completion_tokens"]) if "completion_tokens" in item else 0
+                self.prompt_tokens += int(item["prompt_tokens"]) if "prompt_tokens" in item else 0
+
+
+    def record_errors(self, type: ErrorType = None) -> None:
+        self.total_errors += 1
+        if type:
+            if type in self.error_breakdown:
+                self.error_breakdown[type] += 1
+            else:
+                self.error_breakdown[type] = 1
+
+
+    def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
+        self.latency += round((end_dt - start_dt).total_seconds() * 1000, 3)
+
+
+    def aggregate(self, metrics: InstanceOf["UsageMetrics"]) -> Self:
+        if not metrics:
+            return self
+
+        self.total_tokens += metrics.total_tokens if metrics.total_tokens else 0
+        self.prompt_tokens += metrics.prompt_tokens if metrics.prompt_tokens else 0
+        self.completion_tokens += metrics.completion_tokens if metrics.completion_tokens else 0
+        self.successful_requests += metrics.successful_requests if metrics.successful_requests else 0
+        self.total_errors += metrics.total_errors if metrics.total_errors else 0
+        self.latency += metrics.latency if metrics.latency else 0.0
+        self.latency = round(self.latency, 3)
+
+        if metrics.error_breakdown:
+            for k, v in metrics.error_breakdown.items():
+                if self.error_breakdown and k in self.error_breakdown:
+                    self.error_breakdown[k] += int(v)
+                else:
+                    self.error_breakdown.update({ k: v })
+
+        return self
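
The hunk above replaces the old additive token counters with a single tracker that also covers errors and latency. A minimal sketch of how the new class is driven, assuming versionhq 1.2.4.3 is installed and the module path shown in this diff; the token dict mirrors the shape of litellm's usage payload, and all values are illustrative:

    import uuid
    import datetime
    from versionhq._utils.usage_metrics import UsageMetrics, ErrorType

    metrics = UsageMetrics(id=uuid.uuid4())

    # record_token_usage() sums each usage dict into the running totals
    metrics.record_token_usage(token_usages=[
        {"total_tokens": 120, "prompt_tokens": 80, "completion_tokens": 40},
    ])

    # record_errors() bumps total_errors and the per-type breakdown
    metrics.record_errors(type=ErrorType.FORMAT)

    # record_latency() accumulates the elapsed delta in milliseconds
    start = datetime.datetime.now()
    metrics.record_latency(start_dt=start, end_dt=datetime.datetime.now())

    # aggregate() folds one UsageMetrics instance into another and returns self
    total = UsageMetrics(id=uuid.uuid4()).aggregate(metrics=metrics)
    print(total.total_tokens, total.total_errors, total.latency)
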
versionhq/agent/model.py CHANGED
@@ -11,7 +11,7 @@ from versionhq.agent.rpm_controller import RPMController
 from versionhq.tool.model import Tool, ToolSet, BaseTool
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
-from versionhq._utils import Logger, process_config, is_valid_url
+from versionhq._utils import Logger, process_config, is_valid_url, ErrorType
 
 
 load_dotenv(override=True)
@@ -373,16 +373,17 @@ class Agent(BaseModel):
 
             if tool_res_as_final:
                 raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
-                task._tokens = self.func_calling_llm._tokens
+                task._usage.record_token_usage(token_usages=self.func_calling_llm._usages)
             else:
                 raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-                task._tokens = self.llm._tokens
+                task._usage.record_token_usage(token_usages=self.llm._usages)
 
             task_execution_counter += 1
             Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
             return raw_response
 
         except Exception as e:
+            task._usage.record_errors(type=ErrorType.API)
             Logger(**self._logger_config, filename=self.key).log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
 
             while not raw_response and task_execution_counter <= self.max_retry_limit:
@@ -526,6 +527,8 @@ class Agent(BaseModel):
                 tool_res_as_final=task.tool_res_as_final,
                 task=task
             )
+            if raw_response:
+                task._usage.successful_requests += 1
 
         except Exception as e:
             self._times_executed += 1
versionhq/agent_network/formation.py CHANGED
@@ -93,10 +93,11 @@ def form_agent_network(
 
     network_tasks = []
    members = []
-    leader = str(res.pydantic.leader_agent) if res.pydantic and hasattr(res.pydantic, "leader_agent") else str(res.json_dict["leader_agent"]) if "leader_agent" in res.json_dict else None
-
-    agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
-    created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
+    leader = res._fetch_value_of(key="leader_agent")
+    agent_roles = res._fetch_value_of(key="agent_roles")
+    created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles] if agent_roles else []
+    task_descriptions = res._fetch_value_of(key="task_descriptions")
+    task_outcomes = res._fetch_value_of(key="task_outcomes")
 
     if agents:
         for i, agent in enumerate(created_agents):
@@ -108,9 +109,9 @@
 
     created_tasks = []
 
-    if res.pydantic:
-        for i, item in enumerate(res.pydantic.task_outcomes):
-            if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+    if task_outcomes:
+        for i, item in enumerate(task_outcomes):
+            if len(task_descriptions) > i and task_descriptions[i]:
                 fields = {}
                 for ob in item:
                     try:
@@ -119,24 +120,9 @@
                     except:
                         pass
                 output = create_model("Output", **fields) if fields else None
-                _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+                _task = Task(description=task_descriptions[i], pydantic_output=output)
                 created_tasks.append(_task)
 
-    elif res.json_dict:
-        for i, item in enumerate(res["task_outcomes"]):
-            if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
-                fields = {}
-                for ob in item:
-                    try:
-                        field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
-                        fields[field_name] = (str, Field(default=None))
-                    except:
-                        pass
-                output = create_model("Output", **fields) if fields else None
-                _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
-                created_tasks.append(_task)
-
-
     if len(created_tasks) <= len(created_agents):
         for i in range(len(created_tasks)):
             is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
@@ -159,7 +145,6 @@
 
     network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
 
-
     if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
         role = leader if leader else "Leader"
         manager = Member(agent=Agent(role=role), is_manager=True)
versionhq/agent_network/model.py CHANGED
@@ -90,7 +90,6 @@ class AgentNetwork(BaseModel):
 
     cache: bool = Field(default=True)
     execution_logs: List[Dict[str, Any]] = Field(default_factory=list, description="list of execution logs of the tasks handled by members")
-    # usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")
 
 
     def __name__(self) -> str:
versionhq/llm/model.py CHANGED
@@ -69,7 +69,7 @@ class LLM(BaseModel):
 
     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _init_model_name: str = PrivateAttr(default=None)
-    _tokens: int = PrivateAttr(default=0) # aggregate number of tokens consumed
+    _usages: list[Dict[str, int]] = PrivateAttr(default_factory=list)
 
     model: str = Field(default=None)
     provider: Optional[str] = Field(default=None, description="model provider")
@@ -181,8 +181,6 @@
         """
         litellm.drop_params = True
 
-        self._tokens = 0
-
         if self.callbacks:
             self._set_callbacks(self.callbacks)
 
@@ -319,7 +317,7 @@
             if not tools:
                 params = self._create_valid_params(config=config)
                 res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
-                self._tokens += int(res["usage"]["total_tokens"])
+                self._usages.append(res["usage"])
                 return res["choices"][0]["message"]["content"]
 
             else:
@@ -384,12 +382,11 @@
                 else:
                     pass
 
-
             if tool_res_as_final:
                 return tool_res
             else:
                 res = litellm.completion(model=self.model, messages=messages, **params, **cred)
-                self._tokens += int(res["usage"]["total_tokens"])
+                self._usages.append(res["usage"])
                 return res.choices[0].message.content
 
 
versionhq/storage/task_output_storage.py CHANGED
@@ -147,8 +147,8 @@
             description=str(task.description),
             raw=str(task.output.raw),
             responsible_agents=str(task.processed_agents),
-            tokens=task.output._tokens,
-            latency=task.output.latency,
+            tokens=task._usage.total_tokens,
+            latency=task._usage.latency,
             score=task.output.aggregate_score if task.output.aggregate_score else "None",
         )
         self.storage.add(task=task, output=output_to_store, inputs=inputs)
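
The llm and storage hunks above are two ends of the same pipeline: LLM.call() now appends each litellm usage payload to _usages instead of keeping a single token counter, the task folds that list into its UsageMetrics, and the storage handler persists the resulting totals. A hedged sketch of the hand-off, with plain dicts standing in for litellm's res["usage"] entries (values invented):

    from versionhq._utils.usage_metrics import UsageMetrics

    # roughly what LLM._usages could hold after an initial call plus one retry
    usages = [
        {"total_tokens": 200, "prompt_tokens": 150, "completion_tokens": 50},
        {"total_tokens": 90, "prompt_tokens": 60, "completion_tokens": 30},
    ]

    metrics = UsageMetrics()
    metrics.record_token_usage(token_usages=usages)
    assert metrics.total_tokens == 290  # the value tokens=task._usage.total_tokens would store
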
versionhq/task/model.py CHANGED
@@ -6,7 +6,7 @@ import inspect
 import enum
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Dict, List, Set, Optional, Callable, Type, Tuple
+from typing import Any, Dict, List, Set, Optional, Callable, Type
 from typing_extensions import Annotated, Self
 
 from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
@@ -15,7 +15,7 @@ from pydantic_core import PydanticCustomError
 import versionhq as vhq
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
-from versionhq._utils import process_config, Logger
+from versionhq._utils import process_config, Logger, UsageMetrics, ErrorType
 
 
 class TaskExecutionType(enum.Enum):
@@ -175,18 +175,31 @@
     A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
     """
 
-    _tokens: int = PrivateAttr(default=0)
-
     task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
     raw: str = Field(default="", description="Raw output of the task")
     json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
     pydantic: Optional[Any] = Field(default=None)
     tool_output: Optional[Any] = Field(default=None, description="stores tool result when the task takes tool output as its final output")
     callback_output: Optional[Any] = Field(default=None, description="stores task or agent callback outcome")
-    latency: float = Field(default=None, description="job latency in ms")
     evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")
 
 
+    def _fetch_value_of(self, key: str = None) -> Any:
+        """Returns a value to the given key."""
+
+        if not key:
+            return None
+
+        if self.pydantic and hasattr(self.pydantic, key):
+            return getattr(self.pydantic, key)
+
+        elif self.json_dict and key in self.json_dict:
+            return self.json_dict[key]
+
+        else:
+            return None
+
+
     def _to_context_prompt(self) -> str:
         """Formats prompt context in text formats from the final response."""
 
@@ -225,7 +238,6 @@
 
         task_eval = Task(description=description, pydantic_output=EvaluationItem)
         res = task_eval.execute(agent=self.evaluation.eval_by)
-        self._tokens += task_eval._tokens
 
         if res.pydantic:
             item = EvaluationItem(
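
The new TaskOutput._fetch_value_of() centralizes the pydantic-first, json_dict-second lookup that formation.py previously spelled out inline. A small sketch using only the constructor arguments visible in this hunk; the field values are invented:

    from versionhq.task.model import TaskOutput

    out = TaskOutput(raw='{"leader_agent": "Planner"}', json_dict={"leader_agent": "Planner"})
    print(out._fetch_value_of(key="leader_agent"))  # "Planner", resolved from json_dict
    print(out._fetch_value_of(key="missing"))       # None: no pydantic attr, no dict key
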
@@ -328,9 +340,7 @@
     fsls: Optional[list[str]] = Field(default=None, description="stores ideal/weak responses")
 
     # recording
-    _tokens: int = 0
-    _tool_errors: int = 0
-    _format_errors: int = 0
+    _usage: UsageMetrics = PrivateAttr(default=None)
     _delegations: int = 0
     processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
     output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")
@@ -355,6 +365,8 @@
         for field in required_fields:
             if getattr(self, field) is None:
                 raise ValueError( f"{field} must be provided either directly or through config")
+
+        self._usage = UsageMetrics(id=self.id)
         return self
 
 
@@ -433,14 +445,15 @@
         output = json.loads(j)
 
         if isinstance(output, dict):
-            return output
+            return output["json_schema"] if "json_schema" in output else output
         else:
             try:
                 output = ast.literal_eval(j)
             except:
                 output = ast.literal_eval(r)
 
-            return output if isinstance(output, dict) else { "output": str(r) }
+
+            return output["json_schema"] if isinstance(output, dict) and "json_schema" in output else output if isinstance(output, dict) else { "output": str(r) }
 
 
     def _create_json_output(self, raw: str) -> Dict[str, Any]:
@@ -456,12 +469,13 @@
         try:
             output = json.loads(raw)
             if isinstance(output, dict):
-                return output
+                return output["json_schema"] if "json_schema" in output else output
             else:
                 output = self._sanitize_raw_output(raw=raw)
                 return output
         except:
             output = self._sanitize_raw_output(raw=raw)
+            self._usage.record_errors(type=ErrorType.FORMAT)
             return output
 
 
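
Both sanitizers now unwrap a top-level "json_schema" key before returning, so a model that echoes the response-format envelope no longer leaks it into the output dict. The rule in isolation, as a standalone sketch with an invented payload:

    import json

    raw = '{"json_schema": {"title": "Report", "summary": "..."}}'
    output = json.loads(raw)
    output = output["json_schema"] if "json_schema" in output else output
    print(output)  # {'title': 'Report', 'summary': '...'}
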
@@ -592,13 +606,6 @@
             res = self._test_time_computation(agent=agent, context=context)
             return res
 
-        # if self._pfg:
-        #     res, all_outputs = self.pfg.activate()
-        #     tokens, latency = self.pfg.usage
-        #     self._tokens = tokens
-        #     res.latency = latency
-        #     return res
-
         match type:
             case TaskExecutionType.SYNC:
                 res = self._execute_sync(agent=agent, context=context)
@@ -629,11 +636,11 @@
     def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
         """A core method to execute a single task."""
 
+        start_dt = datetime.datetime.now()
         task_output: InstanceOf[TaskOutput] = None
         raw_output: str = None
         tool_output: str | list = None
         task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
-        started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()
         user_prompt, dev_prompt = None, None
 
         if self.tools:
@@ -647,17 +654,14 @@
             self._delegations += 1
 
         if self.tool_res_as_final == True:
-            started_at = datetime.datetime.now()
             user_prompt, dev_prompt, tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
             raw_output = str(tool_output) if tool_output else ""
-            ended_at = datetime.datetime.now()
+            if not raw_output:
+                self._usage.record_errors(type=ErrorType.TOOL)
             task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=raw_output)
 
         else:
-            started_at = datetime.datetime.now()
             user_prompt, dev_prompt, raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
-            ended_at = datetime.datetime.now()
-
             json_dict_output = self._create_json_output(raw=raw_output)
             if "outcome" in json_dict_output:
                 json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
@@ -671,8 +675,6 @@
                 json_dict=json_dict_output,
             )
 
-        task_output.latency = round((ended_at - started_at).total_seconds() * 1000, 3)
-        task_output._tokens = self._tokens
         self.output = task_output
         self.processed_agents.add(agent.key)
 
@@ -706,6 +708,8 @@
         self.output = task_output
         self._store_logs()
 
+        end_dt = datetime.datetime.now()
+        self._usage.record_latency(start_dt=start_dt, end_dt=end_dt)
         return task_output
 
 
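
Latency is now measured once around the whole of _execute_core rather than with ad-hoc stamps around each agent call: one timestamp on entry, one after logs are stored, delta accumulated on the task's metrics. The same pattern in isolation, assuming the UsageMetrics API from this release:

    import datetime
    from versionhq._utils.usage_metrics import UsageMetrics

    usage = UsageMetrics()
    start_dt = datetime.datetime.now()
    # ... run the task ...
    end_dt = datetime.datetime.now()
    usage.record_latency(start_dt=start_dt, end_dt=end_dt)
    print(f"{usage.latency} ms")  # each call rounds its delta to 3 decimal places
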
@@ -716,22 +720,22 @@
         from versionhq._prompt.model import Prompt
         from versionhq._prompt.auto_feedback import PromptFeedbackGraph
 
+        # self._usage = None
         prompt = Prompt(task=self, agent=agent, context=context)
         pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
         pfg = pfg.set_up_graph()
         self._pfg = pfg
 
-        # try:
-        if self._pfg and self.output is None:
-            res, _ = self._pfg.activate()
-            tokens, latency = self._pfg.usage
-            self._tokens = tokens
-            res.latency = latency
-            return res
+        try:
+            if self._pfg and self.output is None:
+                res, all_outputs = self._pfg.activate()
+                if all_outputs: self._usage = self._pfg._usage
+                return res
 
-        # except:
-        #     Logger().log(level="error", message="Failed to execute the task.", color="red")
-        #     return None, None
+        except:
+            self._usage.record_errors(type=ErrorType.API)
+            Logger().log(level="error", message="Failed to execute the task.", color="red")
+            return None
 
 
     @property
versionhq/task_graph/draft.py CHANGED
@@ -96,6 +96,6 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
         task_graph.add_dependency(
             source=source.identifier, target=target.identifier, dependency_type=dependency_type)
 
-    task_graph.visualize()
+    # task_graph.visualize()
 
     return task_graph
versionhq/task_graph/model.py CHANGED
@@ -15,7 +15,7 @@ from pydantic_core import PydanticCustomError
 
 from versionhq.agent.model import Agent
 from versionhq.task.model import Task, TaskOutput, Evaluation
-from versionhq._utils import Logger
+from versionhq._utils import Logger, UsageMetrics, ErrorType
 
 
 class ReformTriggerEvent(enum.Enum):
@@ -393,6 +393,8 @@
 
 
 class TaskGraph(Graph):
+    _usage: Optional[UsageMetrics] = None
+
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     should_reform: bool = False
     reform_trigger_event: Optional[ReformTriggerEvent] = None
@@ -418,6 +420,40 @@
             Logger().log(level="error", message=f"Failed to save the graph {str(self.id)}: {str(e)}", color="red")
 
 
+    def _handle_usage(self) -> None:
+        """Returns total tokens and latency spended for the graph execution."""
+        if not self.nodes:
+            return None
+
+        self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+
+        for node in self.nodes.values():
+            if node.task and node.task._usage:
+                self._usage.aggregate(metrics=node.task._usage)
+
+
+    def _handle_human_input(self) -> str | None:
+        """Handles input from human."""
+        request = None
+
+        print('Proceed? Y/n:')
+        x = input()
+
+        if x.lower() == "y":
+            Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
+
+        else:
+            request = input("Request?")
+            if request:
+                Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
+            else:
+                Logger().log(message="Cannot recognize your request.", level="error", color="red")
+                self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+                self._usage.record_errors(type=ErrorType.HUMAN_INTERACTION)
+
+        return request
+
+
     def add_task(self, task: Node | Task) -> Node:
         """Convert `task` to a Node object and add it to G"""
 
@@ -635,6 +671,7 @@
         self.concl = res
         self.concl_template = self.concl_template if self.concl_template else res.pydantic.__class__ if res.pydantic else None
         # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
+        self._handle_usage()
         return res, self.outputs
 
 
@@ -657,27 +694,6 @@
         return eval
 
 
-    def _handle_human_input(self) -> str | None:
-        """Handles input from human."""
-        request = None
-
-        print('Proceed? Y/n:')
-        x = input()
-
-        if x.lower() == "y":
-            Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
-
-        else:
-            request = input("Request?")
-
-            if request:
-                Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
-            else:
-                Logger().log(message="Cannot recognize your request.", level="error", color="red")
-
-        return request
-
-
     def handle_reform(self, target: str = None) -> Self:
         task_description = "Improve the given output: "
         if target:
@@ -693,15 +709,3 @@
         self.add_node(node=new_node)
         self.add_dependency(source=target, target=new_node.identifier)
         return self.activate(target=new_node.identifier)
-
-
-    @property
-    def usage(self) -> Tuple[int, float]:
-        """Returns aggregate number of consumed tokens and job latency in ms during the activation"""
-
-        tokens, latency = 0, 0
-        for v in self.outputs.values():
-            tokens += v._tokens
-            latency += v.latency
-
-        return tokens, latency
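
The removed usage property summed tokens and latency off the stored TaskOutput objects; _handle_usage() above replaces it by folding every node's task-level UsageMetrics into one graph-level object via aggregate(). A sketch of just the aggregation step, with the graph and node wiring stripped away and counts invented:

    import uuid
    from versionhq._utils.usage_metrics import UsageMetrics

    graph_usage = UsageMetrics(id=uuid.uuid4())
    node_metrics = [
        UsageMetrics(total_tokens=120, latency=350.0),
        UsageMetrics(total_tokens=80, latency=210.5),
    ]

    for m in node_metrics:  # what _handle_usage() does with each node.task._usage
        graph_usage.aggregate(metrics=m)

    print(graph_usage.total_tokens, graph_usage.latency)  # 200 560.5
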
versionhq-1.2.4.2.dist-info/METADATA → versionhq-1.2.4.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.4.2
+Version: 1.2.4.3
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
versionhq-1.2.4.2.dist-info/RECORD → versionhq-1.2.4.3.dist-info/RECORD CHANGED
@@ -1,25 +1,25 @@
-versionhq/__init__.py,sha256=m36KgpiM94kUCZJm-JA7gPWjb7gNgMHHXUAbxzKlIOs,3026
+versionhq/__init__.py,sha256=F0IUNu8UG7KfohEotN5MykEfCklxJjXe4c5kcaVIwlE,3026
 versionhq/_prompt/auto_feedback.py,sha256=iIa3ReiFqs-JA2Q4Y_VnLV-DbXPelEVSMHTw3tICVTE,3892
 versionhq/_prompt/constants.py,sha256=DOwUFnVVObEFqgnaMCDnW8fnw1oPMgS8JAqOiTuqleI,932
 versionhq/_prompt/model.py,sha256=GQPaC_Vj1wQ69ZHlzXWdtdif8UeF6WK1jN3JrFEcCt0,8662
-versionhq/_utils/__init__.py,sha256=llXOcGFlR9YF5iMI5uFb4twvM9wo-vmoMw8y1KzQVVc,233
+versionhq/_utils/__init__.py,sha256=UggL2r-idlWDh0cIPFLyJ7AvO17NLzhjheW4IBFLBj4,300
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/is_valid_url.py,sha256=m8Mswvb-90FJtx1Heq6hPFDbwGgrv_R3wSbZQmEPM9Q,379
 versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
 versionhq/_utils/logger.py,sha256=iHxGjm3BvUo5dHKLU88_pc0Z45wzSHOjyJGQkb7OADk,3255
 versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
-versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7B3JE,2246
+versionhq/_utils/usage_metrics.py,sha256=zaoH6xjWX69UrQJmViBiX3sEUpnwSoHaapCPfWU2oM8,2632
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=BPkvEyMH8VnZWsMeCwsGplDT_kLwlIejeRcr-6ItGqQ,2637
-versionhq/agent/model.py,sha256=Zom5G0ubq4TWGWF6PK631-6puCjcdHYKj0R355GS9T8,24480
+versionhq/agent/model.py,sha256=duB4qQDh3RIQZ6YyZovvBKUBdzMtgCFiel0qC0BGbzQ,24691
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=dkfuATUQ2g2WoUKkmgAIch-RB--bektGoQaUlsDOn0g,529
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent_network/formation.py,sha256=WkAbmE2-Oqw8KWDcqqHU_s98QzGJWNQ_YCAls6ZuQbg,8260
-versionhq/agent_network/model.py,sha256=-vLBqPCtfLxTf17toJkE7Gkxg1SwlrA-Frf2Pc_uB50,16021
+versionhq/agent_network/formation.py,sha256=XiGyjqsquTcKqyq6r2JR506wEH0uuD8HyBvWNdvz1Fg,7505
+versionhq/agent_network/model.py,sha256=65y4HTltfvgOuHnSUwDTydtt1qkq1xVk-AxF1ws1J9M,15899
 versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -37,7 +37,7 @@ versionhq/knowledge/source_docling.py,sha256=XpavmLvh4dLcuTikj8MCE9KG52oQMafy7_w
 versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/llm/llm_vars.py,sha256=qSG-_pYeWksdMmwASXpjQqf97fMovsY4lNTSCHQF88k,5694
-versionhq/llm/model.py,sha256=P4J6ZU0vY5HU7XHLelz7oznPmeEElHPFsAo-2Vd8DQ0,17255
+versionhq/llm/model.py,sha256=Hc_fYYmhM_HuYs4dBtZlHwGqVtqnEC_rvGXD1o6RB4A,17186
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
 versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -46,18 +46,18 @@ versionhq/storage/base.py,sha256=p-Jas0fXQan_qotnRD6seQxrT2lj-uw9-SmHQhdppcs,355
 versionhq/storage/ltm_sqlite_storage.py,sha256=LeJE4ZPUWjyY1E5nNCHoKujTHFDR2BO_LAMvAOX-WHg,3957
 versionhq/storage/mem0_storage.py,sha256=ZY8MELBWaINRv9YuRW5MxH7dj2cII-L0i3xSD6o1-2M,3781
 versionhq/storage/rag_storage.py,sha256=bS2eE874obarYl-4hT6ZWYWTRsqtfuGpKgKzERmM6Uo,7433
-versionhq/storage/task_output_storage.py,sha256=M8vInLJ5idGAq17w1juHKXtyPyF-B-rK_P8UcqD-Px8,5357
+versionhq/storage/task_output_storage.py,sha256=nkBNmBbrQeEgds3lAyKl4ugDWLtWRoCQUO6KiOmCIMU,5362
 versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,748
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluation.py,sha256=qQSA5ZWTWA3he54ystsYpTKXJWv68gBL6DCq8ZW1bl8,3813
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
-versionhq/task/model.py,sha256=YsuxbohcXLwE1qB8-lPnl_sFWQJ7CPqKDbYbf8CeZww,28879
+versionhq/task/model.py,sha256=ODcyS_lTT0NoVRA3MSuczbOVp03gpwmb5i48nVOe8jo,29069
 versionhq/task/structured_response.py,sha256=tqOHpch8CVmMj0aZXjdDWtPNcVmBW8DVZnBvPBwS4PM,5053
 versionhq/task/TEMPLATES/Description.py,sha256=hKhpbz0ztbkUMXz9KiL-P40fis9OB5ICOdL9jCtgAhU,864
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
-versionhq/task_graph/draft.py,sha256=0zDRx1-PXIwuB_RkdKQTHgqjm-5VrJHhljBIlYRX6EM,4919
-versionhq/task_graph/model.py,sha256=T7-Rj05q9gKIXPNwPdr1cOjnxNQdNicEFg2WIApO9Og,28877
+versionhq/task_graph/draft.py,sha256=l8nRV39Rh5lLLQ_hHgbHyVLpoBqAzWk9QIXNaGiEbeE,4921
+versionhq/task_graph/model.py,sha256=bUptIXKe8fR9hbZRo81ns_nUMWYklWgTBgtW_4PS4bU,29214
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
 versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
@@ -66,8 +66,8 @@ versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1
 versionhq/tool/model.py,sha256=ve9C4WyiRjQigOU0hRWVxtSUWAQNntlmeW-_DL0_lJY,12328
 versionhq/tool/rag_tool.py,sha256=dW5o-83V4bMFFJEj3PUm7XjblwrYJGmZVBlCpPj6CeM,3852
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.4.2.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
-versionhq-1.2.4.2.dist-info/METADATA,sha256=qQ2-yJwPEuN62-SfvREXpqf3dDJc3xkwMrACWx5JYm8,21146
-versionhq-1.2.4.2.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
-versionhq-1.2.4.2.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
-versionhq-1.2.4.2.dist-info/RECORD,,
+versionhq-1.2.4.3.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.4.3.dist-info/METADATA,sha256=4q8mpXgq7TcaXjvX26JIE0FsQ78hgJuuEPBhefJiknI,21146
+versionhq-1.2.4.3.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+versionhq-1.2.4.3.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.4.3.dist-info/RECORD,,