versionhq 1.2.1.22__py3-none-any.whl → 1.2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -8,7 +8,7 @@ from dotenv import load_dotenv
  load_dotenv(override=True)

  from versionhq.agent.model import Agent
- from versionhq.agent_network.model import AgentNetwork, NetworkOutput, Formation, Member, TaskHandlingProcess
+ from versionhq.agent_network.model import AgentNetwork, Formation, Member, TaskHandlingProcess
  from versionhq.llm.model import LLM
  from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, PARAMS, PROVIDERS, MODELS
  from versionhq.clients.customer.model import Customer
@@ -19,7 +19,7 @@ from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, J
  from versionhq.knowledge.source_docling import DoclingSource
  from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType
  from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
- from versionhq.task.evaluate import Evaluation, EvaluationItem
+ from versionhq.task.evaluation import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
  from versionhq.tool.cache_handler import CacheHandler
  from versionhq.tool.tool_handler import ToolHandler
@@ -31,12 +31,11 @@ from versionhq.agent_network.formation import form_agent_network
  from versionhq.task_graph.draft import workflow


- __version__ = "1.2.1.22"
+ __version__ = "1.2.2.0"
  __all__ = [
      "Agent",

      "AgentNetwork",
-     "NetworkOutput",
      "Formation",
      "Member",
      "TaskHandlingProcess",
@@ -1,3 +1,3 @@
  from versionhq._utils.logger import Logger
  from versionhq._utils.process_config import process_config
- from versionhq._utils.usage_metrics import UsageMetrics
+ from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
@@ -21,3 +21,35 @@ class UsageMetrics(BaseModel):
          self.cached_prompt_tokens += usage_metrics.cached_prompt_tokens
          self.completion_tokens += usage_metrics.completion_tokens
          self.successful_requests += usage_metrics.successful_requests
+
+
+
+ # class TokenProcess:
+ #     total_tokens: int = 0
+ #     prompt_tokens: int = 0
+ #     cached_prompt_tokens: int = 0
+ #     completion_tokens: int = 0
+ #     successful_requests: int = 0
+
+ #     def sum_prompt_tokens(self, tokens: int) -> None:
+ #         self.prompt_tokens = self.prompt_tokens + tokens
+ #         self.total_tokens = self.total_tokens + tokens
+
+ #     def sum_completion_tokens(self, tokens: int) -> None:
+ #         self.completion_tokens = self.completion_tokens + tokens
+ #         self.total_tokens = self.total_tokens + tokens
+
+ #     def sum_cached_prompt_tokens(self, tokens: int) -> None:
+ #         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
+
+ #     def sum_successful_requests(self, requests: int) -> None:
+ #         self.successful_requests = self.successful_requests + requests
+
+ #     def get_summary(self) -> UsageMetrics:
+ #         return UsageMetrics(
+ #             total_tokens=self.total_tokens,
+ #             prompt_tokens=self.prompt_tokens,
+ #             cached_prompt_tokens=self.cached_prompt_tokens,
+ #             completion_tokens=self.completion_tokens,
+ #             successful_requests=self.successful_requests,
+ #         )
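
The token-accounting fields from the retired `TokenProcess` helper survive here only as a comment; the context lines above suggest `UsageMetrics` itself accumulates counts field by field. A rough sketch of that aggregation pattern, assuming only the field names shown above; the class and method names below (`UsageMetricsSketch`, `add`) are stand-ins, not names from the package:

    from pydantic import BaseModel

    class UsageMetricsSketch(BaseModel):
        # field names mirror the commented-out TokenProcess above
        total_tokens: int = 0
        prompt_tokens: int = 0
        cached_prompt_tokens: int = 0
        completion_tokens: int = 0
        successful_requests: int = 0

        def add(self, other: "UsageMetricsSketch") -> None:
            # same field-by-field accumulation as the context lines in this hunk
            self.total_tokens += other.total_tokens
            self.prompt_tokens += other.prompt_tokens
            self.cached_prompt_tokens += other.cached_prompt_tokens
            self.completion_tokens += other.completion_tokens
            self.successful_requests += other.successful_requests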
@@ -10,13 +10,15 @@ vhq_client_manager = Agent(
      role="vhq-Client Manager",
      goal="Efficiently communicate with the client on the task progress",
      llm=DEFAULT_MODEL_NAME,
+     maxit=1,
+     max_retry_limit=1,
      with_memory=True,
  )


  vhq_task_evaluator = Agent(
      role="vhq-Task Evaluator",
-     goal="score the output according to the given evaluation criteria.",
+     goal="score the output according to the given evaluation criteria, taking a step by step approach.",
      llm=DEFAULT_MODEL_NAME,
      llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
      maxit=1,
@@ -45,4 +47,6 @@ vhq_agent_creator = Agent(
      role="vhq-Agent Creator",
      goal="build an agent that can handle the given task",
      llm="gemini/gemini-2.0-flash-exp",
+     maxit=1,
+     max_retry_limit=1,
  )
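
The in-house agents now cap their retry behaviour explicitly. Per the `Agent` field definitions later in this diff, `max_retry_limit` bounds task retries after an error (default 2) and `maxit` bounds the total optimization loops (default 25), so setting both to 1 makes these utility agents fail fast. A hedged construction sketch; the role, goal, and variable names below are illustrative, only the model id and parameters come from this diff:

    import versionhq as vhq

    # fail-fast utility agent: one optimization loop, one retry
    quick_agent = vhq.Agent(
        role="example-utility-agent",
        goal="summarize the task output in one sentence",
        llm="gemini/gemini-2.0-flash-exp",
        maxit=1,
        max_retry_limit=1,
    )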
versionhq/agent/model.py CHANGED
@@ -14,7 +14,6 @@ from versionhq.memory.contextual_memory import ContextualMemory
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
  from versionhq._utils.logger import Logger
  from versionhq.agent.rpm_controller import RPMController
- from versionhq._utils.usage_metrics import UsageMetrics
  from versionhq._utils.process_config import process_config


@@ -22,37 +21,6 @@ load_dotenv(override=True)
  T = TypeVar("T", bound="Agent")


- class TokenProcess:
-     total_tokens: int = 0
-     prompt_tokens: int = 0
-     cached_prompt_tokens: int = 0
-     completion_tokens: int = 0
-     successful_requests: int = 0
-
-     def sum_prompt_tokens(self, tokens: int) -> None:
-         self.prompt_tokens = self.prompt_tokens + tokens
-         self.total_tokens = self.total_tokens + tokens
-
-     def sum_completion_tokens(self, tokens: int) -> None:
-         self.completion_tokens = self.completion_tokens + tokens
-         self.total_tokens = self.total_tokens + tokens
-
-     def sum_cached_prompt_tokens(self, tokens: int) -> None:
-         self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
-
-     def sum_successful_requests(self, requests: int) -> None:
-         self.successful_requests = self.successful_requests + requests
-
-     def get_summary(self) -> UsageMetrics:
-         return UsageMetrics(
-             total_tokens=self.total_tokens,
-             prompt_tokens=self.prompt_tokens,
-             cached_prompt_tokens=self.cached_prompt_tokens,
-             completion_tokens=self.completion_tokens,
-             successful_requests=self.successful_requests,
-         )
-
-
  # @track_agent()
  class Agent(BaseModel):
      """
@@ -62,7 +30,6 @@ class Agent(BaseModel):
      __hash__ = object.__hash__
      _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
      _request_within_rpm_limit: Any = PrivateAttr(default=None)
-     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
      _times_executed: int = PrivateAttr(default=0)
      _logger_config: Dict[str, Any] = PrivateAttr(default=dict(verbose=True, info_file_save=True))
      config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")
@@ -92,7 +59,7 @@ class Agent(BaseModel):
      user_prompt_template: Optional[str] = Field(default=None, description="abs. file path to user prompt template")

      # task execution rules
-     networks: Optional[List[Any]] = Field(default_factory=list, description="store a list of agent networks that the agent belong as a member")
+     networks: Optional[List[Any]] = Field(default_factory=list, description="store a list of agent networks that the agent belongs to as a member")
      allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
      max_retry_limit: int = Field(default=2, description="max. number of task retries when an error occurs")
      maxit: Optional[int] = Field(default=25, description="max. number of total optimization loops conducted when an error occurs")
@@ -428,10 +395,10 @@ class Agent(BaseModel):

          if tool_res_as_final:
              raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
-             task.tokens = self.func_calling_llm._tokens
+             task._tokens = self.func_calling_llm._tokens
          else:
              raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-             task.tokens = self.llm._tokens
+             task._tokens = self.llm._tokens

          task_execution_counter += 1
          Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
@@ -446,7 +413,7 @@ class Agent(BaseModel):
                  self._rpm_controller.check_or_wait()

              raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-             task.tokens = self.llm._tokens
+             task._tokens = self.llm._tokens
              iterations += 1

          task_execution_counter += 1
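
With `TokenProcess` and the agent-level `_token_process` attribute removed, the agent now copies the LLM's internal `_tokens` counter onto the task's private `_tokens` field after each call, instead of the former public `task.tokens`. A hypothetical sketch of that hand-off pattern with stand-in classes; apart from the `_tokens` attribute shown in the hunks above, none of these names are asserted to exist in the package:

    class FakeLLM:
        def __init__(self) -> None:
            self._tokens = 0  # running token count updated by call()

        def call(self, messages: list) -> str:
            self._tokens += 42  # pretend the provider reported 42 tokens
            return "ok"

    class FakeTask:
        _tokens: int = 0

    llm, task = FakeLLM(), FakeTask()
    raw_response = llm.call(messages=[{"role": "user", "content": "hi"}])
    task._tokens = llm._tokens  # same hand-off as the "+" lines above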