versionhq 1.1.12.4__py3-none-any.whl → 1.1.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -4,6 +4,9 @@ warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:"
  warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
  warnings.filterwarnings(action="ignore", module="LiteLLM:utils")
 
+ from dotenv import load_dotenv
+ load_dotenv(override=True)
+
  from versionhq.agent.model import Agent
  from versionhq.clients.customer.model import Customer
  from versionhq.clients.product.model import Product, ProductProvider
@@ -13,7 +16,7 @@ from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, J
  from versionhq.knowledge.source_docling import DoclingSource
  from versionhq.task.model import Task, TaskOutput, ConditionalTask, ResponseField
  from versionhq.task.evaluate import Evaluation, EvaluationItem
- from versionhq.team.model import Team, TeamOutput, Formation, TeamMember, TaskHandlingProcess
+ from versionhq.team.model import Team, TeamOutput, Formation, Member, TaskHandlingProcess
  from versionhq.tool.model import Tool, ToolSet
  from versionhq.tool.cache_handler import CacheHandler
  from versionhq.tool.tool_handler import ToolHandler
@@ -24,7 +27,7 @@ from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, M
  from versionhq.task.formation import form_agent_network
 

- __version__ = "1.1.12.4"
+ __version__ = "1.1.13.0"
  __all__ = [
  "Agent",

@@ -55,7 +58,7 @@ __all__ = [
  "Team",
  "TeamOutput",
  "Formation",
- "TeamMember",
+ "Member",
  "TaskHandlingProcess",

  "Tool",
@@ -3,14 +3,14 @@ from pydantic import BaseModel, Field
 
  class UsageMetrics(BaseModel):
  """
- Model to track usage metrics for the agent/team's execution.
+ Model to track usage
  """
 
- total_tokens: int = Field(default=0, description="Total number of tokens used")
- prompt_tokens: int = Field(default=0, description="Number of tokens used in prompts")
- cached_prompt_tokens: int = Field(default=0, description="Number of cached prompt tokens used")
- completion_tokens: int = Field(default=0, description="Number of tokens used in completions")
- successful_requests: int = Field(default=0, description="Number of successful requests made")
+ total_tokens: int = Field(default=0, description="total number of tokens used")
+ prompt_tokens: int = Field(default=0, description="number of tokens used in prompts")
+ cached_prompt_tokens: int = Field(default=0, description="number of cached prompt tokens used")
+ completion_tokens: int = Field(default=0, description="number of tokens used in completions")
+ successful_requests: int = Field(default=0, description="number of successful requests made")
 
  def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
  """
@@ -29,7 +29,7 @@ vhq_formation_planner = Agent(
  role="vhq-Formation Planner",
  goal="Plan a formation of agents based on the given task descirption.",
  llm="gemini/gemini-2.0-flash-exp",
- llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+ llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
  maxit=1,
  max_retry_limit=1,
  knowledge_sources=[
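
Note: the sampling key switches from snake_case top_k to camelCase topK (and 30 → 40), presumably to match the casing Gemini's generationConfig expects; how the dict is ultimately forwarded depends on versionhq/LiteLLM internals not shown in this diff. For comparison only:

    # key used through 1.1.12.4
    old_cfg = dict(top_p=0.8, top_k=30, temperature=0.9)
    # key used from 1.1.13.0 for the Gemini-backed planner agent
    new_cfg = dict(top_p=0.8, topK=40, temperature=0.9)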
versionhq/agent/model.py CHANGED
@@ -274,7 +274,7 @@ class Agent(BaseModel):
  tool_list = []
 
  for item in self.tools:
- if isinstance(item, Tool):
+ if isinstance(item, Tool) or isinstance(item, ToolSet):
  tool_list.append(item)
 
  elif isinstance(item, dict) and "func" in item:
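
Note: with this change, ToolSet instances pass through the agent's tool setup unchanged instead of falling through to the dict/callable branches. A hedged sketch under the assumption that Tool wraps a callable via a `func` argument and ToolSet pairs a tool with fixed kwargs, as the LLM.call hunk below suggests (constructor keywords are not confirmed by this diff):

    from versionhq import Agent, Tool, ToolSet

    def multiply(a: int, b: int) -> int:
        return a * b

    tool = Tool(func=multiply)                                 # assumed Tool constructor
    toolset = ToolSet(tool=tool, kwargs={"a": 3, "b": 4})      # tool bound to fixed arguments

    # 1.1.13.0: both items survive the isinstance check in the agent's tool validation
    agent = Agent(role="calculator", goal="do arithmetic", tools=[tool, toolset])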
versionhq/llm/llm_vars.py CHANGED
@@ -55,6 +55,11 @@ MODELS = {
  "openrouter": [
  "openrouter/deepseek/deepseek-r1",
  "openrouter/qwen/qwen-2.5-72b-instruct",
+ "openrouter/google/gemini-2.0-flash-thinking-exp:free",
+ "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
+ "openrouter/google/gemini-2.0-flash-001",
+ "openrouter/meta-llama/llama-3.3-70b-instruct",
+ "openrouter/mistralai/mistral-large-2411",
  ],
  "huggingface": [
  "huggingface/qwen/qwen2.5-VL-72B-Instruct",
versionhq/llm/model.py CHANGED
@@ -100,7 +100,7 @@ class LLM(BaseModel):
  # LiteLLM specific fields
  api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
  api_version: Optional[str] = Field(default=None)
- num_retries: Optional[int] = Field(default=2)
+ num_retries: Optional[int] = Field(default=1)
  context_window_fallback_dict: Optional[Dict[str, Any]] = Field(default=None, description="A mapping of model to use if call fails due to context window error")
  fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
  metadata: Optional[Dict[str, Any]] = Field(default=None)
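
Note: num_retries is a LiteLLM completion parameter; if _create_valid_params forwards the field unchanged (not shown in this diff), a failed request is now retried once instead of twice. A hedged sketch (placeholder model id, requires provider credentials):

    import litellm

    response = litellm.completion(
        model="gpt-4o-mini",                                   # placeholder model id
        messages=[{"role": "user", "content": "ping"}],
        num_retries=1,                                         # 1.1.13.0 default; was 2 in 1.1.12.4
    )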
@@ -256,60 +256,86 @@ class LLM(BaseModel):
  self._set_callbacks(self.callbacks) # passed by agent
 
  try:
- self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format
+ res, tool_res = None, ""
 
  if not tools:
+ self.response_format = response_format
  params = self._create_valid_params(config=config)
  res = litellm.completion(model=self.model, messages=messages, stream=False, **params)
  self._tokens += int(res["usage"]["total_tokens"])
  return res["choices"][0]["message"]["content"]
 
  else:
- self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
- params = self._create_valid_params(config=config)
- res = litellm.completion(model=self.model, messages=messages, **params)
- tool_calls = res.choices[0].message.tool_calls
- tool_res = ""
-
- for item in tool_calls:
- func_name = item.function.name
- func_args = item.function.arguments
-
- if not isinstance(func_args, dict):
- try:
- func_args = json.loads(json.dumps(eval(str(func_args))))
- except:
- pass
-
- for tool in tools:
- if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
- tool_instance = tool.tool
- args = tool.kwargs
- tool_res_to_add = tool_instance.run(params=args)
-
- if tool_res_as_final:
- tool_res += str(tool_res_to_add)
- else:
- messages.append(res.choices[0].message)
- messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
+ try:
+ self.response_format = { "type": "json_object" } if tool_res_as_final and self.provider != "gemini" else response_format
+ self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
+ params = self._create_valid_params(config=config)
+ res = litellm.completion(model=self.model, messages=messages, **params)
+ tool_calls = res.choices[0].message.tool_calls
+
+ if tool_calls:
+ for item in tool_calls:
+ func_name = item.function.name
+ func_args = item.function.arguments
+ if not isinstance(func_args, dict):
+ try:
+ func_args = json.loads(json.dumps(eval(str(func_args))))
+ except:
+ pass
+
+ # find a tool whose name is matched with the retrieved func_name
+ matches = []
+ for tool in tools:
+ tool_name = tool.tool.name if isinstance(tool, ToolSet) else tool.name
+ tool_func_name = tool.tool.func.__name__ if isinstance(tool, ToolSet) else tool.func.__name__
+ if tool_name.replace(" ", "_") == func_name or tool_func_name == func_name or tool_name == "random_func":
+ matches.append(tool)
+ else:
+ pass
+
+ if matches:
+ tool_to_execute = matches[0]
+ tool_instance = tool_to_execute.tool if isinstance(tool_to_execute, ToolSet) else tool_to_execute
+ params = tool_to_execute.kwargs if isinstance(tool_to_execute, ToolSet) else func_args
+ tool_res_to_add = tool_instance.run(params=params) if params else tool_instance.run()
 
- else:
- try:
- tool_res_to_add = tool.run(params=func_args)
  if tool_res_as_final:
- tool_res += str(tool_res_to_add)
+ if tool_res_to_add not in tool_res:
+ tool_res += str(tool_res_to_add)
  else:
  messages.append(res.choices[0].message)
  messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
- except:
+
+ else:
+ if tool_res_as_final and tools and not tool_res:
+ for item in tools:
+ tool_res_to_add = item.tool.run(params=item.kwargs) if isinstance(item, ToolSet) else item.run()
+ if tool_res_to_add not in tool_res:
+ tool_res += str(tool_res_to_add)
+ else:
+ pass
+
+ except:
+ if tool_res_as_final and tools and not tool_res:
+ for item in tools:
+ tool_res_to_add = item.tool.run(params=item.kwargs) if isinstance(item, ToolSet) else item.run()
+ if tool_res_to_add not in tool_res:
+ tool_res += str(tool_res_to_add)
+ else:
  pass
+ elif tools and not tool_res:
+ tool_res = res["choices"][0]["message"]["content"]
+ else:
+ pass
+
+
+ if tool_res_as_final:
+ return tool_res
+ else:
+ res = litellm.completion(model=self.model, messages=messages, **params)
+ self._tokens += int(res["usage"]["total_tokens"])
+ return res.choices[0].message.content
 
- if tool_res_as_final:
- return tool_res
- else:
- res = litellm.completion(model=self.model, messages=messages, **params)
- self._tokens += int(res["usage"]["total_tokens"])
- return res.choices[0].message.content
 
  except JSONSchemaValidationError as e:
  self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
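
Note: the rewritten tool path now (1) matches a returned function name against either the tool's display name with spaces replaced by underscores or its underlying function __name__, (2) executes only the first match and deduplicates appended results, and (3) falls back to running every tool directly when the model returns no tool_calls or the call raises. A self-contained illustration of just the matching rule, using plain dicts as stand-ins for Tool/ToolSet objects:

    # a tool matches when its display name (spaces -> underscores) or its
    # underlying function __name__ equals the function name the model returned
    def find_match(tools: list[dict], func_name: str) -> dict | None:
        matches = [
            t for t in tools
            if t["name"].replace(" ", "_") == func_name or t["func"].__name__ == func_name
        ]
        return matches[0] if matches else None   # first match wins, as in the diff

    def fetch_prices():
        return {"btc": 97000}

    tools = [{"name": "fetch prices", "func": fetch_prices}]
    assert find_match(tools, "fetch_prices") is not None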
@@ -70,17 +70,16 @@ class EvaluationItem(BaseModel):
  else: return None
 
 
-
  class Evaluation(BaseModel):
  items: List[EvaluationItem] = []
- latency: int = Field(default=None, description="seconds")
+ latency: int = Field(default=None, description="job execution latency in seconds")
  tokens: int = Field(default=None, description="tokens consumed")
- responsible_agent: Any = Field(default=None, description="store agent instance that evaluates the outcome")
+ eval_by: Any = Field(default=None, description="stores agent object that evaluates the outcome")
 
  @model_validator(mode="after")
- def set_up_responsible_agent(self) -> Self:
+ def set_up_evaluator(self) -> Self:
  from versionhq.agent.inhouse_agents import vhq_task_evaluator
- self.responsible_agent = vhq_task_evaluator
+ self.eval_by = vhq_task_evaluator
  return self
 
 
@@ -88,7 +87,7 @@ class Evaluation(BaseModel):
  """
  Create and store evaluation results in the memory metadata
  """
- eval_by = self.responsible_agent.role if self.responsible_agent else None
+ eval_by = self.eval_by.role if self.eval_by else None
  score = self.aggregate_score
  eval_criteria = ", ".join([item.criteria for item in self.items]) if self.items else None
  suggestion = self.suggestion_summary
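
Note: responsible_agent is renamed to eval_by across the model, its validator, and downstream callers (see versionhq/task/model.py below); the "after" validator still assigns the in-house vhq_task_evaluator on construction. A hedged sketch, assuming Evaluation can be instantiated directly with its defaults:

    from versionhq.task.evaluate import Evaluation

    evaluation = Evaluation()
    # set_up_evaluator() runs as a model_validator(mode="after"), so eval_by is
    # already populated with the in-house vhq_task_evaluator agent
    print(evaluation.eval_by.role)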
@@ -1,11 +1,11 @@
- from typing import List
+ from typing import List, Type
  from enum import Enum
 
  from pydantic import BaseModel
 
  from versionhq.task.model import Task
  from versionhq.agent.model import Agent
- from versionhq.team.model import Team, TeamMember, Formation
+ from versionhq.team.model import Team, Member, Formation
  from versionhq.agent.inhouse_agents import vhq_formation_planner
  from versionhq._utils import Logger
 
@@ -15,10 +15,10 @@ def form_agent_network(
  expected_outcome: str,
  agents: List[Agent] = None,
  context: str = None,
- formation: Formation = None
+ formation: Type[Formation] = None
  ) -> Team | None:
  """
- Make a formation of agents from the given task description, agents (optional), context (optional), and expected outcome (optional).
+ Make a formation of agents from the given task description, expected outcome, agents (optional), and context (optional).
  """
 
  if not task:
@@ -29,8 +29,37 @@ def form_agent_network(
  Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
  return None
 
+ if formation:
+ try:
+ match formation:
+ case Formation():
+ if formation == Formation.UNDEFINED:
+ formation = None
+ else:
+ pass
+
+ case str():
+ matched = [item for item in Formation._member_names_ if item == formation.upper()]
+ if matched:
+ formation = getattr(Formation, matched[0])
+ else:
+ # Formation._generate_next_value_(name=f"CUSTOM_{formation.upper()}", start=100, count=6, last_values=Formation.HYBRID.name)
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+ formation = None
+
+ case int() | float():
+ formation = Formation(int(formation))
+
+ case _:
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+ formation = None
+
+ except Exception as e:
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid: {str(e)}. We'll recreate a formation.", color="yellow")
+ formation = None
 
  try:
+ prompt_formation = formation.name if formation and isinstance(formation, Formation) else f"Select the best formation to effectively execute the tasks from the given Enum sets: {str(Formation.__dict__)}."
  class Outcome(BaseModel):
  formation: Enum
  agent_roles: list[str]
@@ -42,73 +71,78 @@ form_agent_network(
  Create a team of specialized agents designed to automate the following task and deliver the expected outcome. Consider the necessary roles for each agent with a clear task description. If you think we neeed a leader to handle the automation, return a leader_agent role as well, but if not, leave the a leader_agent role blank.
  Task: {str(task)}
  Expected outcome: {str(expected_outcome)}
+ Formation: {prompt_formation}
  """,
  pydantic_output=Outcome
  )
 
- if formation:
- vhq_task.description += f"Select 1 formation you think the best from the given Enum sets: {str(Formation.__dict__)}"
-
  if agents:
  vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
 
  res = vhq_task.execute_sync(agent=vhq_formation_planner, context=context)
- formation_ = Formation.SUPERVISING
+ _formation = Formation.SUPERVISING
 
  if res.pydantic:
  formation_keys = [k for k, v in Formation._member_map_.items() if k == res.pydantic.formation.upper()]
 
  if formation_keys:
- formation_ = Formation[formation_keys[0]]
+ _formation = Formation[formation_keys[0]]
 
  created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
  created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
+
  team_tasks = []
  members = []
  leader = str(res.pydantic.leader_agent)
 
  for i in range(len(created_agents)):
- is_manager = bool(created_agents[i].role.lower() in leader.lower())
- member = TeamMember(agent=created_agents[i], is_manager=is_manager)
+ is_manager = bool(created_agents[i].role.lower() == leader.lower())
+ member = Member(agent=created_agents[i], is_manager=is_manager)
+
+ if len(created_tasks) >= i and created_tasks[i]:
+ member.tasks.append(created_tasks[i])
+
+ members.append(member)
 
- if len(created_tasks) >= i:
- member.task = created_tasks[i]
- members.append(member)
 
  if len(created_agents) < len(created_tasks):
- team_tasks.extend(created_tasks[len(created_agents) - 1:len(created_tasks)])
+ team_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
 
  members.sort(key=lambda x: x.is_manager == False)
- team = Team(members=members, formation=formation_)
+ team = Team( members=members, formation=_formation, team_tasks=team_tasks, planner_llm=vhq_formation_planner.llm)
  return team
 
  else:
- formation_keys = [k for k, v in Formation._member_map_.items() if k == res.json_dict["formation"].upper()]
+ res = res.json_dict
+ formation_keys = [k for k, v in Formation._member_map_.items() if k == res["formation"].upper()]
 
  if formation_keys:
- formation_ = Formation[formation_keys[0]]
+ _formation = Formation[formation_keys[0]]
+
+ created_agents = [Agent(role=item, goal=item) for item in res["agent_roles"]]
+ created_tasks = [Task(description=item) for item in res["task_descriptions"]]
 
- created_agents = [Agent(role=item, goal=item) for item in res.json_dict["agent_roles"]]
- created_tasks = [Task(description=item) for item in res.json_dict["task_descriptions"]]
  team_tasks = []
  members = []
- leader = str(res.json_dict["leader_agent"])
+ leader = str(res["leader_agent"])
 
  for i in range(len(created_agents)):
- is_manager = bool(created_agents[i].role.lower() in leader.lower())
- member = TeamMember(agent=created_agents[i], is_manager=is_manager)
+ is_manager = bool(created_agents[i].role.lower() == leader.lower())
+ member = Member(agent=created_agents[i], is_manager=is_manager)
+
+ if len(created_tasks) >= i and created_tasks[i]:
+ member.tasks.append(created_tasks[i])
 
- if len(created_tasks) >= i:
- member.task = created_tasks[i]
- members.append(member)
+ members.append(member)
 
  if len(created_agents) < len(created_tasks):
- team_tasks.extend(created_tasks[len(created_agents) - 1:len(created_tasks)])
+ team_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
 
- members.sort(key=lambda x: x.is_manager == True)
- team = Team(members=members, formation=formation_)
+ members.sort(key=lambda x: x.is_manager == False)
+ team = Team( members=members, formation=_formation, team_tasks=team_tasks, planner_llm=vhq_formation_planner.llm)
  return team
 
+
  except Exception as e:
- Logger(verbose=True).log(level="error", message=f"Failed to create an agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
+ Logger(verbose=True).log(level="error", message=f"Failed to create a agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
  return None
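
Note: form_agent_network now accepts a pre-selected formation and coerces strings and numbers into the Formation enum before planning (invalid values fall back to letting the planner choose), and each created task is appended to a Member.tasks list instead of a single member.task. A hedged usage sketch, assuming the top-level export and the Formation members referenced above (the task text is illustrative; None is returned when planning fails):

    import versionhq as vhq

    # formation may be passed as an enum member, a name string, or a numeric value;
    # e.g. "supervising" is upper-cased, matched against Formation._member_names_,
    # and resolved to Formation.SUPERVISING by the new match/case block.
    network = vhq.form_agent_network(
        task="Draft a weekly market summary from the collected notes",
        expected_outcome="A 500-word summary in markdown",
        formation="supervising",
    )

    if network:
        for member in network.members:
            print(member.agent.role, member.is_manager, len(member.tasks))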
@@ -45,7 +45,7 @@ class TaskOutputStorageHandler:
  output_to_store = dict(
  description=str(task.description),
  raw=str(task.output.raw),
- responsible_agent=str(task.processed_by_agents),
+ responsible_agent=str(task.processed_agents),
  )
 
  self.storage.add(task=task, output=output_to_store, task_index=task_index, was_replayed=was_replayed, inputs=inputs)
versionhq/task/model.py CHANGED
@@ -203,7 +203,7 @@ class TaskOutput(BaseModel):
  description=EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item)),
  pydantic_output=EvaluationItem
  )
- res = task_eval.execute_sync(agent=self.evaluation.responsible_agent)
+ res = task_eval.execute_sync(agent=self.evaluation.eval_by)
 
  if res.pydantic:
  item = EvaluationItem(score=res.pydantic.score, suggestion=res.pydantic.suggestion, criteria=res.pydantic.criteria)
@@ -241,10 +241,7 @@ class TaskOutput(BaseModel):
 
  class Task(BaseModel):
  """
- Task to be executed by agents or teams.
- Each task must have a description.
- Default response is JSON string that strictly follows `response_fields` - and will be stored in TaskOuput.raw / json_dict.
- When `pydantic_output` is provided, we prioritize them and store raw (json string), json_dict, pydantic in the TaskOutput class.
+ A class that stores independent task information.
  """
 
  __hash__ = object.__hash__
@@ -282,7 +279,7 @@ class Task(BaseModel):
  eval_criteria: Optional[List[str]] = Field(default_factory=list, description="criteria to evaluate the outcome. i.e., fit to the brand tone")
 
  # recording
- processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
+ processed_agents: Set[str] = Field(default_factory=set, description="store roles of the agents that executed the task")
  tools_errors: int = 0
  delegations: int = 0
  latency: int | float = 0 # job latency in sec
@@ -640,7 +637,7 @@ Ref. Output image: {output_formats_to_follow}
 
  self.latency = (ended_at - started_at).total_seconds()
  self.output = task_output
- self.processed_by_agents.add(agent.role)
+ self.processed_agents.add(agent.role)
 
  if self.should_evaluate:
  task_output.evaluate(task=self, latency=self.latency, tokens=self.tokens)
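
Note: Task.processed_by_agents becomes processed_agents; it remains a set of role strings populated after each execution, and the log handler hunk above stringifies the same renamed attribute. A hedged sketch, assuming the top-level exports and that LLM credentials are configured via the new dotenv loading (the role and description are illustrative):

    from versionhq import Agent, Task

    task = Task(description="Summarize the attached notes in three bullet points")
    output = task.execute_sync(agent=Agent(role="summarizer", goal="summarize text"))

    # after execution, the executing agent's role is recorded on the renamed field
    print(task.processed_agents)      # e.g. {"summarizer"}
    print(task.latency, output.raw if output else None)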