versionhq 1.1.12.1__py3-none-any.whl → 1.1.12.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
- EVALUATE="""Assess the accuracy and quality of the following task output to the task described below. Score based on the criterion (0-1, 0=worst, 1=best) and suggest improvements. Vary scores; don't assign identical values. Store criteria in the "criteria" field.
- Task: {task_description}
- Task Output: {task_output}
+ EVALUATE="""Evaluate the provided task output against the given task description, assigning a score between 0 (worst) and 1 (best) based on the specified criteria. Scores should be numerical (integers or decimals). Provide specific suggestions for improvement. Do not assign identical scores to different criteria:
+ Task output: {task_output}
+ Task description: {task_description}

  Evaluation criteria: {eval_criteria}
  """
@@ -1,9 +0,0 @@
- from enum import Enum
-
-
- class TaskOutputFormat(str, Enum):
-     """Enum that represents the output format of a task."""
-
-     JSON = "json"
-     PYDANTIC = "pydantic model"
-     RAW = "raw"
@@ -3,6 +3,8 @@ from typing_extensions import Self

  from pydantic import BaseModel, Field, model_validator

+ from versionhq.memory.model import MemoryMetadata
+
  """
  Evaluate task output from accuracy, token consumption, latency perspectives, and mark the score from 0 to 1.
  """
@@ -59,10 +61,10 @@ class EvaluationItem(BaseModel):
      """
      criteria: str
      suggestion: str
-     score: int | float
+     score: float

      def _convert_score_to_score_format(self, weight: int = 1) -> ScoreFormat | None:
-         if self.score and isinstance(self.score, (int, float)):
+         if self.score and isinstance(self.score, float):
              return ScoreFormat(rate=self.score, weight=weight)

          else: return None
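
Narrowing `score` to `float` still accepts whole-number scores, since pydantic coerces integer values to `float` during validation; a quick sketch:

```python
# Sketch: pydantic coerces int -> float for a `score: float` field,
# so the isinstance(self.score, float) check passes for whole numbers.
item = EvaluationItem(criteria="Accuracy", suggestion="Cite sources.", score=1)  # hypothetical values
assert isinstance(item.score, float)
score_format = item._convert_score_to_score_format(weight=2)  # -> ScoreFormat(rate=1.0, weight=2)
```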
@@ -75,7 +77,6 @@ class Evaluation(BaseModel):
      tokens: int = Field(default=None, description="tokens consumed")
      responsible_agent: Any = Field(default=None, description="store agent instance that evaluates the outcome")

-
      @model_validator(mode="after")
      def set_up_responsible_agent(self) -> Self:
          from versionhq.agent.inhouse_agents import vhq_task_evaluator
@@ -83,6 +84,18 @@ class Evaluation(BaseModel):
          return self


+     def _create_memory_metadata(self) -> MemoryMetadata:
+         """
+         Create and store evaluation results in the memory metadata.
+         """
+         eval_by = self.responsible_agent.role if self.responsible_agent else None
+         score = self.aggregate_score
+         eval_criteria = ", ".join([item.criteria for item in self.items]) if self.items else None
+         suggestion = self.suggestion_summary
+         memory_metadata = MemoryMetadata(eval_by=eval_by, score=score, eval_criteria=eval_criteria, suggestion=suggestion)
+         return memory_metadata
+
+
      @property
      def aggregate_score(self) -> float:
          """
@@ -0,0 +1,114 @@
+ from typing import List
+ from enum import Enum
+
+ from pydantic import BaseModel
+
+ from versionhq.task.model import Task
+ from versionhq.agent.model import Agent
+ from versionhq.team.model import Team, TeamMember, Formation
+ from versionhq.agent.inhouse_agents import vhq_formation_planner
+ from versionhq._utils import Logger
+
+
+ def form_agent_network(
+     task: str,
+     expected_outcome: str,
+     agents: List[Agent] = None,
+     context: str = None,
+     formation: Formation = None
+ ) -> Team | None:
+     """
+     Make a formation of agents from the given task description, agents (optional), context (optional), and expected outcome (optional).
+     """
+
+     if not task:
+         Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
+         return None
+
+     if not expected_outcome:
+         Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
+         return None
+
+     try:
+         class Outcome(BaseModel):
+             formation: Enum
+             agent_roles: list[str]
+             task_descriptions: list[str]
+             leader_agent: str
+
+         vhq_task = Task(
+             description=f"""
+             Create a team of specialized agents designed to automate the following task and deliver the expected outcome. Consider the necessary roles for each agent with a clear task description. If you think we need a leader to handle the automation, return a leader_agent role as well, but if not, leave the leader_agent role blank.
+             Task: {str(task)}
+             Expected outcome: {str(expected_outcome)}
+             """,
+             pydantic_output=Outcome
+         )
+
+         if formation:
+             vhq_task.description += f"Select 1 formation you think the best from the given Enum sets: {str(Formation.__dict__)}"
+
+         if agents:
+             vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
+
+         res = vhq_task.execute_sync(agent=vhq_formation_planner, context=context)
+         formation_ = Formation.SUPERVISING
+
+         if res.pydantic:
+             formation_keys = [k for k, v in Formation._member_map_.items() if k == res.pydantic.formation.upper()]
+
+             if formation_keys:
+                 formation_ = Formation[formation_keys[0]]
+
+             created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
+             created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
+             team_tasks = []
+             members = []
+             leader = str(res.pydantic.leader_agent)
+
+             for i in range(len(created_agents)):
+                 is_manager = bool(created_agents[i].role.lower() in leader.lower())
+                 member = TeamMember(agent=created_agents[i], is_manager=is_manager)
+
+                 if i < len(created_tasks):
+                     member.task = created_tasks[i]
+                 members.append(member)
+
+             if len(created_agents) < len(created_tasks):
+                 team_tasks.extend(created_tasks[len(created_agents) - 1:len(created_tasks)])
+
+             members.sort(key=lambda x: x.is_manager == False)
+             team = Team(members=members, formation=formation_)
+             return team
+
+         else:
+             formation_keys = [k for k, v in Formation._member_map_.items() if k == res.json_dict["formation"].upper()]
+
+             if formation_keys:
+                 formation_ = Formation[formation_keys[0]]
+
+             created_agents = [Agent(role=item, goal=item) for item in res.json_dict["agent_roles"]]
+             created_tasks = [Task(description=item) for item in res.json_dict["task_descriptions"]]
+             team_tasks = []
+             members = []
+             leader = str(res.json_dict["leader_agent"])
+
+             for i in range(len(created_agents)):
+                 is_manager = bool(created_agents[i].role.lower() in leader.lower())
+                 member = TeamMember(agent=created_agents[i], is_manager=is_manager)
+
+                 if i < len(created_tasks):
+                     member.task = created_tasks[i]
+                 members.append(member)
+
+             if len(created_agents) < len(created_tasks):
+                 team_tasks.extend(created_tasks[len(created_agents) - 1:len(created_tasks)])
+
+             members.sort(key=lambda x: x.is_manager == True)
+             team = Team(members=members, formation=formation_)
+             return team
+
+     except Exception as e:
+         Logger(verbose=True).log(level="error", message=f"Failed to create an agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
+         return None
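
A minimal usage sketch for the new helper; the task and outcome strings are hypothetical:

```python
# Hypothetical usage of the new module-level helper.
network = form_agent_network(
    task="Draft a weekly newsletter from our product changelog",
    expected_outcome="A ready-to-send HTML newsletter",
)
if network:                    # None is returned on validation or execution failure
    res = network.launch()     # launch() replaces kickoff() in this release
```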
versionhq/task/model.py CHANGED
@@ -5,18 +5,17 @@ import uuid
  import inspect
  from concurrent.futures import Future
  from hashlib import md5
- from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
+ from typing import Any, Dict, List, Set, Optional, Callable, Type
  from typing_extensions import Annotated, Self

  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
  from pydantic_core import PydanticCustomError

- from versionhq._utils.process_config import process_config
- from versionhq.task import TaskOutputFormat
+
  from versionhq.task.log_handler import TaskOutputStorageHandler
  from versionhq.task.evaluate import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
- from versionhq._utils.logger import Logger
+ from versionhq._utils import process_config, Logger


  class ResponseField(BaseModel):
@@ -77,9 +76,7 @@ class ResponseField(BaseModel):
          if self.properties:
              for item in self.properties:
                  nested_p.update(**item._format_props())
-
-                 if item.required:
-                     nested_r.append(item.title)
+                 nested_r.append(item.title)

          props = {
              "type": schema_type,
@@ -97,8 +94,6 @@ class ResponseField(BaseModel):
          if self.properties:
              for item in self.properties:
                  p.update(**item._format_props())
-
-                 # if item.required:
                  r.append(item.title)

          props = {
@@ -204,12 +199,25 @@ class TaskOutput(BaseModel):
          eval_criteria = task.eval_criteria if task.eval_criteria else ["Overall competitiveness", ]

          for item in eval_criteria:
-             task_1 = Task(
+             task_eval = Task(
                  description=EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item)),
                  pydantic_output=EvaluationItem
              )
-             res_a = task_1.execute_sync(agent=self.evaluation.responsible_agent)
-             self.evaluation.items.append(EvaluationItem(**res_a.json_dict))
+             res = task_eval.execute_sync(agent=self.evaluation.responsible_agent)
+
+             if res.pydantic:
+                 item = EvaluationItem(score=res.pydantic.score, suggestion=res.pydantic.suggestion, criteria=res.pydantic.criteria)
+                 self.evaluation.items.append(item)
+
+             else:
+                 try:
+                     item = EvaluationItem(
+                         score=float(res.json_dict["score"]), suggestion=res.json_dict["suggestion"], criteria=res.json_dict["criteria"]
+                     )
+                     self.evaluation.items.append(item)
+                 except Exception as e:
+                     Logger(verbose=True).log(level="error", message=f"Failed to convert the evaluation items: {str(e)}", color="red")
+                     pass

          return self.evaluation

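
The explicit `float(...)` cast in the fallback branch guards against the LLM returning the score as a JSON string; for example:

```python
# Hypothetical payload: json_dict values may arrive as strings from the LLM.
raw = {"score": "0.8", "suggestion": "Add citations.", "criteria": "Accuracy"}
item = EvaluationItem(score=float(raw["score"]), suggestion=raw["suggestion"], criteria=raw["criteria"])
```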
@@ -224,14 +232,6 @@ class TaskOutput(BaseModel):

      @property
      def json(self) -> Optional[str]:
-         if self.output_format != TaskOutputFormat.JSON:
-             raise ValueError(
-                 """
-                 Invalid output format requested.
-                 If you would like to access the JSON output,
-                 pleae make sure to set the output_json property for the task
-                 """
-             )
          return json.dumps(self.json_dict)

@@ -239,7 +239,6 @@
          return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw


-
  class Task(BaseModel):
      """
      Task to be executed by agents or teams.
@@ -394,14 +393,6 @@ Ref. Output image: {output_formats_to_follow}
          return "\n".join(task_slices)


-     def _get_output_format(self) -> TaskOutputFormat:
-         if self.output_json == True:
-             return TaskOutputFormat.JSON
-         if self.output_pydantic == True:
-             return TaskOutputFormat.PYDANTIC
-         return TaskOutputFormat.RAW
-
-
      def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
          """
          Structure a response format either from`response_fields` or `pydantic_output`.
@@ -508,75 +499,59 @@ Ref. Output image: {output_formats_to_follow}
          self.description = self._original_description.format(**inputs)


-     def _create_short_term_memory(self, agent, task_output: TaskOutput) -> None:
+     def _create_short_and_long_term_memories(self, agent: Any, task_output: TaskOutput) -> None:
          """
-         After the task execution, create and save short-term memory of the responsible agent.
+         After the task execution, create and save short-term/long-term memories in the storage.
          """
-
          from versionhq.agent.model import Agent
-         from versionhq.memory.model import ShortTermMemory
-
-         try:
-             if isinstance(agent, Agent) and agent.use_memory == True:
-                 if hasattr(agent, "short_term_memory"):
-                     agent.short_term_memory.save(value=task_output.raw, metadata={ "observation": self.description, }, agent=agent.role)
-                 else:
-                     agent.short_term_memory = ShortTermMemory(agent=agent, embedder_config=agent.embedder_config)
-                     agent.short_term_memory.save(value=task_output.raw, metadata={ "observation": self.description, }, agent=agent.role)
+         from versionhq.memory.model import ShortTermMemory, MemoryMetadata, LongTermMemory

-         except Exception as e:
-             self._logger.log(level="error", message=f"Failed to add to short term memory: {str(e)}", color="red")
-             pass
+         agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent), use_memory=True)

-
-     def _create_long_term_memory(self, agent, task_output: TaskOutput) -> None:
-         """
-         Create and save long-term and entity memory items based on evaluation.
-         """
-         from versionhq.agent.model import Agent
-         from versionhq.memory.model import LongTermMemory, LongTermMemoryItem
+         if agent.use_memory == False:
+             return None

          try:
-             if isinstance(agent, Agent) and agent.use_memory == True:
-                 evaluation = task_output.evaluation if task_output.evaluation else task_output.evaluate(task=self)
-
-                 long_term_memory_item = LongTermMemoryItem(
-                     agent=str(agent.role),
-                     task=str(self.description),
-                     datetime=str(datetime.datetime.now()),
-                     quality=evaluation.aggregate_score,
-                     metadata={
-                         "suggestions": evaluation.suggestion_summary,
-                         "quality": evaluation.aggregate_score,
-                     },
-                 )
+             evaluation = task_output.evaluation if task_output.evaluation else None
+             memory_metadata = evaluation._create_memory_metadata() if evaluation else MemoryMetadata()
+
+             agent.short_term_memory = agent.short_term_memory if agent.short_term_memory else ShortTermMemory(agent=agent, embedder_config=agent.embedder_config)
+             agent.short_term_memory.save(
+                 task_description=str(self.description),
+                 task_output=str(task_output.raw),
+                 agent=str(agent.role),
+                 metadata=memory_metadata
+             )

-             if hasattr(agent, "long_term_memory"):
-                 agent.long_term_memory.save(item=long_term_memory_item)
-             else:
-                 agent.long_term_memory = LongTermMemory(agent=agent)
-                 agent.long_term_memory.save(item=long_term_memory_item)
+             agent.long_term_memory = agent.long_term_memory if agent.long_term_memory else LongTermMemory()
+             agent.long_term_memory.save(
+                 task_description=str(self.description),
+                 task_output=str(task_output.raw),
+                 agent=str(agent.role),
+                 metadata=memory_metadata
+             )

          except AttributeError as e:
              self._logger.log(level="error", message=f"Missing attributes for long term memory: {str(e)}", color="red")
              pass

          except Exception as e:
-             self._logger.log(level="error", message=f"Failed to add to long term memory: {str(e)}", color="red")
+             self._logger.log(level="error", message=f"Failed to add to the memory: {str(e)}", color="red")
              pass


      # task execution
-     def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
+     def execute_sync(self, agent, context: Optional[str | List[Any]] = None) -> TaskOutput:
          """
          Execute the task synchronously.
          When the task has context, make sure we have executed all the tasks in the context first.
          """

          if self.context:
-             for task in self.context:
-                 if task.output is None:
-                     task._execute_core(agent, context)
+             if isinstance(self.context, list):
+                 for task in self.context:
+                     if isinstance(task, Task) and task.output is None:
+                         task._execute_core(agent, context)

          return self._execute_core(agent, context)

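
With the widened signature, a task's context may hold a mixed list, and only `Task` instances are pre-executed; a hedged sketch with hypothetical tasks (assuming `Task` accepts a `context` keyword and `some_agent` exists):

```python
# Hypothetical tasks - only Task instances in the context list run first.
research = Task(description="Collect 2024 churn data")
summary = Task(
    description="Summarize churn drivers",
    context=[research, "focus on enterprise accounts"],  # non-Task entries are skipped
)
out = summary.execute_sync(agent=some_agent, context="quarterly business review")
```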
@@ -670,9 +645,7 @@ Ref. Output image: {output_formats_to_follow}
          if self.should_evaluate:
              task_output.evaluate(task=self, latency=self.latency, tokens=self.tokens)

-         self._create_short_term_memory(agent=agent, task_output=task_output)
-         self._create_long_term_memory(agent=agent, task_output=task_output)
-
+         self._create_short_and_long_term_memories(agent=agent, task_output=task_output)

          if self.callback and isinstance(self.callback, Callable):
              kwargs = { **self.callback_kwargs, **task_output.json_dict }
@@ -702,7 +675,7 @@ Ref. Output image: {output_formats_to_follow}

      @property
      def key(self) -> str:
-         output_format = TaskOutputFormat.JSON if self.response_fields else TaskOutputFormat.PYDANTIC if self.pydantic_output is not None else TaskOutputFormat.RAW
+         output_format = "json" if self.response_fields else "pydantic" if self.pydantic_output is not None else "raw"
          source = [self.description, output_format]
          return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

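
The key derivation is behaviorally unchanged, now hashing plain strings instead of `TaskOutputFormat` members; equivalent to:

```python
from hashlib import md5

description = "Summarize churn drivers"   # hypothetical
output_format = "raw"                     # one of "json" | "pydantic" | "raw"
key = md5("|".join([description, output_format]).encode(), usedforsecurity=False).hexdigest()
```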
@@ -1,4 +1,4 @@
- from typing import Dict, Optional, Type, List, Any, TypeVar
+ from typing import Dict, Type, List, Any

  from pydantic import BaseModel, Field, InstanceOf

versionhq/team/model.py CHANGED
@@ -1,17 +1,16 @@
  import uuid
  import warnings
- import json
  from enum import Enum
  from dotenv import load_dotenv
  from concurrent.futures import Future
  from hashlib import md5
- from typing import Any, Dict, List, TYPE_CHECKING, Callable, Optional, Tuple
- from pydantic import UUID4, InstanceOf, Json, BaseModel, Field, PrivateAttr, field_validator, model_validator
+ from typing import Any, Dict, List, Callable, Optional, Tuple
+ from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator
  from pydantic._internal._generate_schema import GenerateSchema
  from pydantic_core import PydanticCustomError, core_schema

  from versionhq.agent.model import Agent
- from versionhq.task.model import Task, TaskOutput, ConditionalTask, TaskOutputFormat
+ from versionhq.task.model import Task, TaskOutput, ConditionalTask
  from versionhq.task.formatter import create_raw_outputs
  from versionhq.team.team_planner import TeamPlanner
  from versionhq._utils.logger import Logger
@@ -38,6 +37,16 @@ load_dotenv(override=True)
      # pass


+
+ class Formation(str, Enum):
+     SOLO = 1
+     SUPERVISING = 2
+     NETWORK = 3
+     RANDOM = 4
+     HYBRID = 10
+     UNDEFINED = 0
+
+
  class TaskHandlingProcess(str, Enum):
      """
      Class representing the different processes that can be used to tackle multiple tasks.
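
`form_agent_network` resolves this enum by member name via `Formation[...]`, which sidesteps the unusual values: with the `str` mixin, the integer assignments are stored as strings. For instance:

```python
formation = Formation["SUPERVISING"]   # name-based lookup, as in form_agent_network above
print(formation.name)                  # SUPERVISING
print(formation.value)                 # "2" - the str mixin stringifies the int
```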
@@ -47,25 +56,25 @@ class TaskHandlingProcess(str, Enum):
      consensual = "consensual"


- class TeamOutput(BaseModel):
+ class TeamOutput(TaskOutput):
      """
-     Store outputs of the tasks handled by the team.
-     `json_dict` and `raw` store overall output of tasks that handled by the team,
-     while `task_output_list` stores each TaskOutput instance to the tasks handled by the team members.
-     Note that `raw` and `json_dict` will be prioritized as TeamOutput to refer over `task_output_list`.
+     A class to store output from the team, inherited from TaskOutput class.
      """

      team_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store the team ID that generate the TeamOutput")
-     raw: str = Field(default="", description="raw output of the team lead task handled by the team leader")
-     pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
-     json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
-     task_output_list: list[TaskOutput] = Field(default=list, description="store output of all the tasks that the team has executed")
+     task_description: str = Field(default=None, description="store initial request (task description) from the client")
+     task_outputs: list[TaskOutput] = Field(default=list, description="store outputs of all tasks that the team has executed")
      token_usage: UsageMetrics = Field(default=dict, description="processed token summary")

+
+     def return_all_task_outputs(self) -> List[Dict[str, Any]]:
+         res = [output.json_dict for output in self.task_outputs]
+         return res
+
+
      def __str__(self):
          return (str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw)

-
      def __getitem__(self, key):
          if self.pydantic and hasattr(self.pydantic, key):
              return getattr(self.pydantic, key)
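
Since `TeamOutput` now inherits `raw`, `pydantic`, and `json_dict` from `TaskOutput` (its own copies and `to_dict()` are removed in the hunk below), per-task results are read through the renamed field or the new helper; a hedged sketch, assuming `res` is a `TeamOutput`:

```python
# Hypothetical: res is a TeamOutput returned by Team.launch().
all_dicts = res.return_all_task_outputs()   # [output.json_dict for each executed task]
first = res.task_outputs[0]                 # field renamed from task_output_list
```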
@@ -75,35 +84,6 @@ class TeamOutput(BaseModel):
          raise KeyError(f"Key '{key}' not found in the team output.")


-     @property
-     def json(self) -> Optional[str]:
-         if self.tasks_output[-1].output_format != TaskOutputFormat.JSON:
-             raise ValueError(
-                 "No JSON output found in the final task. Please make sure to set the output_json property in the final task in your team."
-             )
-         return json.dumps(self.json_dict)
-
-
-     def to_dict(self) -> Dict[str, Any]:
-         """
-         Convert pydantic / raw output into dict and return the dict.
-         When we only have `raw` output, return `{ output: raw }` to avoid an error
-         """
-
-         output_dict = {}
-         if self.json_dict:
-             output_dict.update(self.json_dict)
-         elif self.pydantic:
-             output_dict.update(self.pydantic.model_dump())
-         else:
-             output_dict.upate({ "output": self.raw })
-         return output_dict
-
-
-     def return_all_task_outputs(self) -> List[Dict[str, Any]]:
-         res = [output.json_dict for output in self.task_output_list]
-         return res
-


  class TeamMember(BaseModel):
      """
@@ -134,6 +114,7 @@ class Team(BaseModel):
      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      name: Optional[str] = Field(default=None)
      members: List[TeamMember] = Field(default_factory=list)
+     formation: Optional[Formation] = Field(default=None)

      # formation planning
      planning_llm: Optional[Any] = Field(default=None, description="llm to generate formation")
@@ -324,7 +305,7 @@ class Team(BaseModel):
              raw=final_task_output.raw,
              json_dict=final_task_output.json_dict,
              pydantic=final_task_output.pydantic,
-             task_output_list=task_outputs,
+             task_outputs=task_outputs,
              token_usage=token_usage,
          )

@@ -406,9 +387,9 @@ class Team(BaseModel):
          return self._create_team_output(task_outputs, lead_task_output)


-     def kickoff(self, kwargs_before: Optional[Dict[str, str]] = None, kwargs_after: Optional[Dict[str, Any]] = None) -> TeamOutput:
+     def launch(self, kwargs_before: Optional[Dict[str, str]] = None, kwargs_after: Optional[Dict[str, Any]] = None) -> TeamOutput:
          """
-         Kickoff the team:
+         Confirm and launch the formation - execute tasks and record outputs.
          0. Assign an agent to a task - using conditions (manager prioritizes team_tasks) and planning_llm.
          1. Address `before_kickoff_callbacks` if any.
          2. Handle team members' tasks in accordance with the process.
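
The rename is a breaking change for callers; the migration is mechanical since the parameters are unchanged:

```python
# Before (<= 1.1.12.1): res = team.kickoff()
# After  (1.1.12.3):
res = team.launch()   # same kwargs_before / kwargs_after parameters
```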
@@ -8,7 +8,11 @@ load_dotenv(override=True)

  class TeamPlanner:
      """
-     Assign agents to multiple tasks.
+     A class to handle agent formations based on the given task description.
+     1) complexity
+     2) agent built (or use given agents)
+     3) knowledge, memory sharing
+     4) form a team
      """

      from versionhq.task.model import Task, ResponseField, TaskOutput
@@ -59,7 +63,6 @@ class TeamPlanner:
          return new_member_list


-
      def _handle_task_planning(self, context: Optional[str] = None, tools: Optional[str] = None) -> TaskOutput:
          """
          Handles the team planning by creating detailed step-by-step plans for each task.
versionhq/tool/model.py CHANGED
@@ -2,7 +2,7 @@ from abc import ABC, abstractmethod
  from inspect import signature
  from typing import Any, Dict, Callable, Type, Optional, get_args, get_origin, get_type_hints
  from typing_extensions import Self
- from pydantic import InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator, PrivateAttr, create_model
+ from pydantic import InstanceOf, BaseModel, Field, field_validator, model_validator, PrivateAttr, create_model
  from pydantic_core import PydanticCustomError

  from versionhq.llm.llm_vars import SchemaType