versionhq 1.2.1.15__py3-none-any.whl → 1.2.1.17__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.
versionhq/__init__.py CHANGED
@@ -27,11 +27,11 @@ from versionhq.tool.composio_tool import ComposioHandler
  from versionhq.memory.contextual_memory import ContextualMemory
  from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, MemoryItem

- from versionhq.task.formation import form_agent_network
+ from versionhq.agent_network.formation import form_agent_network
  from versionhq.task_graph.draft import workflow


- __version__ = "1.2.1.15"
+ __version__ = "1.2.1.17"
  __all__ = [
  "Agent",

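For downstream imports, the formation helper moves from `versionhq.task` to `versionhq.agent_network` in this release; a minimal migration sketch (the call itself is unchanged here):

    # 1.2.1.15
    # from versionhq.task.formation import form_agent_network
    # 1.2.1.17
    from versionhq.agent_network.formation import form_agent_network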
versionhq/agent/inhouse_agents.py CHANGED
@@ -10,7 +10,7 @@ vhq_client_manager = Agent(
  role="vhq-Client Manager",
  goal="Efficiently communicate with the client on the task progress",
  llm=DEFAULT_MODEL_NAME,
- use_memory=True,
+ with_memory=True,
  )


@@ -21,7 +21,7 @@ vhq_task_evaluator = Agent(
  llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
  maxit=1,
  max_retry_limit=1,
- use_memory=True # refer past eval records of similar tasks
+ with_memory=True # refer past eval records of similar tasks
  )

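The `use_memory` flag on agents is renamed to `with_memory` in this release; a hedged sketch of a custom agent using the new keyword (role and goal values are illustrative, not from the diff):

    from versionhq.agent.model import Agent

    agent = Agent(
        role="Support Analyst",           # illustrative
        goal="Summarize client tickets",  # illustrative
        with_memory=True,                 # was use_memory=True in 1.2.1.15
    )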
versionhq/agent/model.py CHANGED
@@ -60,7 +60,6 @@ class Agent(BaseModel):
  """

  __hash__ = object.__hash__
- _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
  _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
  _request_within_rpm_limit: Any = PrivateAttr(default=None)
  _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -76,39 +75,38 @@ class Agent(BaseModel):

  # knowledge
  knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
+ embedder_config: Optional[Dict[str, Any]] = Field(default=None, description="embedder configuration for knowledge sources")
  _knowledge: Optional[Knowledge] = PrivateAttr(default=None)

  # memory
- use_memory: bool = Field(default=False, description="whether to store/use memory when executing the task")
- memory_config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the memory. need to store user_id for UserMemory")
+ with_memory: bool = Field(default=False, description="whether to use memories during the task execution")
+ memory_config: Optional[Dict[str, Any]] = Field(default=None, description="memory config. needs to store user_id for UserMemory to work")
  short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(default=None)
  long_term_memory: Optional[InstanceOf[LongTermMemory]] = Field(default=None)
  user_memory: Optional[InstanceOf[UserMemory]] = Field(default=None)
- embedder_config: Optional[Dict[str, Any]] = Field(default=None, description="embedder configuration for the agent's knowledge")

  # prompting
  use_developer_prompt: Optional[bool] = Field(default=True, description="Use developer prompt when calling the llm")
- developer_propmt_template: Optional[str] = Field(default=None, description="ddeveloper prompt template")
- user_prompt_template: Optional[str] = Field(default=None, description="user prompt template")
+ developer_propmt_template: Optional[str] = Field(default=None, description="abs. file path to developer prompt template")
+ user_prompt_template: Optional[str] = Field(default=None, description="abs. file path to user prompt template")

  # task execution rules
- network: Optional[List[Any]] = Field(default=None, description="store a list of agent networks that the agent belong as a member")
- allow_delegation: bool = Field(default=False,description="if the agent can delegate the task to another agent or ask some help")
- max_retry_limit: int = Field(default=2 ,description="max. number of retry for the task execution when an error occurs")
- maxit: Optional[int] = Field(default=25,description="max. number of total optimization loops conducted when an error occurs")
+ networks: Optional[List[Any]] = Field(default_factory=list, description="store a list of agent networks that the agent belong as a member")
+ allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
+ max_retry_limit: int = Field(default=2, description="max. number of task retries when an error occurs")
+ maxit: Optional[int] = Field(default=25, description="max. number of total optimization loops conducted when an error occurs")
  callbacks: Optional[List[Callable]] = Field(default_factory=list, description="callback functions to execute after any task execution")

  # llm settings cascaded to the LLM model
  llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
- function_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
- respect_context_window: bool = Field(default=True,description="Keep messages under the context window size by summarizing content")
- max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
- max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
+ func_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
+ respect_context_window: bool = Field(default=True,description="keep messages under the context window size")
+ max_execution_time: Optional[int] = Field(default=None, description="max. task execution time in seconds")
  max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
  llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM model")

- # cache, error, ops handling
- formatting_errors: int = Field(default=0, description="number of formatting errors.")
+ # # cache, error, ops handling
+ # formatting_errors: int = Field(default=0, description="number of formatting errors.")


  @field_validator("id", mode="before")
@@ -136,18 +134,18 @@ class Agent(BaseModel):
  @model_validator(mode="after")
  def set_up_llm(self) -> Self:
  """
- Set up `llm` and `function_calling_llm` as valid LLM objects using the given kwargs.
+ Set up `llm` and `func_calling_llm` as valid LLM objects using the given kwargs.
  """
  self.llm = self._convert_to_llm_object(llm=self.llm)

- function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
- function_calling_llm = self._convert_to_llm_object(llm=function_calling_llm)
- if function_calling_llm._supports_function_calling():
- self.function_calling_llm = function_calling_llm
+ func_calling_llm = self.func_calling_llm if self.func_calling_llm else self.llm if self.llm else None
+ func_calling_llm = self._convert_to_llm_object(llm=func_calling_llm)
+ if func_calling_llm._supports_function_calling():
+ self.func_calling_llm = func_calling_llm
  elif self.llm._supports_function_calling():
- self.function_calling_llm = self.llm
+ self.func_calling_llm = self.llm
  else:
- self.function_calling_llm = self._convert_to_llm_object(llm=LLM(model=DEFAULT_MODEL_NAME))
+ self.func_calling_llm = self._convert_to_llm_object(llm=LLM(model=DEFAULT_MODEL_NAME))
  return self


@@ -179,7 +177,7 @@ class Agent(BaseModel):
  model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
  llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
  llm_params = {
- "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
+ "max_tokens": (getattr(llm, "max_tokens") or 3000),
  "timeout": getattr(llm, "timeout", self.max_execution_time),
  "callbacks": getattr(llm, "callbacks", None),
  "temperature": getattr(llm, "temperature", None),
@@ -222,7 +220,7 @@ class Agent(BaseModel):


  llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
- llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+ # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens

  if llm.provider is None:
  provider_name = llm.model.split("/")[0]
@@ -262,7 +260,7 @@ class Agent(BaseModel):
  tool_list.append(item)

  else:
- self._logger.log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
+ Logger().log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
  raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})

  self.tools = tool_list
@@ -346,7 +344,7 @@ class Agent(BaseModel):
  self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)

  except:
- self._logger.log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
+ Logger().log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")

  return self

@@ -357,7 +355,7 @@ class Agent(BaseModel):
  Set up memories: stm, ltm, and um
  """

- # if self.use_memory == True:
+ # if self.with_memory == True:
  self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
  self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)

@@ -371,21 +369,13 @@ class Agent(BaseModel):
  return self


- def _train(self) -> Self:
- """
- Fine-tuned the base model using OpenAI train framework.
- """
- if not isinstance(self.llm, LLM):
- pass
-
-
- def update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+ def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
  """
  Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
  """

  if not llm and not llm_config:
- self._logger.log(level="error", message="Missing llm or llm_config values to update", color="red")
+ Logger().log(level="error", message="Missing llm or llm_config values to update", color="red")
  pass

  self.llm = llm
@@ -398,59 +388,15 @@ class Agent(BaseModel):
  return self.set_up_llm()


- def update(self, **kwargs) -> Self:
+ def _train(self) -> Self:
  """
- Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
+ Fine-tuned the base model using OpenAI train framework.
  """
-
- if not kwargs:
- self._logger.log(level="error", message="Missing values to update", color="red")
- return self
-
- for k, v in kwargs.items():
- match k:
- case "tools":
- self.tools = kwargs.get(k, self.tools)
- self.set_up_tools()
-
- case "role" | "goal":
- self.role = kwargs.get("role", self.role)
- self.goal = kwargs.get("goal", self.goal)
- if not self.backstory:
- self.set_up_backstory()
-
- if self.backstory:
- self.backstory += f"new role: {self.role}, new goal: {self.goal}"
-
- case "max_rpm":
- self.max_rpm = kwargs.get(k, self.max_rpm)
- self.set_up_rpm()
-
- case "knowledge_sources":
- self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
- self.set_up_knowledge()
-
- case "use_memory" | "memory_config":
- self.use_memory = kwargs.get("use_memory", self.use_memory)
- self.memory_config = kwargs.get("memory_config", self.memory_config)
- self.set_up_memory()
-
- case "llm" | "llm_config":
- self.llm = kwargs.get("llm", self.llm)
- self.llm_config = kwargs.get("llm_config", self.llm_config)
- self.update_llm(llm=self.llm, llm_config=self.llm_config)
-
- case _:
- try:
- setattr(self, k, v)
- except Exception as e:
- self._logger.log(level="error", message=f"Failed to update the key: {k} We'll skip. Error: {str(e)}", color="red")
- pass
-
- return self
+ if not isinstance(self.llm, LLM):
+ pass


- def invoke(
+ def _invoke(
  self,
  prompts: str,
  response_format: Optional[Dict[str, Any]] = None,
@@ -477,21 +423,21 @@ class Agent(BaseModel):
  if self._rpm_controller and self.max_rpm:
  self._rpm_controller.check_or_wait()

- self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
+ Logger().log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

  if tool_res_as_final:
- raw_response = self.function_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
- task.tokens = self.function_calling_llm._tokens
+ raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+ task.tokens = self.func_calling_llm._tokens
  else:
  raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
  task.tokens = self.llm._tokens

  task_execution_counter += 1
- self._logger.log(level="info", message=f"Agent response: {raw_response}", color="green")
+ Logger().log(level="info", message=f"Agent response: {raw_response}", color="green")
  return raw_response

  except Exception as e:
- self._logger.log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
+ Logger().log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")

  while not raw_response and task_execution_counter <= self.max_retry_limit:
  while (not raw_response or raw_response == "" or raw_response is None) and iterations < self.maxit:
@@ -503,14 +449,89 @@ class Agent(BaseModel):
  iterations += 1

  task_execution_counter += 1
- self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
+ Logger().log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
  return raw_response

  if not raw_response:
- self._logger.log(level="error", message="Received None or empty response from the model", color="red")
+ Logger().log(level="error", message="Received None or empty response from the model", color="red")
  raise ValueError("Invalid response from LLM call - None or empty.")


+ def update(self, **kwargs) -> Self:
+ """
+ Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
+ """
+
+ if not kwargs:
+ Logger().log(level="error", message="Missing values to update", color="red")
+ return self
+
+ for k, v in kwargs.items():
+ match k:
+ case "tools":
+ self.tools = kwargs.get(k, self.tools)
+ self.set_up_tools()
+
+ case "role" | "goal":
+ self.role = kwargs.get("role", self.role)
+ self.goal = kwargs.get("goal", self.goal)
+ if not self.backstory:
+ self.set_up_backstory()
+
+ if self.backstory:
+ self.backstory += f"new role: {self.role}, new goal: {self.goal}"
+
+ case "max_rpm":
+ self.max_rpm = kwargs.get(k, self.max_rpm)
+ self.set_up_rpm()
+
+ case "knowledge_sources":
+ self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
+ self.set_up_knowledge()
+
+ case "with_memory" | "memory_config":
+ self.with_memory = kwargs.get("with_memory", self.with_memory)
+ self.memory_config = kwargs.get("memory_config", self.memory_config)
+ self.set_up_memory()
+
+ case "llm" | "llm_config":
+ self.llm = kwargs.get("llm", self.llm)
+ self.llm_config = kwargs.get("llm_config", self.llm_config)
+ self._update_llm(llm=self.llm, llm_config=self.llm_config)
+
+ case _:
+ try:
+ setattr(self, k, v)
+ except Exception as e:
+ Logger().log(level="error", message=f"Failed to update the field: {k} We'll skip it. Error: {str(e)}", color="red")
+ pass
+
+ return self
+
+
+ def start(self, context: Any = None, tool_res_as_final: bool = False) -> Any | None:
+ """
+ Defines and executes a task when it is not given and returns TaskOutput object.
+ """
+
+ if not self.goal or not self.role:
+ return None
+
+ from versionhq.task.model import Task
+
+ class Output(BaseModel):
+ result: str
+ steps: list[str]
+
+ task = Task(
+ description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
+ pydantic_output=Output,
+ tool_res_as_final=tool_res_as_final,
+ )
+ res = task.execute(agent=self, context=context)
+ return res
+
+
  def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
  """
  Format a task prompt, adding context from knowledge and memory (if given), and invoke LLM.
@@ -534,7 +555,7 @@ class Agent(BaseModel):
  if agent_knowledge_context:
  task_prompt += agent_knowledge_context

- if self.use_memory == True:
+ if self.with_memory == True:
  contextual_memory = ContextualMemory(
  memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
  )
@@ -546,14 +567,14 @@ class Agent(BaseModel):


  ## comment out for now
- # if self.network and self.network._train:
+ # if self.networks and self.networks._train:
  # task_prompt = self._training_handler(task_prompt=task_prompt)
  # else:
  # task_prompt = self._use_trained_data(task_prompt=task_prompt)

  try:
  self._times_executed += 1
- raw_response = self.invoke(
+ raw_response = self._invoke(
  prompts=task_prompt,
  response_format=task._structure_response_format(model_provider=self.llm.provider),
  tools=tools,
@@ -563,11 +584,11 @@ class Agent(BaseModel):

  except Exception as e:
  self._times_executed += 1
- self._logger.log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
+ Logger().log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
  raw_response = self.execute_task(task, context, task_tools)

  if self._times_executed > self.max_retry_limit:
- self._logger.log(level="error", message=f"Max retry limit has exceeded.", color="red")
+ Logger().log(level="error", message=f"Max retry limit has exceeded.", color="red")
  raise e

  if self.max_rpm and self._rpm_controller:
@@ -576,27 +597,5 @@ class Agent(BaseModel):
  return raw_response


- def start(self, context: Any = None) -> Any | None:
- """
- Defines and executes a task when it is not given and returns TaskOutput object.
- """
-
- if not self.goal or not self.role:
- return None
-
- from versionhq.task.model import Task, ResponseField
-
- class Output(BaseModel):
- result: str
- steps: list[str]
-
- task = Task(
- description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
- pydantic_output=Output
- )
- res = task.execute(agent=self, context=context)
- return res
-
-
  def __repr__(self):
  return f"Agent(role={self.role}, goal={self.goal}"
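Callers on 1.2.1.15 are affected by the renames above: `function_calling_llm` becomes `func_calling_llm`, `update_llm()` and `invoke()` become private (`_update_llm()`, `_invoke()`), `use_memory` becomes `with_memory`, and `start()` gains a `tool_res_as_final` flag. A hedged sketch of the public surface that remains (role, goal, llm, and kwargs are illustrative values, not taken from the diff):

    from versionhq.agent.model import Agent

    agent = Agent(role="Researcher", goal="Collect sources", llm="gpt-4o")  # illustrative values
    agent.update(with_memory=True, maxit=5)   # keys are routed through the match cases in update()
    res = agent.start(context="draft a short status update", tool_res_as_final=False)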
versionhq/agent_network/formation.py ADDED
@@ -0,0 +1,157 @@
+ from typing import List, Type
+ from enum import Enum
+
+ from pydantic import BaseModel, create_model, Field
+
+ from versionhq.task.model import Task
+ from versionhq.agent.model import Agent
+ from versionhq.agent_network.model import AgentNetwork, Member, Formation
+ from versionhq.agent.inhouse_agents import vhq_formation_planner
+ from versionhq._utils import Logger
+
+
+ def form_agent_network(
+ task: str,
+ expected_outcome: str | Type[BaseModel],
+ agents: List[Agent] = None,
+ context: str = None,
+ formation: Type[Formation] = None
+ ) -> AgentNetwork | None:
+ """
+ Make a formation of agents from the given task description, expected outcome, agents (optional), and context (optional).
+ """
+
+ if not task:
+ Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
+ return None
+
+ if not expected_outcome:
+ Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
+ return None
+
+ if formation:
+ try:
+ match formation:
+ case Formation():
+ pass
+
+ case str():
+ matched = [item for item in Formation.s_ if item == formation.upper()]
+ if matched:
+ formation = getattr(Formation, matched[0])
+ else:
+ # Formation._generate_next_value_(name=f"CUSTOM_{formation.upper()}", start=100, count=6, last_values=Formation.HYBRID.name)
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+ formation = None
+
+ case int() | float():
+ formation = Formation(int(formation))
+
+ case _:
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+ formation = None
+
+ except Exception as e:
+ Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid: {str(e)}. We'll recreate a formation.", color="yellow")
+ formation = None
+
+ # try:
+ prompt_formation = formation.name if formation and isinstance(formation, Formation) else f"Select the best formation to effectively execute the tasks from the given Enum sets: {str(Formation.__dict__)}."
+
+ prompt_expected_outcome = expected_outcome if isinstance(expected_outcome, str) else expected_outcome.model_dump_json()
+
+ class Outcome(BaseModel):
+ formation: Enum
+ agent_roles: list[str]
+ task_descriptions: list[str]
+ task_outcomes: list[list[str]]
+ leader_agent: str
+
+ vhq_task = Task(
+ description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the team formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+ pydantic_output=Outcome
+ )
+
+ if agents:
+ vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
+
+ res = vhq_task.execute(agent=vhq_formation_planner, context=context)
+
+ formation_keys = ([k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
+ if res.pydantic else [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()])
+ _formation = Formation[formation_keys[0]] if formation_keys else Formation.SUPERVISING
+
+ network_tasks = []
+ members = []
+ leader = str(res.pydantic.leader_agent) if res.pydantic else str(res.json_dict["leader_agent"])
+
+ created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
+ created_tasks = []
+
+ if res.pydantic:
+ for i, item in enumerate(res.pydantic.task_outcomes):
+ if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+ fields = {}
+ for ob in item:
+ try:
+ field_name = str(ob).lower().replace(" ", "_").replace(":", "_")[0: 10]
+ fields[field_name] = (str, Field(default=None))
+ except:
+ pass
+ output = create_model("Output", **fields) if fields else None
+ _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+ created_tasks.append(_task)
+
+ elif res.json_dict:
+ for i, item in enumerate(res["task_outcomes"]):
+ if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
+ fields = {}
+ for ob in item:
+ try:
+ field_name = str(ob).lower().replace(" ", "_").replace(":", "_")[0: 10]
+ fields[field_name] = (str, Field(default=None))
+ except:
+ pass
+ output = create_model("Output", **fields) if fields else None
+ _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
+ created_tasks.append(_task)
+
+
+ if len(created_tasks) <= len(created_agents):
+ for i in range(len(created_tasks)):
+ is_manager = bool(created_agents[i].role.lower() == leader.lower())
+ member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
+ members.append(member)
+
+ for i in range(len(created_tasks), len(created_agents)):
+ try:
+ is_manager = bool(created_agents[i].role.lower() == leader.lower())
+ member_w_o_task = Member(agent=created_agents[i], is_manager=is_manager)
+ members.append(member_w_o_task)
+ except:
+ pass
+
+ elif len(created_tasks) > len(created_agents):
+ for i in range(len(created_agents)):
+ is_manager = bool(created_agents[i].role.lower() == leader.lower())
+ member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
+ members.append(member)
+
+ network_tasks.append(created_tasks[len(created_agents):len(created_tasks)])
+
+
+ if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
+ manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
+ members.append(manager)
+
+ members.sort(key=lambda x: x.is_manager == False)
+ network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
+
+ Logger().log(level="info", message=f"Successfully created a agent network: {str(network.id)} with {len(network.members)} agents.", color="blue")
+
+ return network
+
+
+ # except Exception as e:
+ # Logger().log(level="error", message=f"Failed to create a agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
+ # return None
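A hedged usage sketch for the relocated helper; argument values are illustrative, and `expected_outcome` is passed as a string here although the new `str | Type[BaseModel]` signature also accepts a pydantic model class:

    from versionhq.agent_network.formation import form_agent_network

    network = form_agent_network(
        task="Plan and announce a product launch",              # illustrative
        expected_outcome="A launch plan with owners and dates", # illustrative
    )
    if network:
        print(network.formation, len(network.members))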
versionhq/agent_network/model.py CHANGED
@@ -28,7 +28,6 @@ warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")


  class Formation(str, Enum):
- UNDEFINED = 0
  SOLO = 1
  SUPERVISING = 2
  SQUAD = 3
@@ -96,7 +95,6 @@ class AgentNetwork(BaseModel):

  __hash__ = object.__hash__
  _execution_span: Any = PrivateAttr()
- _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
  _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)

  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
@@ -167,12 +165,12 @@ class AgentNetwork(BaseModel):
  """
  if self.process == TaskHandlingProcess.HIERARCHY or self.formation == Formation.SUPERVISING:
  if not self.managers:
- self._logger.log(level="error", message="The process or formation created needs at least 1 manager agent.", color="red")
+ Logger().log(level="error", message="The process or formation created needs at least 1 manager agent.", color="red")
  raise PydanticCustomError("missing_manager", "`manager` is required when using hierarchical process.", {})

  ## comment out for the formation flexibilities
  # if self.managers and (self.manager_tasks is None or self.network_tasks is None):
- # self._logger.log(level="error", message="The manager is idling. At least 1 task needs to be assigned to the manager.", color="red")
+ # Logger().log(level="error", message="The manager is idling. At least 1 task needs to be assigned to the manager.", color="red")
  # raise PydanticCustomError("missing_manager_task", "manager needs to have at least one manager task or network task.", {})

  return self
@@ -186,10 +184,11 @@ class AgentNetwork(BaseModel):
  if self.process == TaskHandlingProcess.SEQUENT and self.network_tasks is None:
  for task in self.tasks:
  if not [member.task == task for member in self.members]:
- self._logger.log(level="error", message=f"The following task needs a dedicated agent to be assinged: {task.description}", color="red")
+ Logger().log(level="error", message=f"The following task needs a dedicated agent to be assinged: {task.description}", color="red")
  raise PydanticCustomError("missing_agent_in_task", "Sequential process error: Agent is missing the task", {})
  return self

+
  @model_validator(mode="after")
  def validate_end_with_at_most_one_async_task(self):
  """
@@ -371,7 +370,7 @@ class AgentNetwork(BaseModel):
  task_outputs = self._process_async_tasks(futures, was_replayed)

  if not task_outputs:
- self._logger.log(level="error", message="Missing task outputs.", color="red")
+ Logger().log(level="error", message="Missing task outputs.", color="red")
  raise ValueError("Missing task outputs")

  final_task_output = lead_task_output if lead_task_output is not None else task_outputs[0] #! REFINEME
@@ -399,12 +398,12 @@ class AgentNetwork(BaseModel):
  self._assign_tasks()

  if kwargs_pre is not None:
- for func in self.pre_launch_callbacks:
+ for func in self.pre_launch_callbacks: # signature check
  func(**kwargs_pre)

  for member in self.members:
  agent = member.agent
- agent.network = self
+ agent.networks.append(self)

  if self.step_callback:
  agent.callbacks.append(self.step_callback)
versionhq/task/model.py CHANGED
@@ -281,7 +281,7 @@ class Task(BaseModel):

  # executing
  execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
- allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
+ allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
  callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
  callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")

@@ -530,9 +530,9 @@ Ref. Output image: {output_formats_to_follow}
  from versionhq.agent.model import Agent
  from versionhq.memory.model import ShortTermMemory, MemoryMetadata, LongTermMemory

- agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent), use_memory=True)
+ agent = agent if isinstance(agent, Agent) else Agent(role=str(agent), goal=str(agent), with_memory=True)

- if agent.use_memory == False:
+ if agent.with_memory == False:
  return None

  try:
@@ -574,6 +574,36 @@ Ref. Output image: {output_formats_to_follow}
  return agent


+ def _select_agent_to_delegate(self, agent: Any = None) -> Any | None: # return agent object or None
+ """
+ Creates or selects an agent to delegate the given task and returns Agent object else None.
+ """
+
+ from versionhq.agent.model import Agent
+
+ if not self.allow_delegation:
+ return None
+
+ agent_to_delegate: InstanceOf[Agent] = None
+
+ if not agent:
+ agent_to_delegate = self._build_agent_from_task()
+
+ elif agent and not agent.networks:
+ agent_to_delegate = Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
+
+ else:
+ _managers = []
+ _members = []
+ for network in agent.networks:
+ _managers.extend(member.agent for member in network.members if member.is_manager)
+ _members.extend(member.agent for member in network.members if not member.is_manager)
+
+ agent_to_delegate = _managers[0] if _managers else _members[0] if _members else Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
+
+ return agent_to_delegate
+
+
  # task execution
  def execute(
  self, type: TaskExecutionType = None, agent: Optional["vhq.Agent"] = None, context: Optional[Any] = None
@@ -635,19 +665,7 @@ Ref. Output image: {output_formats_to_follow}
  task_tools.append(item)

  if self.allow_delegation == True:
- agent_to_delegate = None
-
- if hasattr(agent, "network") and isinstance(agent.network, AgentNetwork):
- if agent.network.managers:
- idling_manager_agents = [manager.agent for manager in agent.network.managers if manager.is_idling]
- agent_to_delegate = idling_manager_agents[0] if idling_manager_agents else agent.network.managers[0]
- else:
- peers = [member.agent for member in agent.network.members if member.is_manager == False and member.agent.id is not agent.id]
- if len(peers) > 0:
- agent_to_delegate = peers[0]
- else:
- agent_to_delegate = Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
-
+ agent_to_delegate = self._select_agent_to_delegate(agent=agent)
  agent = agent_to_delegate
  self.delegations += 1

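Delegation in `Task.execute()` now goes through the `_select_agent_to_delegate()` helper shown above instead of the inlined network lookup; a hedged sketch of the call site that triggers it (description, role, and goal are illustrative):

    from versionhq.agent.model import Agent
    from versionhq.task.model import Task

    task = Task(
        description="Draft a follow-up email to the client",  # illustrative
        allow_delegation=True,  # delegates to a manager/peer from agent.networks, else a vhq-Delegated-Agent
    )
    res = task.execute(agent=Agent(role="Coordinator", goal="Route work"), context=None)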
versionhq/task_graph/draft.py CHANGED
@@ -14,7 +14,7 @@ from versionhq.task_graph.model import TaskGraph, Task, DependencyType, Node
  from versionhq._utils.logger import Logger


- def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = False, use_memory: bool = False) -> TaskGraph | None:
+ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = False, with_memory: bool = False) -> TaskGraph | None:
  """
  Generate a TaskGraph object to generate the givne final_output most resource-efficiently.
  """
@@ -43,7 +43,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
  ", ".join([k for k in DependencyType._member_map_.keys()]),
  ],
  llm="gemini-2.0",
- use_memory=use_memory,
+ with_memory=with_memory,
  maxit=1,
  max_retry_limit=1,
  )
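`workflow()` mirrors the agent-level rename (`use_memory` → `with_memory`); a hedged sketch with an illustrative output model:

    from pydantic import BaseModel
    from versionhq.task_graph.draft import workflow

    class Report(BaseModel):  # illustrative final output schema
        headline: str
        bullet_points: list[str]

    graph = workflow(final_output=Report, context="Q3 sales summary", with_memory=True)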
versionhq/tool/composio_tool.py CHANGED
@@ -2,14 +2,13 @@ import os
  import uuid
  from abc import ABC
  from dotenv import load_dotenv
- from typing import Any, Callable, Type, get_args, get_origin, Optional, Tuple, Dict
+ from typing import Any, Optional, Tuple, Dict
  from typing_extensions import Self

  from pydantic import BaseModel, Field, model_validator, field_validator, UUID4, PrivateAttr
  from pydantic_core import PydanticCustomError

  from composio import ComposioToolSet
- from composio_langchain import action

  from versionhq.tool.composio_tool_vars import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
  from versionhq.tool.cache_handler import CacheHandler
versionhq-1.2.1.15.dist-info/METADATA → versionhq-1.2.1.17.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.1.15
+ Version: 1.2.1.17
  Summary: An agentic orchestration framework for building agent networks that handle task automation.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
versionhq-1.2.1.15.dist-info/RECORD → versionhq-1.2.1.17.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- versionhq/__init__.py,sha256=v49Zr8uA1uDecJ9sdBDmFy0oknWouJm4rn2oWP6QMQ8,2883
+ versionhq/__init__.py,sha256=nNrMhlfHaW9xgRJhnU9sHM4p9c5dL1n0UeAlVkTxwY4,2892
  versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
  versionhq/_utils/logger.py,sha256=zgogTwAY-ujDLrdryAKhdtoaNe1nOFajmEN0V8aMR34,3155
@@ -6,14 +6,15 @@ versionhq/_utils/process_config.py,sha256=jbPGXK2Kb4iyCugJ3FwRJuU0wL5Trq2x4xFQz2
  versionhq/_utils/usage_metrics.py,sha256=NXF18dn5NNvGK7EsQ4AAghpR8ppYOjMx6ABenLLHnmM,1066
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent/inhouse_agents.py,sha256=snDtgDmvZB2bZKH_RTcz5uFOMl3MTjLJwTQBebFt8hk,2532
- versionhq/agent/model.py,sha256=eFmkNuCJhXRhdYJR59J5PFxw5aghLvBrPRj4NXd7J0w,25873
+ versionhq/agent/inhouse_agents.py,sha256=vupO1viYqVb7sKohIE1zThu6JArhh5JLo5LBeSnh0kM,2534
+ versionhq/agent/model.py,sha256=ixfYjUY8u6CAFGuaExV8cU1WFlBgNQPxr1UQuxPBSew,25604
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
  versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
  versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
  versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent_network/model.py,sha256=P3Ntr_OIK60EXjEfaEm31HTBFypZfVr__0aC4VBt9G0,19353
+ versionhq/agent_network/formation.py,sha256=nGUVX8Ljiq2mQ5BNXm17SP-kuCRCA87CucRy-QB-Zv0,7426
+ versionhq/agent_network/model.py,sha256=hjtYIopAN52nStcM6TlV0b6ulRMrmzKH7jIkzNmZHDE,19265
  versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -44,25 +45,24 @@ versionhq/storage/task_output_storage.py,sha256=E1t_Fkt78dPYIOl3MP7LfQ8oGtjlzxBu
  versionhq/storage/utils.py,sha256=ByYXPoEIGJYLUqz-DWjbCAnneNrH1otiYbp12SCILpM,747
  versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/task/evaluate.py,sha256=WdUgjbZL62XrxyWe5MTz29scfzwmuAHGxJ7GvAB8Fmk,3954
- versionhq/task/formation.py,sha256=WH604q9bRmWH7KQCrk2qKJwisCopYX5CjJvsj4TgFjI,6894
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
  versionhq/task/log_handler.py,sha256=LT7YnO7gcPR9IZS7eRvMjnHh8crMBFtqduxd8dxIbkk,1680
- versionhq/task/model.py,sha256=EcUW7nktGBd9CBenKLh-5HBRa02wrwYF6WeF8ju04tc,28538
+ versionhq/task/model.py,sha256=KshCysteol3ggfotZMfFn192dMYALg8lvjiGpyLUVQA,28948
  versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
  versionhq/task/TEMPLATES/Description.py,sha256=V-4kh8xpQTKOcDMi2xnuP-fcNk6kuoz1_5tYBlDLQWQ,420
  versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
- versionhq/task_graph/draft.py,sha256=gTEICHEoUwlka6TDc9Kw0pKnhdrKtNw5HKhkl9gjIYk,5128
+ versionhq/task_graph/draft.py,sha256=AuQ2X-T5xuQ2ipMiAqeh9Pjm6I2fIf952pBQRYqdaog,5131
  versionhq/task_graph/model.py,sha256=njyHQyHrVTZP46iVkC6YvuMnGcS40vOy1wszRtf7DHY,23971
  versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
- versionhq/tool/composio_tool.py,sha256=38mEiVvTkuw1BLD233Bl1Gwxbpss1yfQiZLTWwX6BdA,8648
+ versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
  versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtgpqOzKZQ,1843
  versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
  versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
- versionhq-1.2.1.15.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
- versionhq-1.2.1.15.dist-info/METADATA,sha256=bmi_7y_zYR9MIDpsD23IfdAGJ8YYs-ifo4xE1CJOFSE,22033
- versionhq-1.2.1.15.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- versionhq-1.2.1.15.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.2.1.15.dist-info/RECORD,,
+ versionhq-1.2.1.17.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+ versionhq-1.2.1.17.dist-info/METADATA,sha256=WenkpMZmDc-5XW_iq-8ksRqp3YTvXzAbr5sjEfw09BQ,22033
+ versionhq-1.2.1.17.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ versionhq-1.2.1.17.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.2.1.17.dist-info/RECORD,,
versionhq/task/formation.py DELETED
@@ -1,159 +0,0 @@
- from typing import List, Type
- from enum import Enum
-
- from pydantic import BaseModel
-
- from versionhq.task.model import Task
- from versionhq.agent.model import Agent
- from versionhq.agent_network.model import AgentNetwork, Member, Formation
- from versionhq.agent.inhouse_agents import vhq_formation_planner
- from versionhq._utils import Logger
-
-
- def form_agent_network(
- task: str,
- expected_outcome: str,
- agents: List[Agent] = None,
- context: str = None,
- formation: Type[Formation] = None
- ) -> AgentNetwork | None:
- """
- Make a formation of agents from the given task description, expected outcome, agents (optional), and context (optional).
- """
-
- if not task:
- Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
- return None
-
- if not expected_outcome:
- Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
- return None
-
- if formation:
- try:
- match formation:
- case Formation():
- if formation == Formation.UNDEFINED:
- formation = None
- else:
- pass
-
- case str():
- matched = [item for item in Formation.s_ if item == formation.upper()]
- if matched:
- formation = getattr(Formation, matched[0])
- else:
- # Formation._generate_next_value_(name=f"CUSTOM_{formation.upper()}", start=100, count=6, last_values=Formation.HYBRID.name)
- Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
- formation = None
-
- case int() | float():
- formation = Formation(int(formation))
-
- case _:
- Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
- formation = None
-
- except Exception as e:
- Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid: {str(e)}. We'll recreate a formation.", color="yellow")
- formation = None
-
- try:
- prompt_formation = formation.name if formation and isinstance(formation, Formation) else f"Select the best formation to effectively execute the tasks from the given Enum sets: {str(Formation.__dict__)}."
- class Outcome(BaseModel):
- formation: Enum
- agent_roles: list[str]
- task_descriptions: list[str]
- leader_agent: str
-
- vhq_task = Task(
- description=f"""
- Create a team of specialized agents designed to automate the following task and deliver the expected outcome. Consider the necessary roles for each agent with a clear task description. If you think we neeed a leader to handle the automation, return a leader_agent role as well, but if not, leave the a leader_agent role blank. When you have a leader_agent, the formation must be SUPERVISING or HYBRID.
- Task: {str(task)}
- Expected outcome: {str(expected_outcome)}
- Formation: {prompt_formation}
- """,
- pydantic_output=Outcome
- )
-
- if agents:
- vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
-
- res = vhq_task.execute(agent=vhq_formation_planner, context=context)
- _formation = Formation.SUPERVISING
-
-
- if res.pydantic:
- formation_keys = [k for k, v in Formation._member_map_.items() if k == res.pydantic.formation.upper()]
-
- if formation_keys:
- _formation = Formation[formation_keys[0]]
-
-
- network_tasks = []
- members = []
- leader = str(res.pydantic.leader_agent)
-
- created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
- created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
-
-
- for i in range(len(created_agents)):
- is_manager = bool(created_agents[i].role.lower() == leader.lower())
- member = Member(agent=created_agents[i], is_manager=is_manager)
-
- if len(created_tasks) >= i and created_tasks[i]:
- member.tasks.append(created_tasks[i])
- members.append(member)
-
-
- if len(created_agents) < len(created_tasks):
- network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
-
- if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
- manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
- members.append(manager)
-
- members.sort(key=lambda x: x.is_manager == False)
- network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
- return network
-
- else:
- res = res.json_dict
- formation_keys = [k for k, v in Formation._member_map_.items() if k == res["formation"].upper()]
-
- if formation_keys:
- _formation = Formation[formation_keys[0]]
-
- created_agents = [Agent(role=item, goal=item) for item in res["agent_roles"]]
- created_tasks = [Task(description=item) for item in res["task_descriptions"]]
-
- network_tasks = []
- members = []
- leader = str(res["leader_agent"])
-
- for i in range(len(created_agents)):
- is_manager = bool(created_agents[i].role.lower() == leader.lower())
- member = Member(agent=created_agents[i], is_manager=is_manager)
-
- if len(created_tasks) >= i and created_tasks[i]:
- member.tasks.append(created_tasks[i])
-
- members.append(member)
-
- if len(created_agents) < len(created_tasks):
- network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
-
- if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
- member = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
- members.append(member)
-
- members.sort(key=lambda x: x.is_manager == False)
- network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
-
- return network
-
-
- except Exception as e:
- Logger(verbose=True).log(level="error", message=f"Failed to create a agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
- return None