versionhq 1.1.7.4__py3-none-any.whl → 1.1.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -17,7 +17,7 @@ from versionhq.team.model import Team, TeamOutput
 from versionhq.tool.model import Tool
 
 
- __version__ = "1.1.7.4"
+ __version__ = "1.1.7.7"
 __all__ = [
 "Agent",
 "Customer",
versionhq/agent/model.py CHANGED
@@ -85,7 +85,7 @@ class Agent(ABC, BaseModel):
 """
 
 __hash__ = object.__hash__
- _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
+ _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
 _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
 _request_within_rpm_limit: Any = PrivateAttr(default=None)
 _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -327,7 +327,7 @@ class Agent(ABC, BaseModel):
 messages = []
 messages.append({"role": "user", "content": prompts}) #! REFINEME
 messages.append({"role": "assistant", "content": self.backstory})
- print("Messages sent to the model:", messages)
+ self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
 
 callbacks = kwargs.get("callbacks", None)
 
@@ -338,7 +338,7 @@ class Agent(ABC, BaseModel):
 callbacks=callbacks,
 )
 task_execution_counter += 1
- print("Agent's #1 res: ", response)
+ self._logger.log(level="info", message=f"Agent's first response: {response}", color="blue")
 
 if (response is None or response == "") and task_execution_counter < self.max_retry_limit:
 while task_execution_counter <= self.max_retry_limit:
@@ -349,10 +349,10 @@ class Agent(ABC, BaseModel):
 callbacks=callbacks,
 )
 task_execution_counter += 1
- print(f"Agent's #{task_execution_counter} res: ", response)
+ self._logger.log(level="info", message=f"Agent's next response: {response}", color="blue")
 
 elif response is None or response == "":
- print("Received None or empty response from LLM call.")
+ self._logger.log(level="error", message="Received None or empty response from the model", color="red")
 raise ValueError("Invalid response from LLM call - None or empty.")
 
 return {"output": response.output if hasattr(response, "output") else response}
versionhq/clients/workflow/model.py CHANGED
@@ -61,21 +61,21 @@ class MessagingComponent(ABC, BaseModel):
 score: Union[float, InstanceOf[Score]] = Field(default=None)
 
 
- def store_scoring_result(self, scoring_subject: str, score: Union[int, Score, ScoreFormat] = None):
+ def store_scoring_result(self, scoring_subject: str, score_raw: Union[int, Score, ScoreFormat] = None):
 """
 Set up the `score` field
 """
 
- if isinstance(score, Score):
- setattr(self, "score", score)
+ if isinstance(score_raw, Score):
+ setattr(self, "score", score_raw)
 
- elif isinstance(score, ScoreFormat):
+ elif isinstance(score_raw, ScoreFormat):
 score_instance = Score()
- setattr(score_instance, scoring_subject, score)
+ setattr(score_instance, scoring_subject, score_raw)
 setattr(self, "score", score_instance)
 
- elif isinstance(score, int) or isinstance(score, float):
- score_instance, score_format_instance = Score(), ScoreFormat(rate=score, weight=1)
+ elif isinstance(score_raw, int) or isinstance(score_raw, float):
+ score_instance, score_format_instance = Score(), ScoreFormat(rate=score_raw, weight=1)
 setattr(score_instance, "kwargs", { scoring_subject: score_format_instance })
 setattr(self, "score", score_instance)
 
versionhq/storage/task_output_storage.py ADDED
@@ -0,0 +1,141 @@
+ import appdirs
+ import os
+ import json
+ import sqlite3
+ import datetime
+ from typing import Any, Dict, List, Optional
+ from dotenv import load_dotenv
+ from pathlib import Path
+
+ from versionhq._utils.logger import Logger
+
+ load_dotenv(override=True)
+
+
+ def fetch_db_storage_path():
+ project_directory_name = os.environ.get("STORAGE_DIR", Path.cwd().name)
+ app_author = "versionhq"
+ data_dir = Path(appdirs.user_data_dir(project_directory_name, app_author))
+ data_dir.mkdir(parents=True, exist_ok=True)
+ return data_dir
+
+ storage_path = fetch_db_storage_path()
+ default_db_name = "task_outputs"
+
+
+ class TaskOutputSQLiteStorage:
+ """
+ An SQLite storage class to handle storing task outputs.
+ """
+
+ def __init__(self, db_path: str = f"{storage_path}/{default_db_name}.db") -> None:
+ self.db_path = db_path
+ self._logger = Logger(verbose=True)
+ self._initialize_db()
+
+
+ def _initialize_db(self):
+ """
+ Initializes the SQLite database and creates LTM table.
+ """
+
+ try:
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.cursor()
+ cursor.execute(
+ """
+ CREATE TABLE IF NOT EXISTS task_outputs (
+ task_id TEXT PRIMARY KEY,
+ output JSON,
+ task_index INTEGER,
+ inputs JSON,
+ was_replayed BOOLEAN,
+ timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+ """
+ )
+ conn.commit()
+
+ except sqlite3.Error as e:
+ self._logger.log(level="error", message=f"DATABASE INITIALIZATION ERROR: {e}", color="red")
+
+
+ def add(self, task, output: Dict[str, Any], task_index: int, was_replayed: bool = False, inputs: Dict[str, Any] = {}):
+ try:
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.cursor()
+ cursor.execute(
+ """INSERT OR REPLACE INTO task_outputs
+ (task_id, output, task_index, inputs, was_replayed, timestamp)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """,
+ (str(task.id), json.dumps(output), task_index, json.dumps(inputs), was_replayed, datetime.datetime.now())
+ )
+ conn.commit()
+
+ except sqlite3.Error as e:
+ self._logger.log(level="error", message=f"SAVING TASK OUTPUTS ERROR: {e}", color="red")
+
+
+ def update(self, task_index: int, **kwargs):
+ try:
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.cursor()
+
+ fields, values = [], []
+ for k, v in kwargs.items():
+ fields.append(f"{k} = ?")
+ values.append(json.dumps(v) if isinstance(v, dict) else v)
+
+ query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?" # nosec
+ values.append(task_index)
+ cursor.execute(query, tuple(values))
+ conn.commit()
+
+ if cursor.rowcount == 0:
+ self._logger.log(
+ level="info", message=f"No row found with task_index {task_index}. No update performed.", color="yellow",
+ )
+
+ except sqlite3.Error as e:
+ self._logger.log(level="error", message=f"UPDATE TASK OUTPUTS ERROR: {e}", color="red")
+
+
+ def load(self) -> Optional[List[Dict[str, Any]]]:
+ try:
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.cursor()
+ cursor.execute("""
+ SELECT *
+ FROM task_outputs
+ ORDER BY task_index
+ """)
+
+ rows = cursor.fetchall()
+ results = []
+ for row in rows:
+ result = {
+ "task_id": row[0],
+ "output": json.loads(row[1]),
+ "task_index": row[2],
+ "inputs": json.loads(row[3]),
+ "was_replayed": row[4],
+ "timestamp": row[5],
+ }
+ results.append(result)
+ return results
+
+ except sqlite3.Error as e:
+ self._logger.log(level="error", message=f"LOADING TASK OUTPUTS ERROR: {e}", color="red")
+ return None
+
+
+ def delete_all(self):
+ try:
+ with sqlite3.connect(self.db_path) as conn:
+ cursor = conn.cursor()
+ cursor.execute("DELETE FROM task_outputs")
+ conn.commit()
+
+ except sqlite3.Error as e:
+ self._logger.log(level="error", message=f"ERROR: Failed to delete all: {e}", color="red")
versionhq/task/log_handler.py ADDED
@@ -0,0 +1,59 @@
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional
+
+ from pydantic import BaseModel, Field
+
+ from versionhq.storage.task_output_storage import TaskOutputSQLiteStorage
+
+
+ class ExecutionLog(BaseModel):
+ task_id: str
+ output: Dict[str, Any]
+ timestamp: datetime = Field(default_factory=datetime.now)
+ task_index: int
+ inputs: Dict[str, Any] = Field(default_factory=dict)
+ was_replayed: bool = False
+
+ def __getitem__(self, key: str) -> Any:
+ return getattr(self, key)
+
+
+
+ class TaskOutputStorageHandler:
+
+ def __init__(self):
+ self.storage = TaskOutputSQLiteStorage()
+
+
+ def update(self, task, task_index: int, was_replayed: bool = False, inputs: Dict[str, Any] = {}) -> None:
+ """
+ task: task instance
+ """
+ saved_outputs = self.load()
+ if saved_outputs is None:
+ raise ValueError("Logs cannot be None")
+
+ self.add(task, task_index, was_replayed, inputs)
+
+
+ def add(self, task, task_index: int, was_replayed: bool = False, inputs: Dict[str, Any] = {}) -> None:
+ from versionhq.task.model import Task
+
+ output_to_store = dict()
+
+ if isinstance(task, Task):
+ output_to_store = dict(
+ description=str(task.description),
+ raw=str(task.output.raw),
+ responsible_agent=str(task.processed_by_agents),
+ )
+
+ self.storage.add(task=task, output=output_to_store, task_index=task_index, was_replayed=was_replayed, inputs=inputs)
+
+
+ def reset(self) -> None:
+ self.storage.delete_all()
+
+
+ def load(self) -> Optional[List[Dict[str, Any]]]:
+ return self.storage.load()
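
A hedged sketch of reading back execution logs through the new `TaskOutputStorageHandler` (assumes versionhq 1.1.7.7; whether any rows exist depends on tasks executed earlier in the same environment):

```python
from versionhq.task.log_handler import TaskOutputStorageHandler

handler = TaskOutputStorageHandler()
logs = handler.load()  # delegates to TaskOutputSQLiteStorage.load()

for entry in logs or []:
    # for Task instances, `output` holds description, raw, and responsible_agent (see add() above)
    print(entry["task_id"], entry["output"].get("description"), entry["was_replayed"])

handler.reset()  # wipes the task_outputs table via delete_all()
```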
versionhq/task/model.py CHANGED
@@ -11,7 +11,9 @@ from pydantic_core import PydanticCustomError
 
 from versionhq._utils.process_config import process_config
 from versionhq.task import TaskOutputFormat
+ from versionhq.task.log_handler import TaskOutputStorageHandler
 from versionhq.tool.model import Tool, ToolCalled
+ from versionhq._utils.logger import Logger
 
 
 class ResponseField(BaseModel):
@@ -59,20 +61,6 @@ class ResponseField(BaseModel):
 return base_model
 
 
- class AgentOutput(BaseModel):
- """
- Keep adding agents' learning and recommendation and store it in `pydantic` field of `TaskOutput` class.
- Since the TaskOutput class has `agent` field, we don't add any info on the agent that handled the task.
- """
- customer_id: str = Field(default=None, max_length=126, description="customer uuid")
- customer_analysis: str = Field(default=None, max_length=256, description="analysis of the customer")
- product_overview: str = Field(default=None, max_length=256, description="analysis of the client's business")
- usp: str = Field()
- cohort_timeframe: int = Field(default=None, max_length=256, description="suitable cohort timeframe in days")
- kpi_metrics: List[str] = Field(default=list, description="Ideal KPIs to be tracked")
- assumptions: List[Dict[str, Any]] = Field(default=list, description="assumptions to test")
-
-
 
 class TaskOutput(BaseModel):
 """
@@ -133,11 +121,13 @@ class Task(BaseModel):
 """
 
 __hash__ = object.__hash__
+ _original_description: str = PrivateAttr(default=None)
+ _logger: Logger = PrivateAttr()
+ _task_output_handler = TaskOutputStorageHandler()
 
 id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
 name: Optional[str] = Field(default=None)
 description: str = Field(description="Description of the actual task")
- _original_description: str = PrivateAttr(default=None)
 
 # output
 expected_output_json: bool = Field(default=True)
@@ -161,7 +151,7 @@ class Task(BaseModel):
 callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")
 
 # recording
- processed_by_agents: Set[str] = Field(default_factory=set)
+ processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
 used_tools: int = 0
 tools_errors: int = 0
 delegations: int = 0
@@ -372,7 +362,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 
 
 # task execution
- def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
+ def execute_sync(self, agent, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> TaskOutput:
 """
 Execute the task synchronously.
 When the task has context, make sure we have executed all the tasks in the context first.
@@ -386,7 +376,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 return self._execute_core(agent, context)
 
 
- def execute_async(self, agent, context: Optional[str] = None) -> Future[TaskOutput]:
+ def execute_async(self, agent, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> Future[TaskOutput]:
 """
 Execute the task asynchronously.
 """
@@ -395,21 +385,21 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 threading.Thread(
 daemon=True,
 target=self._execute_task_async,
- args=(agent, context, future),
+ args=(agent, context, tools, future),
 ).start()
 return future
 
 
- def _execute_task_async(self, agent, context: Optional[str], future: Future[TaskOutput]) -> None:
+ def _execute_task_async(self, agent, context: Optional[str], tools: Optional[List[Any]], future: Future[TaskOutput]) -> None:
 """
 Execute the task asynchronously with context handling.
 """
 
- result = self._execute_core(agent, context)
+ result = self._execute_core(agent, context, tools)
 future.set_result(result)
 
 
- def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
+ def _execute_core(self, agent, context: Optional[str], tools: Optional[List[Any]] = []) -> TaskOutput:
 """
 Run the core execution logic of the task.
 To speed up the process, when the format is not expected to return, we will skip the conversion process.
@@ -436,9 +426,14 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 agent = agent_to_delegate
 self.delegations += 1
 
- output_raw = agent.execute_task(task=self, context=context)
- output_json_dict = self.create_json_output(raw_result=output_raw) if self.expected_output_json is True else None
- output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict) if self.expected_output_pydantic else None
+ output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context, tools=tools), None, None
+
+ if self.expected_output_json:
+ output_json_dict = self.create_json_output(raw_result=output_raw)
+
+ if self.expected_output_pydantic:
+ output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
+
 task_output = TaskOutput(
 task_id=self.id,
 raw=output_raw,
@@ -451,10 +446,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 # self._set_end_execution_time(start_time)
 
 if self.callback:
- if isinstance(self.callback, Callable):
- self.callback(**self.callback_kwargs)
- else:
- self.callback(self.output)
+ self.callback({ **self.callback_kwargs, **self.output.__dict__ })
 
 # if self._execution_span:
 # # self._telemetry.task_ended(self._execution_span, self, agent.team)
@@ -470,24 +462,32 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 return task_output
 
 
+ def _store_execution_log(self, task_index: int, was_replayed: bool = False, inputs: Optional[Dict[str, Any]] = {}) -> None:
+ """
+ Store the task execution log.
+ """
+
+ self._task_output_handler.update(task=self, task_index=task_index, was_replayed=was_replayed, inputs=inputs)
+
+
+
 class ConditionalTask(Task):
 """
 A task that can be conditionally executed based on the output of another task.
- Use this with `Team`.
+ When the `condition` return True, execute the task, else skipped with `skipped task output`.
 """
 
 condition: Callable[[TaskOutput], bool] = Field(
 default=None,
- description="max. number of retries for an agent to execute a task when an error occurs.",
+ description="max. number of retries for an agent to execute a task when an error occurs",
 )
 
- def __init__(
- self,
- condition: Callable[[Any], bool],
- **kwargs,
- ):
+
+ def __init__(self, condition: Callable[[Any], bool], **kwargs):
 super().__init__(**kwargs)
 self.condition = condition
+ self._logger = Logger(verbose=True)
+
 
 def should_execute(self, context: TaskOutput) -> bool:
 """
@@ -496,5 +496,25 @@ class ConditionalTask(Task):
 """
 return self.condition(context)
 
+
 def get_skipped_task_output(self):
- return TaskOutput(task_id=self.id, raw="", pydantic=None, json_dict=None)
+ return TaskOutput(task_id=self.id, raw="", pydantic=None, json_dict={})
+
+
+ def _handle_conditional_task(self, task_outputs: List[TaskOutput], task_index: int, was_replayed: bool) -> Optional[TaskOutput]:
+ """
+ When the conditional task should be skipped, return `skipped_task_output` as task_output else return None
+ """
+
+ previous_output = task_outputs[task_index - 1] if task_outputs and len(task_outputs) > 1 else None
+
+ if previous_output and not self.should_execute(previous_output):
+ self._logger.log(level="debug", message=f"Skipping conditional task: {self.description}", color="yellow")
+ skipped_task_output = self.get_skipped_task_output()
+ self.output = skipped_task_output
+
+ if not was_replayed:
+ self._store_execution_log(self, task_index=task_index, was_replayed=was_replayed, inputs={})
+ return skipped_task_output
+
+ return None
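
With the change above, `Task` callbacks are now invoked with a single dict that merges `callback_kwargs` into `TaskOutput.__dict__`, replacing the earlier two-branch call. A hedged sketch of a callback written for the new convention (the function name and the `channel` kwarg are illustrative, not part of the package):

```python
from typing import Any, Dict

def notify_on_completion(payload: Dict[str, Any]) -> None:
    # keys such as task_id, raw, and json_dict come from TaskOutput.__dict__;
    # extra keys (e.g. "channel") come from Task.callback_kwargs
    print(f"[{payload.get('channel', 'n/a')}] task {payload.get('task_id')} -> {payload.get('raw')}")

# task = Task(description="...", callback=notify_on_completion, callback_kwargs={"channel": "email"})
```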
versionhq/team/model.py CHANGED
@@ -188,14 +188,9 @@ class Team(BaseModel):
 2. manager_task,
 3. members' tasks
 """
- sorted_member_tasks = [
- member.task for member in self.members if member.is_manager == True
- ] + [member.task for member in self.members if member.is_manager == False]
- return (
- self.team_tasks + sorted_member_tasks
- if len(self.team_tasks) > 0
- else sorted_member_tasks
- )
+ sorted_member_tasks = [member.task for member in self.members if member.is_manager == True] + [member.task for member in self.members if member.is_manager == False]
+ return self.team_tasks + sorted_member_tasks if len(self.team_tasks) > 0 else sorted_member_tasks
+
 
 # validators
 @field_validator("id", mode="before")
@@ -261,7 +256,7 @@ class Team(BaseModel):
 if task.async_execution:
 async_task_count += 1
 else:
- break # stop traversing when a non-async task is found
+ break
 
 if async_task_count > 1:
 raise PydanticCustomError(
@@ -287,47 +282,23 @@ class Team(BaseModel):
 result[task_id] if hasattr(result, str(task_id)) else result
 )
 
+
 # task execution
 def _process_async_tasks(
- self, futures: List[Tuple[Task, Future[TaskOutput], int]], was_replayed: bool = False
- ) -> List[TaskOutput]:
+ self, futures: List[Tuple[Task, Future[TaskOutput], int]], was_replayed: bool = False
+ ) -> List[TaskOutput]:
+ """
+ When we have `Future` tasks, updated task outputs and task execution logs accordingly.
+ """
+
 task_outputs: List[TaskOutput] = []
+
 for future_task, future, task_index in futures:
 task_output = future.result()
 task_outputs.append(task_output)
- self._process_task_result(future_task, task_output)
- self._store_execution_log(
- future_task, task_output, task_index, was_replayed
- )
- return task_outputs
-
-
- def _handle_conditional_task(
- self,
- task: ConditionalTask,
- task_outputs: List[TaskOutput],
- futures: List[Tuple[Task, Future[TaskOutput], int]],
- task_index: int,
- was_replayed: bool,
- ) -> Optional[TaskOutput]:
-
- if futures:
- task_outputs = self._process_async_tasks(futures, was_replayed)
- futures.clear()
-
- previous_output = task_outputs[task_index - 1] if task_outputs else None
- if previous_output is not None and not task.should_execute(previous_output):
- self._logger.log(
- "debug",
- f"Skipping conditional task: {task.description}",
- color="yellow",
- )
- skipped_task_output = task.get_skipped_task_output()
+ future_task._store_execution_log(task_index, was_replayed)
 
- if not was_replayed:
- self._store_execution_log(task, skipped_task_output, task_index)
- return skipped_task_output
- return None
+ return task_outputs
 
 
 def _create_team_output(self, task_outputs: List[TaskOutput], lead_task_output: TaskOutput = None) -> TeamOutput:
@@ -401,38 +372,33 @@ class Team(BaseModel):
 if responsible_agent is None:
 responsible_agent = self.manager_agent if self.manager_agent else self.members[0].agent
 
- # self._prepare_agent_tools(task)
- # self._log_task_start(task, responsible_agent)
-
 if isinstance(task, ConditionalTask):
- skipped_task_output = self._handle_conditional_task(task, task_outputs, futures, task_index, was_replayed)
+ skipped_task_output = task._handle_conditional_task(task_outputs, futures, task_index, was_replayed)
 if skipped_task_output:
 continue
 
+ # self._prepare_agent_tools(task)
+ # self._log_task_start(task, responsible_agent)
+
 if task.async_execution:
 context = create_raw_outputs(tasks=[task, ],task_outputs=([last_sync_output,] if last_sync_output else []))
- future = task.execute_async(agent=responsible_agent, context=context
- # tools=responsible_agent.tools
- )
+ future = task.execute_async(agent=responsible_agent, context=context, tools=responsible_agent.tools)
 futures.append((task, future, task_index))
 else:
- if futures:
- task_outputs = self._process_async_tasks(futures, was_replayed)
- futures.clear()
-
- context = create_raw_outputs(tasks=[task,], task_outputs=([ last_sync_output,] if last_sync_output else [] ))
- task_output = task.execute_sync(agent=responsible_agent, context=context
- # tools=responsible_agent.tools
+ context = create_raw_outputs(tasks=[task,], task_outputs=([last_sync_output,] if last_sync_output else [] ))
+ task_output = task.execute_sync(agent=responsible_agent, context=context, tools=responsible_agent.tools
 )
 if responsible_agent is self.manager_agent:
 lead_task_output = task_output
 
 task_outputs.append(task_output)
 # self._process_task_result(task, task_output)
- # self._store_execution_log(task, task_output, task_index, was_replayed)
+ task._store_execution_log(task_index, was_replayed)
+
+
+ if futures:
+ task_outputs = self._process_async_tasks(futures, was_replayed)
 
- # if futures:
- # task_outputs = self._process_async_tasks(futures, was_replayed)
 return self._create_team_output(task_outputs, lead_task_output)
 
 
@@ -458,10 +424,6 @@ class Team(BaseModel):
 # self._task_output_handler.reset()
 # self._logging_color = "bold_purple"
 
- # if inputs is not None:
- # self._inputs = inputs
- # self._interpolate_inputs(inputs)
-
 
 # i18n = I18N(prompt_file=self.prompt_file)
 
@@ -469,14 +431,13 @@ class Team(BaseModel):
 agent = member.agent
 agent.team = self
 
- # add the team's common callbacks to each agent.
- if not agent.function_calling_llm:
+ if not agent.function_calling_llm and self.function_calling_llm:
 agent.function_calling_llm = self.function_calling_llm
 
 # if agent.allow_code_execution:
 # agent.tools += agent.get_code_execution_tools()
 
- if not agent.step_callback:
+ if not agent.step_callback and self.step_callback:
 agent.step_callback = self.step_callback
 
 if self.process is None:
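
The kickoff loop above now forwards `responsible_agent.tools` through the new `tools` parameter on `execute_sync` / `execute_async`. A hedged sketch of the equivalent single-task call, following the constructor arguments shown in the README section of this diff (running it requires a configured LLM provider; `"llm-of-your-choice"` is a placeholder):

```python
from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField

agent = Agent(role="demo", goal="amazing project goal", llm="llm-of-your-choice")
task = Task(
    description="Summarize the client's business model.",
    output_field_list=[ResponseField(title="summary", type=str, required=True)],
)

# mirrors what Team._execute_tasks now does for a synchronous task
res = task.execute_sync(agent=agent, context=None, tools=agent.tools)
```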
versionhq-1.1.7.4.dist-info/METADATA → versionhq-1.1.7.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: versionhq
- Version: 1.1.7.4
+ Version: 1.1.7.7
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -32,6 +32,9 @@ Keywords: orchestration framework,orchestration,ai agent,multi-agent system,RAG,
 Classifier: Programming Language :: Python
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Topic :: Software Development :: Build Tools
 Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
@@ -43,16 +46,20 @@ Requires-Dist: typing
 Requires-Dist: json-repair>=0.31.0
 Requires-Dist: litellm>=1.55.8
 Requires-Dist: openai>=1.57.0
- Requires-Dist: composio-openai>=0.6.0
- Requires-Dist: pre-commit>=4.0.1
- Requires-Dist: gunicorn>=23.0.0
+ Requires-Dist: composio-openai>=0.6.9
 Requires-Dist: composio>=0.1.0
 Requires-Dist: setuptools>=75.6.0
 Requires-Dist: wheel>=0.45.1
+ Requires-Dist: python-dotenv>=1.0.0
+ Requires-Dist: appdirs>=1.4.4
 
 # Overview
 
- ![MIT license](https://img.shields.io/badge/License-MIT-green) [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml) ![PyPI](https://img.shields.io/badge/PyPI-v1.1.7.3-blue) ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
+ ![MIT license](https://img.shields.io/badge/License-MIT-green)
+ [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
+ ![PyPI](https://img.shields.io/badge/PyPI-v1.1.7.5-blue)
+ ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple)
+ ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)
 
 
 An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -64,10 +71,11 @@ Messaging workflows are created at individual level, and will be deployed on thi
 
 **Visit:**
 
- - [Landing page](https://home.versi0n.io)
- - [Client app](https://versi0n.io/)
- - [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
- - [Test client app](https://github.com/versionHQ/test-client-app)
+ - [PyPI](https://pypi.org/project/versionhq/)
+ - [Github (LLM orchestration)](https://github.com/versionHQ/multi-agent-system)
+ - [Github (Test client app)](https://github.com/versionHQ/test-client-app)
+ - [Use case](https://versi0n.io/) - client app (alpha)
+
 
 <hr />
 
@@ -87,6 +95,8 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
 
 - [Key Features](#key-features)
 - [Usage](#usage)
+ - [Case 1. Build an AI agent on LLM of your choice and execute a task:](#case-1-build-an-ai-agent-on-llm-of-your-choice-and-execute-a-task)
+ - [Case 2. Form a team to handle multiple tasks:](#case-2-form-a-team-to-handle-multiple-tasks)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setup](#setup)
@@ -132,7 +142,8 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 
 2. You can use the `versionhq` module in your Python app.
 
- - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+ ### Case 1. Build an AI agent on LLM of your choice and execute a task:
 
 ```
 from versionhq.agent.model import Agent
@@ -142,6 +153,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 role="demo",
 goal="amazing project goal",
 skillsets=["skill_1", "skill_2", ],
+ tools=["amazing RAG tool",]
 llm="llm-of-your-choice"
 )
 
@@ -165,7 +177,41 @@ This will return a dictionary with keys defined in the `ResponseField`.
 { test1: "answer1", "test2": ["answer2-1", "answer2-2", "answer2-3",] }
 ```
 
- For more info: [PyPI package](https://pypi.org/project/versionhq/)
+ ### Case 2. Form a team to handle multiple tasks:
+
+ ```
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task, ResponseField
+ from versionhq.team.model import Team, TeamMember
+
+ agent_a = Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
+ agent_b = Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-choice")
+
+ task_1 = Task(
+ description="Analyze the client's business model.",
+ output_field_list=[ResponseField(title="test1", type=str, required=True),],
+ allow_delegation=True
+ )
+
+ task_2 = Task(
+ description="Define the cohort.",
+ output_field_list=[ResponseField(title="test1", type=int, required=True),],
+ allow_delegation=False
+ )
+
+ team = Team(
+ members=[
+ TeamMember(agent=agent_a, is_manager=False, task=task_1),
+ TeamMember(agent=agent_b, is_manager=True, task=task_2),
+ ],
+ )
+ res = team.kickoff()
+ ```
+
+ This will return a list with dictionaries with keys defined in the `ResponseField` of each task.
+
+ Tasks can be delegated to a team manager, peers in the team, or completely new agent.
+
 
 <hr />
 
@@ -233,8 +279,7 @@ src/
 ```
 uv venv
 source .venv/bin/activate
-
- uv pip install -r requirements.txt -v
+ uv pip sync
 ```
 
 * In case of AssertionError/module mismatch, run Python version control using `.pyenv`
versionhq-1.1.7.4.dist-info/RECORD → versionhq-1.1.7.7.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- versionhq/__init__.py,sha256=HXgS9ZXHPzYzddtQdb7-MLMFjnrZQChOzfbGbl5ot_o,871
+ versionhq/__init__.py,sha256=UNk3K1Icj6U6bX4hmcE-7PSzT3mYI9f_i_gXybscmWc,871
 versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/_utils/cache_handler.py,sha256=zDQKzIn7vp-M2-uepHFxgJstjfftZS5mzXKL_-4uVvI,370
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
@@ -7,7 +7,7 @@ versionhq/_utils/process_config.py,sha256=ogrhovLbwe0ocQlcohRgBBRtww7C3pk9hikjvg
 versionhq/_utils/rpm_controller.py,sha256=T7waIGeblu5K58erY4lqVLcPsWM7W9UFdU3DG9Dsk0w,2214
 versionhq/_utils/usage_metrics.py,sha256=c33a_28y8ECUgflsKN3mkNm0fNkWgZmXwybMwIqoKXA,1098
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent/model.py,sha256=h9QHB-Hi_71JinPlfL_83kaB-Ks8npzIqE7BVHwn1Aw,18439
+ versionhq/agent/model.py,sha256=fK7rb10IFrgDDCD78owNRdMsIQ_aj7nmIQdp1WbWJc8,18621
 versionhq/agent/parser.py,sha256=GhoNQo4WloVM3vGnAmt9lnEOTARX7nWMhJE55rF_5Rs,5500
 versionhq/agent/TEMPLATES/Backstory.py,sha256=cdngBx1GEv7nroR46FEhnysnBJ9mEVL763_9np6Skkc,395
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -18,22 +18,25 @@ versionhq/clients/customer/model.py,sha256=rQnCv_wdCdrYAsUjyB6X6ULiuWfqcBBoarXcQ
 versionhq/clients/product/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/product/model.py,sha256=Us3UnzYlub6ipBislMN-JrvxZx0ocl9PtQJINJ8XtBA,2385
 versionhq/clients/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/clients/workflow/model.py,sha256=8EbrUdS0vUhutx3_8M8A0Dw1BXpCqp5AFWbg4SGHj9M,5695
+ versionhq/clients/workflow/model.py,sha256=qpRCDwULhSLWDFkSYrvXW5m07bKDrwttTmqsYA9ZVP4,5727
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/llm/llm_vars.py,sha256=YZoXqFBW7XpclUZ14_AAz7WOjoyCXnGcI959GSpX2q0,5343
 versionhq/llm/model.py,sha256=PdwisrlrsDqd6gXwXCyGbGTRTeGZ8SXpt_gfua8qunk,8266
+ versionhq/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ versionhq/storage/task_output_storage.py,sha256=RxvF_lSRUuo2Af3zMAuc1aOymx1e6e_5VJ2y757_Hu0,4910
 versionhq/task/__init__.py,sha256=g4mCATnn1mUXxsfQ5p6IpPawr8O421wVIT8kMKEcxQw,180
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
- versionhq/task/model.py,sha256=LzB2E6s1ghmxRUG1gWjJs7DTcnBQFZUzCQ1l_90y2eo,18560
+ versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
+ versionhq/task/model.py,sha256=QCNRHulD4RdOVZAUWft4rAqlTIqcbZTqSA-lDUoSfpQ,19314
 versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/team/model.py,sha256=RyspmYVtXW3f4MKjWT1mnpM9XdE435d8HbEQms7LHMU,19821
+ versionhq/team/model.py,sha256=M99sMAlKyS9jQsufH2xoGrv2dDIvaJWsmIQgW-0pT0M,18305
 versionhq/team/team_planner.py,sha256=B1UOn_DYVVterUn2CAd80jfO4sViJCCXPJA3abSSugg,2143
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/decorator.py,sha256=Y-j4jkoujD5LUvpe8uf3p5Zagk2XVaRKC9rkIE-2geo,1189
 versionhq/tool/model.py,sha256=JZOEcZRIEfcrjL8DgrFYDt4YNgMF8rXS26RK6D2x9mc,6906
 versionhq/tool/tool_handler.py,sha256=e-2VfG9zFpfPG_oMoPXye93GDovs7FuUASWQwUTLrJ0,1498
- versionhq-1.1.7.4.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
- versionhq-1.1.7.4.dist-info/METADATA,sha256=x5wxp9x2KxtPRJQUudsm3UtbwElH5YlAslCYRWaI-TM,14413
- versionhq-1.1.7.4.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
- versionhq-1.1.7.4.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.1.7.4.dist-info/RECORD,,
+ versionhq-1.1.7.7.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
+ versionhq-1.1.7.7.dist-info/METADATA,sha256=tHhEWrbEEt-NyUVKKQwMImjFByi5LJiCBVLzZc86L_E,15919
+ versionhq-1.1.7.7.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ versionhq-1.1.7.7.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.1.7.7.dist-info/RECORD,,