versionhq 1.1.7.1__tar.gz → 1.1.7.3__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
Files changed (69)
  1. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/PKG-INFO +9 -10
  2. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/README.md +8 -9
  3. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/pyproject.toml +1 -1
  4. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/__init__.py +5 -2
  5. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/model.py +3 -3
  6. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/product/model.py +11 -36
  7. versionhq-1.1.7.3/src/versionhq/clients/workflow/model.py +158 -0
  8. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/task/model.py +34 -17
  9. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/team/model.py +8 -18
  10. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/PKG-INFO +9 -10
  11. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/SOURCES.txt +1 -0
  12. versionhq-1.1.7.3/tests/clients/workflow_test.py +21 -0
  13. versionhq-1.1.7.3/tests/task/task_test.py +258 -0
  14. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/team/team_test.py +5 -11
  15. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/uv.lock +13 -13
  16. versionhq-1.1.7.1/src/versionhq/clients/workflow/model.py +0 -174
  17. versionhq-1.1.7.1/tests/task/task_test.py +0 -112
  18. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.github/workflows/publish.yml +0 -0
  19. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.github/workflows/publish_testpypi.yml +0 -0
  20. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.github/workflows/run_tests.yml +0 -0
  21. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.github/workflows/security_check.yml +0 -0
  22. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.gitignore +0 -0
  23. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.pre-commit-config.yaml +0 -0
  24. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/.python-version +0 -0
  25. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/LICENSE +0 -0
  26. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/SECURITY.md +0 -0
  27. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/db/preprocess.py +0 -0
  28. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/requirements.txt +0 -0
  29. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/runtime.txt +0 -0
  30. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/setup.cfg +0 -0
  31. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/__init__.py +0 -0
  32. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/cache_handler.py +0 -0
  33. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/i18n.py +0 -0
  34. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/logger.py +0 -0
  35. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/process_config.py +0 -0
  36. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/rpm_controller.py +0 -0
  37. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/_utils/usage_metrics.py +0 -0
  38. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  39. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  40. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/__init__.py +0 -0
  41. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/parser.py +0 -0
  42. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/cli/__init__.py +0 -0
  43. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/__init__.py +0 -0
  44. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/customer/__init__.py +0 -0
  45. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/customer/model.py +0 -0
  46. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/product/__init__.py +0 -0
  47. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/workflow/__init__.py +0 -0
  48. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/llm/__init__.py +0 -0
  49. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/llm/llm_vars.py +0 -0
  50. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/llm/model.py +0 -0
  51. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/task/__init__.py +0 -0
  52. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/task/formatter.py +0 -0
  53. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/team/__init__.py +0 -0
  54. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/team/team_planner.py +0 -0
  55. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/tool/__init__.py +0 -0
  56. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/tool/decorator.py +0 -0
  57. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/tool/model.py +0 -0
  58. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/tool/tool_handler.py +0 -0
  59. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/dependency_links.txt +0 -0
  60. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/requires.txt +0 -0
  61. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/top_level.txt +0 -0
  62. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/__init__.py +0 -0
  63. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/agent/__init__.py +0 -0
  64. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/agent/agent_test.py +0 -0
  65. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/cli/__init__.py +0 -0
  66. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/conftest.py +0 -0
  67. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/task/__init__.py +0 -0
  68. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/team/Prompts/Demo_test.py +0 -0
  69. {versionhq-1.1.7.1 → versionhq-1.1.7.3}/tests/team/__init__.py +0 -0
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: versionhq
- Version: 1.1.7.1
+ Version: 1.1.7.3
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -142,7 +142,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  role="demo",
  goal="amazing project goal",
  skillsets=["skill_1", "skill_2", ],
- llm="llm-of-choice"
+ llm="llm-of-your-choice"
  )

  task = Task(
@@ -153,20 +153,19 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  ResponseField(title="test1", type=str, required=True),
  ResponseField(title="test2", type=list, required=True),
  ],
- context=["amazing context",],
- tools=["amazing tool"],
  callback=None,
  )
-
- res = task.execute_sync(agent=agent)
-
+ res = task.execute_sync(agent=agent, context="amazing context to consider.")
  return res.to_dict()
-
  ```

- For more details:
+ This will return a dictionary with keys defined in the `ResponseField`.
+
+ ```
+ { test1: "answer1", "test2": ["answer2-1", "answer2-2", "answer2-3",] }
+ ```

- [PyPi package](https://pypi.org/project/versionhq/)
+ For more info: [PyPI package](https://pypi.org/project/versionhq/)

  <hr />

{versionhq-1.1.7.1 → versionhq-1.1.7.3}/README.md
@@ -90,7 +90,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  role="demo",
  goal="amazing project goal",
  skillsets=["skill_1", "skill_2", ],
- llm="llm-of-choice"
+ llm="llm-of-your-choice"
  )

  task = Task(
@@ -101,20 +101,19 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  ResponseField(title="test1", type=str, required=True),
  ResponseField(title="test2", type=list, required=True),
  ],
- context=["amazing context",],
- tools=["amazing tool"],
  callback=None,
  )
-
- res = task.execute_sync(agent=agent)
-
+ res = task.execute_sync(agent=agent, context="amazing context to consider.")
  return res.to_dict()
-
  ```

- For more details:
+ This will return a dictionary with keys defined in the `ResponseField`.
+
+ ```
+ { test1: "answer1", "test2": ["answer2-1", "answer2-2", "answer2-3",] }
+ ```

- [PyPi package](https://pypi.org/project/versionhq/)
+ For more info: [PyPI package](https://pypi.org/project/versionhq/)

  <hr />

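As a quick orientation to the calling convention the README now documents, here is a minimal hedged sketch; the model name is illustrative, and the `Task` constructor arguments elided by the hunk above (the `ResponseField` setup) are omitted:

```
from versionhq.agent.model import Agent
from versionhq.task.model import Task

agent = Agent(role="demo", goal="amazing project goal", llm="gpt-4o-mini")
task = Task(description="amazing task", callback=None)

# As of 1.1.7.3, ad-hoc context is passed at execution time rather than
# stored on the Task; the returned dict is keyed by the ResponseField titles.
res = task.execute_sync(agent=agent, context="amazing context to consider.")
print(res.to_dict())
```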
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]

  [project]
  name = "versionhq"
- version = "1.1.7.1"
+ version = "1.1.7.3"
  authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
  description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
  readme = "README.md"
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/__init__.py
@@ -10,20 +10,23 @@ warnings.filterwarnings(
  from versionhq.agent.model import Agent
  from versionhq.clients.customer.model import Customer
  from versionhq.clients.product.model import Product, ProductProvider
- from versionhq.clients.workflow.model import MessagingWorkflow
+ from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent, Score, ScoreFormat
  from versionhq.llm.model import LLM
  from versionhq.task.model import Task, TaskOutput
  from versionhq.team.model import Team, TeamOutput
  from versionhq.tool.model import Tool


- __version__ = "1.1.7.1"
+ __version__ = "1.1.7.3"
  __all__ = [
      "Agent",
      "Customer",
      "Product",
      "ProductProvider",
      "MessagingWorkflow",
+     "MessagingComponent",
+     "Score",
+     "ScoreFormat",
      "LLM",
      "Task",
      "TaskOutput",
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/agent/model.py
@@ -95,7 +95,7 @@ class Agent(ABC, BaseModel):
      role: str = Field(description="role of the agent - used in summary and logs")
      goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
      backstory: Optional[str] = Field(default=None, description="system context passed to the LLM")
-     knowledge: Optional[str] = Field(default=None)
+     knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
      skillsets: Optional[List[str]] = Field(default_factory=list)

      # tools
@@ -358,7 +358,7 @@ class Agent(ABC, BaseModel):
          return {"output": response.output if hasattr(response, "output") else response}


-     def execute_task(self, task, context: Optional[str] = None) -> str:
+     def execute_task(self, task, context: Optional[str] = None, tools: Optional[str] = None) -> str:
          """
          Execute the task and return the output in string.
          To simplify, the tools are cascaded from the `tools_called` under the `task` Task instance if any.
@@ -367,7 +367,7 @@ class Agent(ABC, BaseModel):
          """

          task_prompt = task.prompt()
-         if context:
+         if context is not task.prompt_context:  # as `task.prompt()` includes adding `task.prompt_context` to the prompt.
              task_prompt += context

          tool_results = []
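The rewritten guard compares identity rather than truthiness: `task.prompt()` now folds `task.prompt_context` into the prompt itself (see the `task/model.py` hunks below), so `execute_task` only appends `context` when it is a different object. A standalone toy, not the library's real classes, illustrating the duplication this avoids:

```
from typing import Optional

class ToyTask:
    prompt_context: Optional[str] = None

    def prompt(self) -> str:
        # Mirrors Task.prompt() folding prompt_context into the prompt.
        return "task description" + (self.prompt_context or "")

def execute_task(task: ToyTask, context: Optional[str] = None) -> str:
    task_prompt = task.prompt()
    if context is not task.prompt_context:  # identity check, as in the diff
        task_prompt += context or ""
    return task_prompt

task = ToyTask()
task.prompt_context = " + context"
# The same object flows through both paths, so it is appended only once.
print(execute_task(task, context=task.prompt_context))  # task description + context
```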
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/clients/product/model.py
@@ -1,15 +1,6 @@
  import uuid
  from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
- from pydantic import (
-     UUID4,
-     InstanceOf,
-     BaseModel,
-     ConfigDict,
-     Field,
-     create_model,
-     field_validator,
-     model_validator,
- )
+ from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
  from pydantic_core import PydanticCustomError


@@ -23,24 +14,16 @@ class ProductProvider(BaseModel):

      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      name: Optional[str] = Field(default=None, description="client name")
-     region: Optional[str] = Field(
-         default=None, description="region of client's main business operation"
-     )
-     data_pipeline: Optional[List[str]] = Field(
-         default=None, description="store the data pipelines that the client is using"
-     )
-     destinations: Optional[List[str]] = Field(
-         default=None,
-         description="store the destination services that the client is using",
-     )
+     region: Optional[str] = Field(default=None, description="region of client's main business operation")
+     data_pipeline: Optional[List[str]] = Field(default=None, description="store the data pipelines that the client is using")
+     destinations: Optional[List[str]] = Field(default=None,description="store the destination services that the client is using")

      @field_validator("id", mode="before")
      @classmethod
      def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
          if v:
-             raise PydanticCustomError(
-                 "may_not_set_field", "This field is not to be set by the user.", {}
-             )
+             raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})
+


  class Product(BaseModel):
@@ -51,24 +34,16 @@ class Product(BaseModel):
      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      name: Optional[str] = Field(default=None, description="product name")
      description: Optional[str] = Field(
-         default=None,
-         max_length=256,
-         description="product description scraped from landing url or client input. cascade to the agent",
-     )
+         default=None,max_length=256,description="product description scraped from landing url or client input. cascade to the agent")
      provider: Optional[ProductProvider] = Field(default=None)
      audience: Optional[str] = Field(default=None, description="target audience")
      usp: Optional[str] = Field(default=None)
-     landing_url: Optional[str] = Field(
-         default=None, description="marketing url of the product if any"
-     )
-     cohort_timeframe: Optional[int] = Field(
-         default=30, description="ideal cohort timeframe of the product in days"
-     )
+     landing_url: Optional[str] = Field(default=None, description="marketing url of the product if any")
+     cohort_timeframe: Optional[int] = Field(default=30, description="ideal cohort timeframe of the product in days")
+

      @field_validator("id", mode="before")
      @classmethod
      def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
          if v:
-             raise PydanticCustomError(
-                 "may_not_set_field", "This field is not to be set by the user.", {}
-             )
+             raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})
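These hunks only collapse formatting, but the validator pattern they touch is worth a note: `id` is generated via `uuid.uuid4` and frozen, and the `mode="before"` validator rejects any user-supplied value. A small sketch of that contract, assuming pydantic v2 wraps the custom error into a `ValidationError`:

```
import pytest
from pydantic import ValidationError
from versionhq.clients.product.model import Product

product = Product(name="demo")  # ok: id is generated, not user-set
assert product.id is not None

with pytest.raises(ValidationError):  # user-set id triggers may_not_set_field
    Product(id="not-allowed", name="demo")
```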
versionhq-1.1.7.3/src/versionhq/clients/workflow/model.py
@@ -0,0 +1,158 @@
+ import uuid
+ from abc import ABC
+ from datetime import date, datetime, time, timedelta
+ from typing import Any, Dict, List, Union, Callable, Type, Optional, get_args, get_origin
+ from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
+ from pydantic_core import PydanticCustomError
+
+ from versionhq.clients.product.model import Product
+ from versionhq.clients.customer.model import Customer
+ from versionhq.agent.model import Agent
+ from versionhq.team.model import Team
+
+
+ class ScoreFormat:
+     def __init__(self, rate: Union[float, int] = 0, weight: int = 1):
+         self.rate = rate
+         self.weight = weight
+         self.aggregate = rate * weight
+
+
+ class Score:
+     """
+     Evaluate the score on 0 (no performance) to 1 scale.
+     `rate`: Any float from 0.0 to 1.0 given by an agent.
+     `weight`: Importance of each factor to the aggregated score.
+     """
+
+     def __init__(
+         self,
+         brand_tone: ScoreFormat = ScoreFormat(0, 0),
+         audience: ScoreFormat = ScoreFormat(0, 0),
+         track_record: ScoreFormat = ScoreFormat(0, 0),
+         **kwargs: Optional[Dict[str, ScoreFormat]],
+     ):
+         self.brand_tone = brand_tone
+         self.audience = audience
+         self.track_record = track_record
+         self.kwargs = kwargs
+
+
+     def result(self) -> int:
+         aggregate_score = self.brand_tone.aggregate + self.audience.aggregate + self.track_record.aggregate
+         denominator = self.brand_tone.weight + self.audience.weight + self.track_record.weight
+
+         for k, v in self.kwargs.items():
+             aggregate_score += v.aggregate
+             denominator += v.weight
+
+         if denominator == 0:
+             return 0
+
+         return round(aggregate_score / denominator, 2)
+
+
+
+ class MessagingComponent(ABC, BaseModel):
+     layer_id: int = Field(default=0, description="add id of the layer: 0, 1, 2")
+     message: str = Field(default=None, max_length=1024, description="text message content to be sent")
+     interval: Optional[str] = Field(
+         default=None,description="interval to move on to the next layer. if this is the last layer, set as `None`")
+     score: Union[float, InstanceOf[Score]] = Field(default=None)
+
+
+     def store_scoring_result(self, scoring_subject: str, score: Union[int, Score, ScoreFormat] = None):
+         """
+         Set up the `score` field
+         """
+
+         if isinstance(score, Score):
+             setattr(self, "score", score)
+
+         elif isinstance(score, ScoreFormat):
+             score_instance = Score()
+             setattr(score_instance, scoring_subject, score)
+             setattr(self, "score", score_instance)
+
+         elif isinstance(score, int) or isinstance(score, float):
+             score_instance, score_format_instance = Score(), ScoreFormat(rate=score, weight=1)
+             setattr(score_instance, "kwargs", { scoring_subject: score_format_instance })
+             setattr(self, "score", score_instance)
+
+         else:
+             pass
+
+         return self
+
+
+
+ class MessagingWorkflow(ABC, BaseModel):
+     """
+     Store 3 layers of messaging workflow sent to `customer` on the `product`
+     """
+
+     _created_at: Optional[datetime]
+     _updated_at: Optional[datetime]
+
+     model_config = ConfigDict()
+
+     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+     components: List[MessagingComponent] = Field(default_factory=list, description="store messaging components in the workflow")
+
+     # responsible tean or agents
+     team: Optional[Team] = Field(default=None, description="store `Team` instance responsibile for autopiloting this workflow")
+     agents: Optional[List[Agent]] = Field(
+         default=None, description="store `Agent` instances responsible for autopiloting this workflow. if the team exsits, this field remains as `None`")
+
+     # metrics
+     destination: Optional[str] = Field( default=None, description="destination service to launch this workflow")
+     product: InstanceOf[Product] = Field(default=None)
+     customer: InstanceOf[Customer] = Field(default=None)
+
+     metrics: Union[List[Dict[str, Any]], List[str]] = Field(
+         default=None, max_length=256, description="store metrics that used to predict and track the performance of this workflow.")
+
+
+     @property
+     def name(self):
+         if self.customer.id:
+             return f"Workflow ID: {self.id} - on {self.product.id} for {self.customer.id}"
+         else:
+             return f"Workflow ID: {self.id} - on {self.product.id}"
+
+
+     @field_validator("id", mode="before")
+     @classmethod
+     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
+         if v:
+             raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})
+
+
+     @model_validator(mode="after")
+     def set_up_destination(self):
+         """
+         Set up the destination service when self.destination is None.
+         Prioritize customer's destination to the product provider's destination list.
+         """
+         if self.destination is None:
+             if self.customer is not None:
+                 self.destination = self.customer.on
+
+             else:
+                 destination_list = self.product.provider.destinations
+                 if destination_list:
+                     self.destination = destination_list[0]
+         return self
+
+
+     def reassign_agent_or_team(self, agents: List[Agent] = None, team: Team = None) -> None:
+         """
+         Fire unresponsible agents/team and assign new one.
+         """
+
+         if not agents and not team:
+             raise ValueError("Need to add at least 1 agent or team.")
+
+         self.agents = agents
+         self.team = team
+         self.updated_at = datetime.datetime.now()
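The substantive addition in this new module is the scoring math: `Score.result()` returns the weight-averaged rate, `sum(rate_i * weight_i) / sum(weight_i)`, over the three named factors plus any extra factors passed through `**kwargs`. A worked example with illustrative values:

```
from versionhq.clients.workflow.model import Score, ScoreFormat

score = Score(
    brand_tone=ScoreFormat(rate=0.9, weight=2),    # aggregate 1.8
    audience=ScoreFormat(rate=0.5, weight=1),      # aggregate 0.5
    track_record=ScoreFormat(rate=0.7, weight=1),  # aggregate 0.7
    engagement=ScoreFormat(rate=1.0, weight=4),    # extra factor via **kwargs
)
# (1.8 + 0.5 + 0.7 + 4.0) / (2 + 1 + 1 + 4) = 7.0 / 8
print(score.result())  # 0.88
```

Note that the `ScoreFormat(0, 0)` defaults in `Score.__init__` are evaluated once at definition time; that is harmless here only because `result()` never mutates them.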
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/task/model.py
@@ -151,9 +151,10 @@ class Task(BaseModel):
      # task setup
      context: Optional[List["Task"]] = Field(default=None, description="other tasks whose outputs should be used as context")
      tools_called: Optional[List[ToolCalled]] = Field(default_factory=list, description="tools that the agent can use for this task")
-     take_tool_res_as_final: bool = Field(default=False,description="when set True, tools res will be stored in the `TaskOutput`")
+     take_tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")
+     allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")

-     prompt_context: Optional[str] = None
+     prompt_context: Optional[str] = Field(default=None)
      async_execution: bool = Field(default=False,description="whether the task should be executed asynchronously or not")
      config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the agent")
      callback: Optional[Any] = Field(default=None, description="callback to be executed after the task is completed.")
@@ -278,17 +279,20 @@ Your outputs MUST adhere to the following format and should NOT include any irre
          When we have cusotmer/product info, add them to the prompt.
          """

-         task_slices = [self.description, f"{self.output_prompt}"]
+         task_slices = [self.description, f"{self.output_prompt}", f"Take the following context into consideration: "]

          if self.context:
              context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
-             task_slices.insert(1, f"Take the following context into consideration: {context_outputs}")
+             task_slices.insert(len(task_slices), context_outputs)

          if customer:
-             task_slices.insert(1, f"customer overview: {customer}")
+             task_slices.insert(len(task_slices), f"Customer overview: {customer}")

          if product_overview:
-             task_slices.insert(1, f"Product overview: {product_overview}")
+             task_slices.insert(len(task_slices), f"Product overview: {product_overview}")
+
+         if self.prompt_context:
+             task_slices.insert(len(task_slices), self.prompt_context)

          return "\n".join(task_slices)

@@ -367,7 +371,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre


      # task execution
-     def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
+     def execute_sync(self, agent, context: Optional[str] = None, callback_kwargs: Dict[str, Any] = None) -> TaskOutput:
          """
          Execute the task synchronously.
          When the task has context, make sure we have executed all the tasks in the context first.
@@ -376,12 +380,12 @@ Your outputs MUST adhere to the following format and should NOT include any irre
          if self.context:
              for task in self.context:
                  if task.output is None:
-                     task._execute_core(agent, context)
+                     task._execute_core(agent, context, callback_kwargs)

          return self._execute_core(agent, context)


-     def execute_async(self, agent, context: Optional[str] = None) -> Future[TaskOutput]:
+     def execute_async(self, agent, context: Optional[str] = None, callback_kwargs: Dict[str, Any] = None) -> Future[TaskOutput]:
          """
          Execute the task asynchronously.
          """
@@ -390,26 +394,36 @@ Your outputs MUST adhere to the following format and should NOT include any irre
          threading.Thread(
              daemon=True,
              target=self._execute_task_async,
-             args=(agent, context, future),
+             args=(agent, context, callback_kwargs, future),
          ).start()
          return future


-     def _execute_task_async(self, agent, context: Optional[str], future: Future[TaskOutput]) -> None:
-         """Execute the task asynchronously with context handling."""
-         result = self._execute_core(agent, context)
+     def _execute_task_async(self, agent, context: Optional[str], callback_kwargs: Dict[str, Any], future: Future[TaskOutput]) -> None:
+         """
+         Execute the task asynchronously with context handling.
+         """
+
+         result = self._execute_core(agent, context, callback_kwargs)
          future.set_result(result)


-     def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
+     def _execute_core(self, agent, context: Optional[str], callback_kwargs: Optional[Dict[str, Any]] = None) -> TaskOutput:
          """
          Run the core execution logic of the task.
+         To speed up the process, when the format is not expected to return, we will skip the conversion process.
          """
+         from versionhq.agent.model import Agent

          self.prompt_context = context
+
+         if self.allow_delegation:
+             agent = Agent(role="delegated_agent", goal=agent.goal, llm=agent.llm)  #! REFINEME - logic to pick up the high performer
+             self.delegations += 1
+
          output_raw = agent.execute_task(task=self, context=context)
-         output_json_dict = self.create_json_output(raw_result=output_raw)
-         output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
+         output_json_dict = self.create_json_output(raw_result=output_raw) if self.expected_output_json is True else None
+         output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict) if self.expected_output_pydantic else None
          task_output = TaskOutput(
              task_id=self.id,
              raw=output_raw,
@@ -422,7 +436,10 @@ Your outputs MUST adhere to the following format and should NOT include any irre
          # self._set_end_execution_time(start_time)

          if self.callback:
-             self.callback(self.output)
+             if isinstance(self.callback, Callable):
+                 self.callback(**callback_kwargs)
+             else:
+                 self.callback(self.output)

          # if self._execution_span:
          # # self._telemetry.task_ended(self._execution_span, self, agent.team)
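With these hunks, a callable `callback` is now invoked with `**callback_kwargs` instead of the task output. Note that the diff leaves `execute_sync`'s final call as `_execute_core(agent, context)`, so the async path is the one that visibly threads the kwargs through. A hedged sketch of that path, assuming `description` alone suffices to construct a `Task` and that `agent` is a configured `Agent`; `log_completion` is a hypothetical callback:

```
from versionhq.task.model import Task

def log_completion(run_id: str = "", **kwargs) -> None:
    # Receives whatever the caller packed into callback_kwargs.
    print(f"task finished for run {run_id}")

task = Task(description="demo", callback=log_completion)
future = task.execute_async(agent=agent, context=None, callback_kwargs={"run_id": "abc-123"})
output = future.result()  # TaskOutput; the callback ran with run_id="abc-123"
```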
{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq/team/model.py
@@ -7,17 +7,9 @@ from dotenv import load_dotenv
  from concurrent.futures import Future
  from hashlib import md5
  from typing import Any, Dict, List, TYPE_CHECKING, Callable, Optional, Tuple, Union
- from pydantic import (
-     UUID4,
-     InstanceOf,
-     Json,
-     BaseModel,
-     Field,
-     PrivateAttr,
-     field_validator,
-     model_validator,
- )
- from pydantic_core import PydanticCustomError
+ from pydantic import UUID4, InstanceOf, Json, BaseModel, Field, PrivateAttr, field_validator, model_validator
+ from pydantic._internal._generate_schema import GenerateSchema
+ from pydantic_core import PydanticCustomError, core_schema

  from versionhq.agent.model import Agent
  from versionhq.task.model import Task, TaskOutput, ConditionalTask, TaskOutputFormat
@@ -27,9 +19,6 @@ from versionhq._utils.logger import Logger
  from versionhq._utils.usage_metrics import UsageMetrics


- from pydantic._internal._generate_schema import GenerateSchema
- from pydantic_core import core_schema
-
  initial_match_type = GenerateSchema.match_type


@@ -301,10 +290,8 @@ class Team(BaseModel):

      # task execution
      def _process_async_tasks(
-         self,
-         futures: List[Tuple[Task, Future[TaskOutput], int]],
-         was_replayed: bool = False,
-     ) -> List[TaskOutput]:
+         self, futures: List[Tuple[Task, Future[TaskOutput], int]], was_replayed: bool = False
+     ) -> List[TaskOutput]:
          task_outputs: List[TaskOutput] = []
          for future_task, future, task_index in futures:
              task_output = future.result()
@@ -315,6 +302,7 @@ class Team(BaseModel):
          )
          return task_outputs

+
      def _handle_conditional_task(
          self,
          task: ConditionalTask,
@@ -323,6 +311,7 @@ class Team(BaseModel):
          task_index: int,
          was_replayed: bool,
      ) -> Optional[TaskOutput]:
+
          if futures:
              task_outputs = self._process_async_tasks(futures, was_replayed)
              futures.clear()
@@ -347,6 +336,7 @@ class Team(BaseModel):
          Take the output of the first task or the lead task output as the team output `raw` value.
          Note that `tasks` are already sorted by the importance.
          """
+
          if len(task_outputs) < 1:
              raise ValueError("Something went wrong. Kickoff should return only one task output.")

{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: versionhq
- Version: 1.1.7.1
+ Version: 1.1.7.3
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -142,7 +142,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  role="demo",
  goal="amazing project goal",
  skillsets=["skill_1", "skill_2", ],
- llm="llm-of-choice"
+ llm="llm-of-your-choice"
  )

  task = Task(
@@ -153,20 +153,19 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  ResponseField(title="test1", type=str, required=True),
  ResponseField(title="test2", type=list, required=True),
  ],
- context=["amazing context",],
- tools=["amazing tool"],
  callback=None,
  )
-
- res = task.execute_sync(agent=agent)
-
+ res = task.execute_sync(agent=agent, context="amazing context to consider.")
  return res.to_dict()
-
  ```

- For more details:
+ This will return a dictionary with keys defined in the `ResponseField`.
+
+ ```
+ { test1: "answer1", "test2": ["answer2-1", "answer2-2", "answer2-3",] }
+ ```

- [PyPi package](https://pypi.org/project/versionhq/)
+ For more info: [PyPI package](https://pypi.org/project/versionhq/)

  <hr />

{versionhq-1.1.7.1 → versionhq-1.1.7.3}/src/versionhq.egg-info/SOURCES.txt
@@ -57,6 +57,7 @@ tests/conftest.py
  tests/agent/__init__.py
  tests/agent/agent_test.py
  tests/cli/__init__.py
+ tests/clients/workflow_test.py
  tests/task/__init__.py
  tests/task/task_test.py
  tests/team/__init__.py
versionhq-1.1.7.3/tests/clients/workflow_test.py
@@ -0,0 +1,21 @@
+ import os
+ import pytest
+ from versionhq.agent.model import Agent
+ from versionhq.llm.model import LLM
+ from versionhq.clients.workflow.model import Score, ScoreFormat, MessagingWorkflow, MessagingComponent
+
+ MODEL_NAME = os.environ.get("LITELLM_MODEL_NAME", "gpt-3.5-turbo")
+ LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+
+
+ def test_store_scores():
+     """
+     Test if the final result will be calcurated using a random subject
+     """
+
+     messaging_component = MessagingComponent(message="demo")
+     score_raw = 15
+     messaging_component.store_scoring_result("demo", score=score_raw)
+
+     assert messaging_component.score is not None
+     assert messaging_component.score.result() is not None
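For reference, tracing this test through the model above: the `int` branch of `store_scoring_result` stores `ScoreFormat(rate=15, weight=1)` under `kwargs["demo"]` while the three named factors stay at `ScoreFormat(0, 0)`, so:

```
# aggregate   = 0 + 0 + 0 + 15 * 1 = 15
# denominator = 0 + 0 + 0 + 1      = 1
# result      = round(15 / 1, 2)   = 15.0
assert MessagingComponent(message="demo").store_scoring_result("demo", 15).score.result() == 15.0
```

Both assertions are therefore satisfied, though a raw score of 15 falls outside the 0-to-1 scale the `Score` docstring describes.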