versionhq 1.1.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +33 -0
- versionhq/_utils/__init__.py +0 -0
- versionhq/_utils/cache_handler.py +13 -0
- versionhq/_utils/i18n.py +48 -0
- versionhq/_utils/logger.py +57 -0
- versionhq/_utils/process_config.py +28 -0
- versionhq/_utils/rpm_controller.py +73 -0
- versionhq/_utils/usage_metrics.py +31 -0
- versionhq/agent/__init__.py +0 -0
- versionhq/agent/model.py +472 -0
- versionhq/agent/parser.py +148 -0
- versionhq/cli/__init__.py +0 -0
- versionhq/clients/__init__.py +0 -0
- versionhq/clients/customer/__init__.py +0 -0
- versionhq/clients/customer/model.py +57 -0
- versionhq/clients/product/__init__.py +0 -0
- versionhq/clients/product/model.py +74 -0
- versionhq/clients/workflow/__init__.py +0 -0
- versionhq/clients/workflow/model.py +174 -0
- versionhq/llm/__init__.py +0 -0
- versionhq/llm/llm_vars.py +173 -0
- versionhq/llm/model.py +245 -0
- versionhq/task/__init__.py +9 -0
- versionhq/task/formatter.py +22 -0
- versionhq/task/model.py +430 -0
- versionhq/team/__init__.py +0 -0
- versionhq/team/model.py +585 -0
- versionhq/team/team_planner.py +55 -0
- versionhq/tool/__init__.py +0 -0
- versionhq/tool/composio.py +102 -0
- versionhq/tool/decorator.py +40 -0
- versionhq/tool/model.py +220 -0
- versionhq/tool/tool_handler.py +47 -0
- versionhq-1.1.4.4.dist-info/LICENSE +21 -0
- versionhq-1.1.4.4.dist-info/METADATA +353 -0
- versionhq-1.1.4.4.dist-info/RECORD +38 -0
- versionhq-1.1.4.4.dist-info/WHEEL +5 -0
- versionhq-1.1.4.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,22 @@
|
|
1
|
+
from typing import List
|
2
|
+
from versionhq.task.model import Task, TaskOutput
|
3
|
+
|
4
|
+
|
5
|
+
def create_raw_outputs(tasks: List[Task], task_outputs: List[TaskOutput]) -> str:
    """
    Generate a single string context by joining raw task outputs.

    Args:
        tasks: Tasks whose stored `output` is used as a fallback when
            `task_outputs` is empty.
        task_outputs: Preferred source of outputs; used whenever non-empty.

    Returns:
        The `raw` strings joined by a divider line, or "" when nothing is available.
    """
    # Single definition of the divider (it was duplicated in both branches).
    dividers = "\n\n----------\n\n"

    if task_outputs:
        return dividers.join(output.raw for output in task_outputs)

    # Fall back to outputs already stored on the tasks themselves.
    task_outputs_from_task = [task.output for task in tasks if task.output is not None]
    return dividers.join(output.raw for output in task_outputs_from_task)
|
versionhq/task/model.py
ADDED
@@ -0,0 +1,430 @@
|
|
1
|
+
import json
|
2
|
+
import threading
|
3
|
+
import uuid
|
4
|
+
from concurrent.futures import Future
|
5
|
+
from hashlib import md5
|
6
|
+
from typing import Any, Dict, List, Set, Optional, Tuple, Callable
|
7
|
+
|
8
|
+
from pydantic import (
|
9
|
+
UUID4,
|
10
|
+
BaseModel,
|
11
|
+
Field,
|
12
|
+
PrivateAttr,
|
13
|
+
field_validator,
|
14
|
+
model_validator,
|
15
|
+
)
|
16
|
+
from pydantic_core import PydanticCustomError
|
17
|
+
|
18
|
+
from versionhq._utils.process_config import process_config
|
19
|
+
from versionhq.task import TaskOutputFormat
|
20
|
+
from versionhq.tool.model import Tool, ToolCalled
|
21
|
+
|
22
|
+
|
23
|
+
class ResponseField(BaseModel):
    """
    A single field in the response schema used to describe the expected
    JSON output of a task (rendered into the prompt by `Task.output_prompt`).
    """

    # Field name to appear in the response schema.
    # NOTE(review): default is None despite the `str` annotation — confirm intended.
    title: str = Field(default=None)
    # Type name as a plain string (e.g. "str"); intentionally shadows builtin `type`.
    type: str = Field(default=None)
    # Whether the agent must include this field in its answer.
    required: bool = Field(default=True)
|
31
|
+
|
32
|
+
|
33
|
+
class TaskOutput(BaseModel):
    """
    Store the final output of the task in TaskOutput class.
    Depending on the task output format, use `raw`, `pydantic`, `json_dict` accordingly.
    """

    class AgentOutput(BaseModel):
        """
        Keep adding agents' learning and recommendation and store it in `pydantic` field of `TaskOutput` class.
        Since the TaskOutput class has `agent` field, we don't add any info on the agent that handled the task.
        """

        customer_id: str = Field(
            default=None, max_length=126, description="customer uuid"
        )
        customer_analysis: str = Field(
            default=None, max_length=256, description="analysis of the customer"
        )
        business_overview: str = Field(
            default=None,
            max_length=256,
            description="analysis of the client's business",
        )
        # `max_length` removed: it is not a valid constraint for int fields.
        cohort_timeframe: int = Field(
            default=None,
            description="Suitable cohort timeframe in days",
        )
        # default_factory=list: the original `default=list` made the default the
        # `list` type object itself rather than an empty list.
        kpi_metrics: List[str] = Field(
            default_factory=list, description="Ideal KPIs to be tracked"
        )
        assumptions: List[Dict[str, Any]] = Field(
            default_factory=list, description="assumptions to test"
        )

    task_id: UUID4 = Field(
        default_factory=uuid.uuid4, frozen=True, description="store Task ID"
    )
    raw: str = Field(default="", description="Raw output of the task")
    pydantic: Optional[BaseModel | AgentOutput] = Field(
        default=None, description="Pydantic output of task"
    )
    json_dict: Optional[Dict[str, Any]] = Field(
        default=None, description="JSON dictionary of task"
    )

    def __str__(self) -> str:
        # Priority: pydantic > json_dict > raw.
        return (
            str(self.pydantic)
            if self.pydantic
            else str(self.json_dict) if self.json_dict else self.raw
        )

    @property
    def json(self) -> Optional[str]:
        """
        Return the JSON output serialized as a string.

        Raises:
            ValueError: when no JSON output was produced for this task.
        """
        # The original compared a non-existent `self.output_format` attribute,
        # which raised AttributeError on every access; check the stored dict instead.
        if self.json_dict is None:
            raise ValueError(
                """
                Invalid output format requested.
                If you would like to access the JSON output,
                please make sure to set the output_json property for the task
                """
            )
        return json.dumps(self.json_dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert json_output and pydantic_output to a dictionary."""
        output_dict = {}
        if self.json_dict:
            output_dict.update(self.json_dict)
        elif self.pydantic:
            output_dict.update(self.pydantic.model_dump())
        return output_dict
|
106
|
+
|
107
|
+
|
108
|
+
class Task(BaseModel):
    """
    Task to be executed by the agent or the team.
    Each task must have a description and at least one expected output format either Pydantic, Raw, or JSON, with necessary fields in ResponseField.
    Then output will be stored in TaskOutput class.
    """

    __hash__ = object.__hash__

    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="unique identifier for the object, not set by user",
    )
    name: Optional[str] = Field(default=None)
    description: str = Field(description="Description of the actual task")
    # Backup of the pre-interpolation description; see interpolate_inputs().
    _original_description: str = PrivateAttr(default=None)

    # output
    expected_output_raw: bool = Field(default=False)
    expected_output_json: bool = Field(default=True)
    expected_output_pydantic: bool = Field(default=False)
    output_field_list: Optional[List[ResponseField]] = Field(
        # default_factory so each Task instance gets its own list.
        default_factory=lambda: [ResponseField(title="output", type="str", required=True)]
    )
    output: Optional[TaskOutput] = Field(
        default=None, description="store the final task output in TaskOutput class"
    )

    # task setup
    context: Optional[List["Task"]] = Field(
        default=None, description="other tasks whose outputs should be used as context"
    )
    tools_called: Optional[List[ToolCalled]] = Field(
        default_factory=list, description="tools that the agent can use for this task"
    )
    take_tool_res_as_final: bool = Field(
        default=False,
        description="when set True, tools res will be stored in the `TaskOutput`",
    )

    prompt_context: Optional[str] = None
    async_execution: bool = Field(
        default=False,
        description="whether the task should be executed asynchronously or not",
    )
    config: Optional[Dict[str, Any]] = Field(
        default=None, description="configuration for the agent"
    )
    callback: Optional[Any] = Field(
        default=None, description="callback to be executed after the task is completed."
    )

    # recording
    processed_by_agents: Set[str] = Field(default_factory=set)
    used_tools: int = 0
    tools_errors: int = 0
    delegations: int = 0

    @property
    def output_prompt(self) -> str:
        """
        Draft prompts on the output format by converting `output_field_list` to dictionary.
        """
        output_dict = {
            item.title: f"your answer in {item.type}" for item in self.output_field_list
        }
        output_prompt = f"""
        The output formats include the following format:
        {output_dict}
        """
        return output_prompt

    @property
    def expected_output_formats(self) -> List[TaskOutputFormat]:
        """Return every output format enabled on this task."""
        outputs = []
        if self.expected_output_json:
            outputs.append(TaskOutputFormat.JSON)
        if self.expected_output_pydantic:
            outputs.append(TaskOutputFormat.PYDANTIC)
        if self.expected_output_raw:
            outputs.append(TaskOutputFormat.RAW)
        return outputs

    @property
    def key(self) -> str:
        """Stable identity key derived from the description and the primary output format."""
        output_format = (
            TaskOutputFormat.JSON
            if self.expected_output_json
            else (
                TaskOutputFormat.PYDANTIC
                if self.expected_output_pydantic
                else TaskOutputFormat.RAW
            )
        )
        # str() the enum member: joining the member object itself raised
        # TypeError because str.join() requires str items.
        source = [self.description, str(output_format)]
        return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

    @property
    def summary(self) -> str:
        """Human-readable summary of the task, used in prompts and logs."""
        return f"""
        Task: {self.id} - {self.description}
        "task_description": {self.description}
        "task_expected_output": {self.output_prompt}
        "task_tools": {", ".join([tool_called.tool.name for tool_called in self.tools_called])}
        """

    # validators
    @model_validator(mode="before")
    @classmethod
    def process_model_config(cls, values: Dict[str, Any]):
        """Merge any `config` dict into the raw field values before validation."""
        return process_config(values_to_update=values, model_class=cls)

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        """`id` is generated internally; reject any user-supplied value."""
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def validate_required_fields(self):
        """Ensure fields that must arrive directly or via config are present."""
        required_fields = [
            "description",
        ]
        for field in required_fields:
            if getattr(self, field) is None:
                raise ValueError(
                    f"{field} must be provided either directly or through config"
                )
        return self

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Task":
        """
        Set attributes based on the agent configuration.
        """
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self

    @model_validator(mode="after")
    def validate_output_format(self):
        """At least one of the three output formats must be enabled."""
        # Fixed typo `expeceted_output_raw` (raised AttributeError whenever the
        # first two flags were False).
        if (
            not self.expected_output_json
            and not self.expected_output_pydantic
            and not self.expected_output_raw
        ):
            # PydanticCustomError requires (error_type, message_template);
            # the single-argument call raised TypeError instead of the intended error.
            raise PydanticCustomError(
                "invalid_output_format", "Need to choose at least one output format.", {}
            )
        return self

    @model_validator(mode="after")
    def backup_description(self):
        """Keep the original (un-interpolated) description for later re-interpolation."""
        if self._original_description is None:
            self._original_description = self.description
        return self

    def prompt(self, customer: str | None = None, product_overview: str | None = None) -> str:
        """
        Return the prompt of the task.

        The original signature used `customer=str | None` — i.e. the default was
        the *type expression* object, not None.
        """
        task_slices = [
            self.description,
            f"Customer overview: {customer}",
            f"Product overview: {product_overview}",
            f"Follow the output formats decribled below. Your response should NOT contain any other element from the following formats.: {self.output_prompt}",
        ]
        return "\n".join(task_slices)

    def _export_output(
        self, result: Any
    ) -> Tuple[Optional[Dict[str, Any]], Optional[BaseModel]]:
        """
        Convert the agent's raw result into (json_dict, pydantic) outputs
        according to the expected-output flags.

        Note: the original annotation advertised (BaseModel, dict) but the method
        returns (json, pydantic) — callers unpack `output_json, output_pydantic`.
        """
        output_pydantic: Optional[BaseModel] = None
        output_json: Optional[Dict[str, Any]] = None
        dict_output = None

        if isinstance(result, str):
            try:
                dict_output = json.loads(result)
            except json.JSONDecodeError:
                try:
                    # ast.literal_eval safely parses Python literals; the previous
                    # bare eval() executed arbitrary model output (code injection).
                    import ast

                    dict_output = ast.literal_eval(result)
                except (ValueError, SyntaxError, TypeError, MemoryError):
                    dict_output = None

        if self.expected_output_json:
            if isinstance(result, dict):
                output_json = result
            elif isinstance(result, BaseModel):
                output_json = result.model_dump()
            else:
                output_json = dict_output

        if self.expected_output_pydantic:
            if isinstance(result, BaseModel):
                output_pydantic = result
            elif isinstance(result, dict):
                # NOTE(review): no schema is available here to build a pydantic
                # model from a dict, so only the JSON output is populated.
                output_json = result
            else:
                output_pydantic = None

        return output_json, output_pydantic

    def _get_output_format(self) -> TaskOutputFormat:
        """Primary output format (priority: JSON > Pydantic > Raw)."""
        # The original read `self.output_json` / `self.output_pydantic`,
        # attributes that do not exist on this model (AttributeError).
        if self.expected_output_json:
            return TaskOutputFormat.JSON
        if self.expected_output_pydantic:
            return TaskOutputFormat.PYDANTIC
        return TaskOutputFormat.RAW

    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """
        Interpolate inputs into the task description and expected output.
        """
        if inputs:
            self.description = self._original_description.format(**inputs)

    # task execution
    def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
        """
        Execute the task synchronously.
        """
        return self._execute_core(agent, context)

    def execute_async(self, agent, context: Optional[str] = None) -> Future[TaskOutput]:
        """
        Execute the task asynchronously.
        """
        future: Future[TaskOutput] = Future()
        threading.Thread(
            daemon=True,
            target=self._execute_task_async,
            args=(agent, context, future),
        ).start()
        return future

    def _execute_task_async(
        self, agent, context: Optional[str], future: Future[TaskOutput]
    ) -> None:
        """Execute the task asynchronously with context handling."""
        result = self._execute_core(agent, context)
        future.set_result(result)

    def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
        """
        Run the core execution logic of the task.
        """
        self.prompt_context = context
        result = agent.execute_task(task=self, context=context)
        output_json, output_pydantic = self._export_output(result)
        task_output = TaskOutput(
            task_id=self.id,
            raw=result,
            pydantic=output_pydantic,
            json_dict=output_json,
        )
        self.output = task_output
        self.processed_by_agents.add(agent.role)

        if self.callback:
            self.callback(self.output)

        # TODO(review): restore timing, telemetry-span handling, and output-file
        # saving once those features are implemented.
        return task_output
|
401
|
+
|
402
|
+
|
403
|
+
class ConditionalTask(Task):
    """
    A task that can be conditionally executed based on the output of another task.
    Use this with `Team`.
    """

    condition: Callable[[TaskOutput], bool] = Field(
        default=None,
        # The original description ("max. number of retries ...") was copy-pasted
        # from an unrelated field and described something else entirely.
        description="predicate deciding whether this task should run, given the previous task's output",
    )

    def __init__(
        self,
        condition: Callable[[Any], bool],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.condition = condition

    def should_execute(self, context: TaskOutput) -> bool:
        """
        Decide whether the conditional task should be executed based on the provided context.
        Return `True` if it should be executed.
        """
        return self.condition(context)

    def get_skipped_task_output(self):
        """Return an empty TaskOutput representing this task having been skipped."""
        return TaskOutput(task_id=self.id, raw="", pydantic=None, json_dict=None)
|
File without changes
|