versionhq 1.2.4.3__py3-none-any.whl → 1.2.4.6__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registries.
- versionhq/__init__.py +12 -3
- versionhq/_prompt/auto_feedback.py +2 -2
- versionhq/_prompt/model.py +24 -29
- versionhq/_utils/__init__.py +2 -0
- versionhq/_utils/convert_img_url.py +15 -0
- versionhq/_utils/is_valid_enum.py +25 -0
- versionhq/_utils/llm_as_a_judge.py +0 -1
- versionhq/_utils/usage_metrics.py +35 -14
- versionhq/agent/inhouse_agents.py +2 -2
- versionhq/agent/model.py +100 -29
- versionhq/agent_network/formation.py +6 -12
- versionhq/agent_network/model.py +4 -5
- versionhq/clients/customer/__init__.py +2 -2
- versionhq/clients/product/model.py +4 -4
- versionhq/clients/workflow/model.py +1 -1
- versionhq/llm/llm_vars.py +7 -6
- versionhq/llm/model.py +3 -1
- versionhq/storage/task_output_storage.py +2 -2
- versionhq/task/model.py +112 -100
- versionhq/task_graph/draft.py +4 -4
- versionhq/task_graph/model.py +34 -30
- versionhq/tool/composio/__init__.py +0 -0
- versionhq/tool/{composio_tool.py → composio/model.py} +4 -5
- versionhq/tool/gpt/__init__.py +6 -0
- versionhq/tool/gpt/_enum.py +28 -0
- versionhq/tool/gpt/cup.py +145 -0
- versionhq/tool/gpt/file_search.py +163 -0
- versionhq/tool/gpt/web_search.py +89 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/METADATA +4 -4
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/RECORD +34 -26
- /versionhq/tool/{composio_tool_vars.py → composio/params.py} +0 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/LICENSE +0 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/WHEEL +0 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/top_level.txt +0 -0
versionhq/clients/product/model.py
CHANGED
@@ -1,11 +1,11 @@
 import uuid
-from abc import ABC
-from typing import …
+from abc import ABC
+from typing import Optional, List

-from pydantic import UUID4, …
+from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
 from pydantic_core import PydanticCustomError

-from versionhq.tool.…
+from versionhq.tool.composio.params import ComposioAppName


 class ProductProvider(ABC, BaseModel):
versionhq/clients/workflow/model.py
CHANGED
@@ -10,7 +10,7 @@ from versionhq.agent.model import Agent
 from versionhq.agent_network.model import AgentNetwork
 from versionhq.clients.product.model import Product
 from versionhq.clients.customer.model import Customer
-from versionhq.tool.…
+from versionhq.tool.composio.params import ComposioAppName


 class MessagingComponent(ABC, BaseModel):
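Note: per the file list above, `versionhq/tool/composio_tool_vars.py` moved to `versionhq/tool/composio/params.py`, so downstream imports change with it. A minimal migration sketch (assuming `ComposioAppName` is the only symbol you import):

    # 1.2.4.3 — module no longer exists
    # from versionhq.tool.composio_tool_vars import ComposioAppName

    # 1.2.4.6
    from versionhq.tool.composio.params import ComposioAppName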
versionhq/llm/llm_vars.py
CHANGED
@@ -27,8 +27,9 @@ MODELS = {
         "o1-preview",
     ],
     "gemini": [
-        "gemini/gemini-…
-        "gemini/gemini-…
+        "gemini/gemini-2.0-flash",
+        "gemini/gemini-2.0-flash-thinking-exp",
+        "gemini/gemini-2.0-flash-lite-preview-02-05",
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
@@ -75,10 +76,10 @@ ENV_VARS = {
     "huggingface": ["HUGGINGFACE_API_KEY", ],
     "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
     "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
 }


-
 """
 Max input token size by the model.
 """
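Note: the new `azure_ai` entry means Azure AI credentials are resolved from the environment like the other providers. A minimal sketch (values are placeholders):

    import os

    os.environ["AZURE_AI_API_KEY"] = "<your-api-key>"        # placeholder
    os.environ["AZURE_AI_API_BASE"] = "<your-endpoint-url>"  # placeholder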
@@ -90,9 +91,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "o1-preview": 128000,
     "o1-mini": 128000,

-    "gemini/gemini-1.5-flash": 1048576,
-    "gemini/gemini-1.5-pro": 2097152,
     "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-2.0-flash": 1048576,
+    "gemini/gemini-2.0-flash-thinking-exp": 1048576,
+    "gemini/gemini-2.0-flash-lite-preview-02-05": 1048576,

     "claude-3-7-sonnet-latest": 200000,
     "claude-3-5-haiku-latest": 200000,
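Note: the Gemini 1.5 entries are retired and the 2.0 family (flash, flash-thinking-exp, flash-lite-preview) is registered, each with a 1,048,576-token context window. A quick lookup sketch against the dictionaries shown above:

    from versionhq.llm.llm_vars import MODELS, LLM_CONTEXT_WINDOW_SIZES

    model = "gemini/gemini-2.0-flash"
    assert model in MODELS["gemini"]
    print(LLM_CONTEXT_WINDOW_SIZES[model])  # 1048576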
@@ -128,7 +130,6 @@ LLM_CONTEXT_WINDOW_SIZES = {
 """
 Params for litellm.completion().
 """
-
 PARAMS = {
     "litellm": [
         "api_base",
versionhq/llm/model.py
CHANGED
@@ -228,6 +228,9 @@ class LLM(BaseModel):
         if self.context_window_size and valid_config["max_tokens"] > self.context_window_size:
             valid_config["max_tokens"] = self.context_window_size

+        if "model" in valid_config:
+            self.model = valid_config.pop("model")
+
         self.llm_config = valid_config
         return valid_config

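Note: a `model` key passed via the config dict now overrides `LLM.model` and is removed from `llm_config` before the config is stored, so it is not forwarded to litellm a second time. Sketch of the effect (dict values illustrative):

    valid_config = {"model": "gpt-4o-mini", "max_tokens": 256}
    # after the new branch runs:
    #   self.model == "gpt-4o-mini"
    #   self.llm_config == {"max_tokens": 256}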
@@ -389,7 +392,6 @@
                 self._usages.append(res["usage"])
                 return res.choices[0].message.content

-
             except JSONSchemaValidationError as e:
                 self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
                 raise e
versionhq/storage/task_output_storage.py
CHANGED
@@ -147,8 +147,8 @@ class TaskOutputStorageHandler:
             description=str(task.description),
             raw=str(task.output.raw),
             responsible_agents=str(task.processed_agents),
-            tokens=task.…
-            latency=task.…
+            tokens=task.output.usage.total_tokens,
+            latency=task.output.usage.latency,
             score=task.output.aggregate_score if task.output.aggregate_score else "None",
         )
         self.storage.add(task=task, output=output_to_store, inputs=inputs)
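Note: token counts and latency are now read off the consolidated `task.output.usage` (a `UsageMetrics` instance) rather than fields on the task itself, e.g. after execution:

    # sketch: usage now travels on the output object
    print(task.output.usage.total_tokens, task.output.usage.latency)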
versionhq/task/model.py
CHANGED
@@ -3,7 +3,7 @@ import threading
 import datetime
 import uuid
 import inspect
-import …
+from enum import IntEnum
 from concurrent.futures import Future
 from hashlib import md5
 from typing import Any, Dict, List, Set, Optional, Callable, Type
@@ -14,11 +14,15 @@ from pydantic_core import PydanticCustomError

 import versionhq as vhq
 from versionhq.task.evaluation import Evaluation, EvaluationItem
-from versionhq.tool.model import Tool, ToolSet
+from versionhq.tool.model import Tool, ToolSet, BaseTool
+from versionhq.tool.rag_tool import RagTool
+from versionhq.tool.gpt.web_search import GPTToolWebSearch
+from versionhq.tool.gpt.file_search import GPTToolFileSearch
+from versionhq.tool.gpt.cup import GPTToolCUP
 from versionhq._utils import process_config, Logger, UsageMetrics, ErrorType


-class TaskExecutionType(…
+class TaskExecutionType(IntEnum):
     """
     Enumeration to store task execution types of independent tasks without dependencies.
     """
@@ -174,14 +178,15 @@ class TaskOutput(BaseModel):
     """
     A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
     """
-…
-…
-…
-    json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
+    task_id: UUID4 = Field(default_factory=uuid.uuid4)
+    raw: str = Field(default="")
+    json_dict: Dict[str, Any] = Field(default=None)
     pydantic: Optional[Any] = Field(default=None)
     tool_output: Optional[Any] = Field(default=None, description="stores tool result when the task takes tool output as its final output")
     callback_output: Optional[Any] = Field(default=None, description="stores task or agent callback outcome")
+    annotations: Optional[Dict[str, Any]] = Field(default=None)
     evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")
+    usage: Optional[UsageMetrics] = Field(default=None)


     def _fetch_value_of(self, key: str = None) -> Any:
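Note: `TaskOutput` now records its own `task_id`, defaults `raw` to an empty string, and carries optional `annotations` and per-run `usage`. A minimal construction sketch (values illustrative):

    from versionhq._utils import UsageMetrics

    output = TaskOutput(raw='{"answer": 42}', usage=UsageMetrics(id=uuid.uuid4()))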
@@ -236,7 +241,7 @@
         description = EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item))
         description = description + fsl_prompt if fsl_prompt else description

-        task_eval = Task(description=description, …
+        task_eval = Task(description=description, response_schema=EvaluationItem)
         res = task_eval.execute(agent=self.evaluation.eval_by)

         if res.pydantic:
@@ -308,14 +313,10 @@ class Task(BaseModel):
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
     name: Optional[str] = Field(default=None)
     description: str = Field(description="Description of the actual task")
-
-    # response format
-    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None)
-    pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
-    response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")
+    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None, description="stores response format")

     # tool usage
-    tools: Optional[List[…
+    tools: Optional[List[Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
     can_use_agent_tools: bool = Field(default=True, description="whether the agent can use their own tools when executing the task")
     tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")

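Note: the former `pydantic_output` and `response_fields` fields collapse into the single `response_schema`, which accepts either a Pydantic class or a list of `ResponseField`. A migration sketch (class name illustrative):

    class Outcome(BaseModel):
        answer: str

    # before: Task(description=..., pydantic_output=Outcome)
    # before: Task(description=..., response_fields=[ResponseField(title="answer", data_type=str)])
    task = Task(description="...", response_schema=Outcome)
    task = Task(description="...", response_schema=[ResponseField(title="answer", data_type=str)])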
@@ -340,7 +341,6 @@
     fsls: Optional[list[str]] = Field(default=None, description="stores ideal/weak responses")

     # recording
-    _usage: UsageMetrics = PrivateAttr(default=None)
     _delegations: int = 0
     processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
     output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")
@@ -365,30 +365,36 @@
         for field in required_fields:
             if getattr(self, field) is None:
                 raise ValueError( f"{field} must be provided either directly or through config")
-
-        self._usage = UsageMetrics(id=self.id)
         return self


     @model_validator(mode="after")
     def set_up_tools(self) -> Self:
-        if …
-            pass
-        else:
+        if self.tools:
             tool_list = []
             for item in self.tools:
-…
-…
-…
-…
-…
-…
+                match item:
+                    case Tool() | ToolSet() | BaseTool() | RagTool() | GPTToolCUP() | GPTToolFileSearch() | GPTToolWebSearch():
+                        tool_list.append(item)
+                    case type(item, callable):
+                        tool_list.append(Tool(func=item))
+                    case dict():
+                        tool = None
+                        try:
+                            tool = Tool(**item)
+                        except:
+                            try:
+                                tool = RagTool(**item)
+                            except:
+                                pass
+                    case _:
+                        pass
             self.tools = tool_list
         return self


     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
-        """Structures `…
+        """Structures `response_schema` into the LLM response format."""

         from versionhq.task.structured_response import StructuredOutput

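Note: `set_up_tools` now normalizes a heterogeneous `tools` list: recognized tool instances pass through, bare callables are wrapped as `Tool(func=...)`, and dicts are tried as `Tool(**item)` and then `RagTool(**item)`. Usage sketch (the dict key is an assumption about `Tool`'s fields, inferred from `Tool(func=item)` above):

    def fetch_page(url: str) -> str:
        ...

    task = Task(description="...", tools=[fetch_page, {"func": fetch_page}])
    # after validation, both entries are Tool instances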
@@ -398,28 +404,29 @@
             return response_format

         else:
-            if self.…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
-…
+            if self.response_schema:
+                if isinstance(self.response_schema, list):
+                    properties, required_fields = {}, []
+                    for i, item in enumerate(self.response_schema):
+                        if isinstance(item, ResponseField):
+                            properties.update(item._format_props())
+                            required_fields.append(item.title)
+
+                    response_schema = {
+                        "type": data_type,
+                        "properties": properties,
+                        "required": required_fields,
+                        "additionalProperties": False,
+                    }
+                    response_format = {
+                        "type": "json_schema",
+                        "json_schema": { "name": "outcome", "schema": response_schema }
+                    }

-…
-…
+                elif issubclass(self.response_schema, BaseModel):
+                    response_format = StructuredOutput(response_format=self.response_schema, provider=model_provider)._format()

-…
+            return response_format


     def _sanitize_raw_output(self, raw: str) -> Dict[str, str]:
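Note: for a `ResponseField` list, `_structure_response_format` now emits an OpenAI-style `json_schema` response format. For a single string field titled "answer" the result would look roughly like the following (the per-field property shape is an assumption about `ResponseField._format_props()`):

    {
        "type": "json_schema",
        "json_schema": {
            "name": "outcome",
            "schema": {
                "type": "object",
                "properties": {"answer": {"type": "string"}},  # via ResponseField._format_props()
                "required": ["answer"],
                "additionalProperties": False,
            },
        },
    }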
@@ -435,6 +442,7 @@
         r = re.sub(r"'\b", '"', r)
         r = r.strip()
         r = r.replace(" ", "")
+
         try:
             output = json.loads(r)
         except:
@@ -452,7 +460,6 @@
         except:
             output = ast.literal_eval(r)

-
         return output["json_schema"] if isinstance(output, dict) and "json_schema" in output else output if isinstance(output, dict) else { "output": str(r) }

@@ -471,30 +478,31 @@
             if isinstance(output, dict):
                 return output["json_schema"] if "json_schema" in output else output
             else:
-…
-…
+                output = self._sanitize_raw_output(raw=raw)
+                return output
         except:
             output = self._sanitize_raw_output(raw=raw)
-            self._usage.record_errors(type=ErrorType.FORMAT)
             return output


-    def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
+    def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel] | None:
         """
         Create pydantic output from raw or json_dict output.
         """

-…
-…
-…
-…
+        if self.response_schema and not isinstance(self.response_schema, list):
+            output_pydantic = self.response_schema
+            try:
+                json_dict = json_dict if json_dict else self._create_json_output(raw=raw)

-…
-…
-…
-…
+                for k, v in json_dict.items():
+                    setattr(output_pydantic, k, v)
+            except:
+                pass
+            return output_pydantic

-…
+        else:
+            return None


     def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
@@ -638,44 +646,47 @@

         start_dt = datetime.datetime.now()
         task_output: InstanceOf[TaskOutput] = None
-        raw_output: str = None
-        tool_output: str | list = None
-        task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
         user_prompt, dev_prompt = None, None

-        if self.tools:
-            for item in self.tools:
-                if isinstance(item, ToolSet) or isinstance(item, Tool) or type(item) == Tool:
-                    task_tools.append(item)
-
         if self.allow_delegation == True:
             agent_to_delegate = self._select_agent_to_delegate(agent=agent)
             agent = agent_to_delegate
             self._delegations += 1

-…
-…
-…
-…
-…
-…
+        user_prompt, dev_prompt, raw_output, usage = agent.execute_task(task=self, context=context)
+        match raw_output:
+            case TaskOutput():
+                raw_output.task_id = self.id
+                raw_output.usage = usage
+                task_output = raw_output
+
+            case str():
+                json_dict_output = self._create_json_output(raw=raw_output)
+                if "outcome" in json_dict_output:
+                    json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
+
+                pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output)
+                task_output = TaskOutput(
+                    task_id=self.id,
+                    raw=raw_output if raw_output is not None else "",
+                    pydantic=pydantic_output,
+                    json_dict=json_dict_output,
+                    tool_output=raw_output if self.tool_res_as_final else None,
+                    usage=usage
+                )
+
+            case None | "":
+                task_output = TaskOutput(task_id=self.id, raw="", usage=usage)
+                task_output.usage.record_errors(type=ErrorType.FORMAT)

-…
-…
-…
-…
-…
-…
-…
-…
-        task_output = TaskOutput(
-            task_id=self.id,
-            raw=raw_output if raw_output is not None else "",
-            pydantic=pydantic_output,
-            json_dict=json_dict_output,
-        )
+            case _:
+                task_output = TaskOutput(
+                    task_id=self.id,
+                    raw=raw_output,
+                    tool_output=raw_output if self.tool_res_as_final else None,
+                    usage=usage
+                )

-        self.output = task_output
         self.processed_agents.add(agent.key)

         # if self.output_file: ## disabled for now
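Note: `agent.execute_task` now returns a 4-tuple ending in a `UsageMetrics`, and the raw result is dispatched by type, so every `TaskOutput` leaves this block with `usage` attached. Caller-side sketch:

    res = task.execute(agent=agent)   # returns a TaskOutput
    print(res.usage.total_tokens)     # usage is populated on every path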
@@ -691,10 +702,9 @@
             self._pfg.user_prompts.update({ index: user_prompt })
             self._pfg.dev_prompts.update({ index: dev_prompt })

-        if …
+        if task_output.raw:
             if self.should_evaluate:
                 task_output.evaluate(task=self)
-            self.output = task_output

             self._create_short_and_long_term_memories(agent=agent, task_output=task_output)

@@ -705,11 +715,14 @@
             valid_kwargs = { k: kwargs[k] if k in kwargs else None for k in valid_keys }
             callback_res = self.callback(**valid_kwargs)
             task_output.callback_output = callback_res
-            self.output = task_output
-            self._store_logs()

         end_dt = datetime.datetime.now()
-…
+        task_output.usage.record_latency(start_dt=start_dt, end_dt=end_dt)
+        if task_output.json_dict and "output" in task_output.json_dict:
+            task_output.usage.record_errors(type=ErrorType.FORMAT)
+
+        self.output = task_output
+        self._store_logs()
         return task_output


@@ -720,7 +733,6 @@
         from versionhq._prompt.model import Prompt
         from versionhq._prompt.auto_feedback import PromptFeedbackGraph

-        # self._usage = None
         prompt = Prompt(task=self, agent=agent, context=context)
         pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
         pfg = pfg.set_up_graph()
@@ -729,18 +741,18 @@
         try:
             if self._pfg and self.output is None:
                 res, all_outputs = self._pfg.activate()
-                if all_outputs:
+                if all_outputs:
+                    res.usage = self._pfg.usage
                 return res
-
         except:
-            self.…
+            self._pfg.usage.record_errors(type=ErrorType.API)
             Logger().log(level="error", message="Failed to execute the task.", color="red")
             return None


     @property
     def key(self) -> str:
-        output_format = "json" if self.…
+        output_format = "json" if self.response_schema else "raw"
         source = [self.description, output_format]
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

versionhq/task_graph/draft.py
CHANGED
@@ -41,7 +41,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
             "https://www.geeksforgeeks.org/graph-and-its-representations/",
             ", ".join([k for k in DependencyType._member_map_.keys()]),
         ],
-        llm="gemini-2.0",
+        llm="gemini/gemini-2.0-flash",
         with_memory=with_memory,
         maxit=1,
         max_retry_limit=1,
@@ -49,7 +49,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

     task = Task(
         description=dedent(f"Design a resource-efficient workflow to achieve the following goal: {final_output_prompt}. The workflow should consist of a list of detailed tasks that represent decision making points, each with the following information:\nname: A concise name of the task\ndescription: A concise description of the task.\nconnections: A list of target tasks that this task connects to.\ndependency_types: The type of dependency between this task and each of its connected task. \noutput: key output from the task in a word.\n\nUse the following dependency types: {dep_type_prompt}.\n\nPrioritize minimizing resource consumption (computation, memory, and data transfer) when defining tasks, connections, and dependencies. Consider how data is passed between tasks and aim to reduce unnecessary data duplication or transfer. Explain any design choices made to optimize resource usage."),
-        …
+        response_schema=[
             ResponseField(title="tasks", data_type=list, items=dict, properties=[
                 ResponseField(title="name", data_type=str),
                 ResponseField(title="description", data_type=str),
@@ -73,13 +73,13 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

     for item in task_items:
         key = item["output"].lower().replace(" ", "_") if item["output"] else "output"
-        task = Task(name=item["name"], description=item["description"], …
+        task = Task(name=item["name"], description=item["description"], response_schema=[ResponseField(title=key, data_type=str)])
        tasks.append(task)
        nodes.append(Node(task=task))

     task_graph = TaskGraph(
         nodes={node.identifier: node for node in nodes},
-        …
+        concl_response_schema=final_output,
         concl=None,
         should_reform=human,
         reform_trigger_event=ReformTriggerEvent.USER_INPUT if human else None,
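Note: the draft workflow builder now pins the drafting agent to `gemini/gemini-2.0-flash` and routes all response formats through `response_schema` / `concl_response_schema`. Caller-side sketch (class name illustrative):

    class FinalOutput(BaseModel):
        summary: str

    task_graph = workflow(final_output=FinalOutput)  # the graph's conclusion follows FinalOutput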