versionhq 1.2.4.3__py3-none-any.whl → 1.2.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/_prompt/auto_feedback.py +1 -1
- versionhq/_prompt/model.py +13 -21
- versionhq/agent/inhouse_agents.py +2 -2
- versionhq/agent/model.py +12 -5
- versionhq/agent_network/formation.py +3 -3
- versionhq/agent_network/model.py +1 -1
- versionhq/llm/llm_vars.py +7 -4
- versionhq/llm/model.py +3 -1
- versionhq/task/model.py +41 -42
- versionhq/task_graph/draft.py +4 -4
- versionhq/task_graph/model.py +4 -4
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/METADATA +4 -4
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/RECORD +17 -17
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/LICENSE +0 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/WHEEL +0 -0
- {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/top_level.txt +0 -0
versionhq/_prompt/auto_feedback.py
CHANGED

@@ -67,7 +67,7 @@ class PromptFeedbackGraph(TaskGraph):
         if not agents:
             return None
 
-        self.
+        self.concl_response_schema = base_task.response_schema
         base_agent.callbacks.append(self._reflect)
         init_node = Node(task=base_task, assigned_to=base_agent)
         self.add_node(init_node)
versionhq/_prompt/model.py
CHANGED

@@ -2,7 +2,7 @@
 from typing import Dict, List, Tuple, Any
 from textwrap import dedent
 
-from pydantic import InstanceOf
+from pydantic import InstanceOf, BaseModel
 
 from versionhq._utils import is_valid_url
 
@@ -25,34 +25,26 @@ class Prompt:
 
 
     def _draft_output_prompt(self) -> str:
-        """Drafts prompt for output
+        """Drafts prompt for output format using `response_schema`."""
 
-        from versionhq.
+        from versionhq.task.model import ResponseField
 
         output_prompt = ""
-
+        output_formats_to_follow = dict()
 
-        if self.task.
-
-
-
-
+        if self.task.response_schema:
+            if isinstance(self.task.response_schema, list):
+                for item in self.task.response_schema:
+                    if isinstance(item, ResponseField):
+                        output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
 
-
-
-
-        """
-        elif self.task.response_fields:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self.task._structure_response_format(model_provider=model_provider))
-            for item in self.task.response_fields:
-                if item:
-                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+            elif issubclass(self.task.response_schema, BaseModel):
+                for k, v in self.task.response_schema.model_fields.items():
+                    output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
 
             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
 Ref. Output image: {output_formats_to_follow}
-"""
+        """
         else:
             output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
 
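The rewritten `_draft_output_prompt` branches on the shape of `response_schema`: a list of `ResponseField`s produces one hint per field, while a `BaseModel` subclass is walked through `model_fields`. A minimal standalone sketch of that branching, using a stand-in `ResponseField` dataclass rather than the package's own class:

    from dataclasses import dataclass
    from pydantic import BaseModel

    @dataclass
    class ResponseField:  # stand-in for versionhq.task.model.ResponseField
        title: str
        data_type: type

    def draft_output_hints(response_schema) -> dict:
        """Mirrors the branching added to Prompt._draft_output_prompt."""
        hints = {}
        if isinstance(response_schema, list):
            for item in response_schema:
                if isinstance(item, ResponseField):
                    hints[item.title] = f"<Return your answer in {item.data_type.__name__}>"
        elif isinstance(response_schema, type) and issubclass(response_schema, BaseModel):
            for k, v in response_schema.model_fields.items():
                hints[k] = f"<Return your answer in {v.annotation}>"
        return hints

    class Demo(BaseModel):
        score: int
        summary: str

    print(draft_output_hints([ResponseField(title="score", data_type=int)]))
    print(draft_output_hints(Demo))  # keys from model_fields, values from annotations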
versionhq/agent/inhouse_agents.py
CHANGED

@@ -30,7 +30,7 @@ vhq_task_evaluator = Agent(
 vhq_formation_planner = Agent(
     role="vhq-Formation Planner",
     goal="Plan a formation of agents based on the given task descirption.",
-    llm="gemini/gemini-2.0-flash
+    llm="gemini/gemini-2.0-flash",
     llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
     maxit=1,
     max_retry_limit=1,
@@ -46,7 +46,7 @@ vhq_formation_planner = Agent(
 vhq_agent_creator = Agent(
     role="vhq-Agent Creator",
     goal="build an agent that can handle the given task",
-    llm="gemini/gemini-2.0-flash
+    llm="gemini/gemini-2.0-flash",
     maxit=1,
     max_retry_limit=1,
 )
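Both in-house agents now pin `llm="gemini/gemini-2.0-flash"`. A hedged sketch of constructing a comparable agent yourself, assuming only the keyword arguments that appear in this diff (`role`, `goal`, `llm`, `llm_config`, `maxit`, `max_retry_limit`) and an installed `versionhq` with a Gemini API key configured:

    import versionhq as vhq

    planner = vhq.Agent(
        role="Formation Planner",  # illustrative role text
        goal="Plan a formation of agents for the given task description.",
        llm="gemini/gemini-2.0-flash",
        llm_config=dict(top_p=0.8, topK=40, temperature=0.9),  # forwarded to the LLM call
        maxit=1,             # assumption: caps self-iterations, as in the definitions above
        max_retry_limit=1,
    )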
versionhq/agent/model.py
CHANGED

@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
+from typing import Any, Dict, List, Optional, TypeVar, Callable, Type, Tuple
 from typing_extensions import Self
 from dotenv import load_dotenv
 
@@ -475,9 +475,16 @@ class Agent(BaseModel):
         return self
 
 
-    def start(
+    def start(
+        self,
+        context: Any = None,
+        tool_res_as_final: bool = False,
+        image: str = None,
+        file: str = None,
+        audio: str = None
+    ) -> Tuple[Any | None, Any | None]:
         """
-        Defines and executes a task
+        Defines and executes a task, then returns TaskOutput object with the generated task.
         """
 
         if not self.role:
@@ -491,14 +498,14 @@ class Agent(BaseModel):
 
         task = Task(
             description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
-
+            response_schema=Output,
             tool_res_as_final=tool_res_as_final,
             image=image, #REFINEME - query memory/knowledge or self create
             file=file,
             audio=audio,
         )
         res = task.execute(agent=self, context=context)
-        return res
+        return res, task
 
 
     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
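`Agent.start()` now accepts multimodal inputs as keyword arguments and returns a `(result, task)` tuple rather than the result alone, so callers can inspect the auto-generated `Task`. A sketch of the updated call site, assuming an installed `versionhq` and a configured model key:

    import versionhq as vhq

    agent = vhq.Agent(
        role="Summarizer",
        goal="Summarize the given context in one sentence.",
        llm="gemini/gemini-2.0-flash",
    )

    # start() builds a Task from the agent's goal and executes it.
    res, task = agent.start(context="versionhq builds agent networks for task automation.")
    if res:
        print(res.raw)           # raw model output on the TaskOutput
        print(task.description)  # the auto-generated task definition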
versionhq/agent_network/formation.py
CHANGED

@@ -74,8 +74,8 @@ def form_agent_network(
     leader_agent: str
 
     vhq_task = Task(
-        description=f"Design a team of specialized agents to fully automate the following task and
-
+        description=f"Design a team of specialized agents to fully automate the following task and deliver the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+        response_schema=Outcome
     )
 
     if agents:
@@ -120,7 +120,7 @@ def form_agent_network(
         except:
             pass
         output = create_model("Output", **fields) if fields else None
-        _task = Task(description=task_descriptions[i],
+        _task = Task(description=task_descriptions[i], response_schema=output)
         created_tasks.append(_task)
 
         if len(created_tasks) <= len(created_agents):
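`form_agent_network` builds each drafted task's schema dynamically with `pydantic.create_model` and now hands it to `Task` via `response_schema` (the removed keyword is truncated in this diff). The dynamic-model step in isolation, with hypothetical field names:

    from pydantic import create_model

    # fields maps field name -> (type, default); the names here are illustrative.
    fields = {"insight": (str, ...), "confidence": (float, ...)}
    Output = create_model("Output", **fields) if fields else None

    # form_agent_network then wires it into the drafted task:
    #   _task = Task(description=task_descriptions[i], response_schema=output)
    print(list(Output.model_fields))  # ['insight', 'confidence']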
versionhq/agent_network/model.py
CHANGED

@@ -206,7 +206,7 @@ class AgentNetwork(BaseModel):
         for unassgined_task in unassigned_tasks:
             task = Task(
                 description=f"Based on the following task summary, draft an agent's role and goal in concise manner. Task summary: {unassgined_task.summary}",
-
+                response_schema=[
                     ResponseField(title="goal", data_type=str, required=True),
                     ResponseField(title="role", data_type=str, required=True),
                 ],
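The same inline `ResponseField` list, written as a caller would pass it now, assuming the top-level exports used in the README examples later in this diff:

    import versionhq as vhq

    task = vhq.Task(
        description="Draft an agent's role and goal in a concise manner.",
        response_schema=[
            vhq.ResponseField(title="goal", data_type=str, required=True),
            vhq.ResponseField(title="role", data_type=str, required=True),
        ],
    )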
versionhq/llm/llm_vars.py
CHANGED

@@ -27,8 +27,9 @@ MODELS = {
         "o1-preview",
     ],
     "gemini": [
-        "gemini/gemini-
-        "gemini/gemini-
+        "gemini/gemini-2.0-flash",
+        "gemini/gemini-2.0-flash-thinking-exp",
+        "gemini/gemini-2.0-flash-lite-preview-02-05",
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
@@ -75,6 +76,7 @@ ENV_VARS = {
     "huggingface": ["HUGGINGFACE_API_KEY", ],
     "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
     "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
 }
 
 
@@ -90,9 +92,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "o1-preview": 128000,
     "o1-mini": 128000,
 
-    "gemini/gemini-1.5-flash": 1048576,
-    "gemini/gemini-1.5-pro": 2097152,
     "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-2.0-flash": 1048576,
+    "gemini/gemini-2.0-flash-thinking-exp": 1048576,
+    "gemini/gemini-2.0-flash-lite-preview-02-05": 1048576,
 
     "claude-3-7-sonnet-latest": 200000,
     "claude-3-5-haiku-latest": 200000,
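With the new `azure_ai` entry, the required credentials for a provider can be checked against `ENV_VARS` before a call. A minimal sketch assuming the dictionary shape shown above; `missing_env_vars` is a hypothetical helper, not part of the package:

    import os

    # Subset of versionhq.llm.llm_vars.ENV_VARS, copied from this diff.
    ENV_VARS = {
        "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
        "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
    }

    def missing_env_vars(provider: str) -> list[str]:
        """Returns the required variables that are not set for a provider."""
        return [k for k in ENV_VARS.get(provider, []) if not os.environ.get(k)]

    print(missing_env_vars("azure_ai"))  # e.g. ['AZURE_AI_API_KEY', 'AZURE_AI_API_BASE']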
versionhq/llm/model.py
CHANGED

@@ -228,6 +228,9 @@ class LLM(BaseModel):
         if self.context_window_size and valid_config["max_tokens"] > self.context_window_size:
             valid_config["max_tokens"] = self.context_window_size
 
+        if "model" in valid_config:
+            self.model = valid_config.pop("model")
+
         self.llm_config = valid_config
         return valid_config
 
@@ -389,7 +392,6 @@ class LLM(BaseModel):
             self._usages.append(res["usage"])
             return res.choices[0].message.content
 
-
         except JSONSchemaValidationError as e:
             self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
             raise e
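The added guard pops a stray `model` key out of the validated config, presumably so a model id set through `llm_config` replaces the current one instead of being forwarded as an extra kwarg. The behavior in isolation, as a sketch:

    def apply_config(model: str, valid_config: dict) -> tuple[str, dict]:
        """Mirrors the new guard in LLM: a 'model' key in the config
        overrides the model id and is removed from the kwargs."""
        if "model" in valid_config:
            model = valid_config.pop("model")
        return model, valid_config

    model, cfg = apply_config("gpt-4o", {"model": "gemini/gemini-2.0-flash", "max_tokens": 256})
    print(model, cfg)  # gemini/gemini-2.0-flash {'max_tokens': 256}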
versionhq/task/model.py
CHANGED

@@ -236,7 +236,7 @@ class TaskOutput(BaseModel):
             description = EVALUATE.format(task_description=task.description, task_output=self.raw, eval_criteria=str(item))
             description = description + fsl_prompt if fsl_prompt else description
 
-            task_eval = Task(description=description,
+            task_eval = Task(description=description, response_schema=EvaluationItem)
             res = task_eval.execute(agent=self.evaluation.eval_by)
 
             if res.pydantic:
@@ -308,11 +308,7 @@ class Task(BaseModel):
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
     name: Optional[str] = Field(default=None)
     description: str = Field(description="Description of the actual task")
-
-    # response format
-    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None)
-    pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
-    response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")
+    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None, description="stores response format")
 
     # tool usage
     tools: Optional[List[ToolSet | Tool | Any]] = Field(default_factory=list, description="tools that the agent can use aside from their tools")
@@ -388,7 +384,7 @@ class Task(BaseModel):
 
 
     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
-        """Structures `
+        """Structures `response_schema` into the LLM response format."""
 
         from versionhq.task.structured_response import StructuredOutput
 
@@ -398,28 +394,29 @@ class Task(BaseModel):
             return response_format
 
         else:
-            if self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if self.response_schema:
+                if isinstance(self.response_schema, list):
+                    properties, required_fields = {}, []
+                    for i, item in enumerate(self.response_schema):
+                        if isinstance(item, ResponseField):
+                            properties.update(item._format_props())
+                            required_fields.append(item.title)
+
+                    response_schema = {
+                        "type": data_type,
+                        "properties": properties,
+                        "required": required_fields,
+                        "additionalProperties": False,
+                    }
+                    response_format = {
+                        "type": "json_schema",
+                        "json_schema": { "name": "outcome", "schema": response_schema }
+                    }
 
-
-
+                elif issubclass(self.response_schema, BaseModel):
+                    response_format = StructuredOutput(response_format=self.response_schema, provider=model_provider)._format()
 
-
+            return response_format
 
 
     def _sanitize_raw_output(self, raw: str) -> Dict[str, str]:
@@ -435,6 +432,7 @@ class Task(BaseModel):
         r = re.sub(r"'\b", '"', r)
         r = r.strip()
         r = r.replace(" ", "")
+
         try:
             output = json.loads(r)
         except:
@@ -452,7 +450,6 @@ class Task(BaseModel):
         except:
             output = ast.literal_eval(r)
 
-
         return output["json_schema"] if isinstance(output, dict) and "json_schema" in output else output if isinstance(output, dict) else { "output": str(r) }
 
 
@@ -471,30 +468,32 @@ class Task(BaseModel):
             if isinstance(output, dict):
                 return output["json_schema"] if "json_schema" in output else output
             else:
-
-
+                output = self._sanitize_raw_output(raw=raw)
+                return output
         except:
             output = self._sanitize_raw_output(raw=raw)
             self._usage.record_errors(type=ErrorType.FORMAT)
             return output
 
 
-    def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
+    def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel] | None:
         """
         Create pydantic output from raw or json_dict output.
         """
 
-
-
-
-
+        if self.response_schema and not isinstance(self.response_schema, list):
+            output_pydantic = self.response_schema
+            try:
+                json_dict = json_dict if json_dict else self._create_json_output(raw=raw)
 
-
-
-
-
+                for k, v in json_dict.items():
+                    setattr(output_pydantic, k, v)
+            except:
+                pass
+            return output_pydantic
 
-
+        else:
+            return None
 
 
     def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
@@ -666,7 +665,7 @@ class Task(BaseModel):
         if "outcome" in json_dict_output:
             json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
 
-        pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output)
+        pydantic_output = self._create_pydantic_output(raw=raw_output, json_dict=json_dict_output)
 
         task_output = TaskOutput(
             task_id=self.id,
@@ -740,7 +739,7 @@ class Task(BaseModel):
 
     @property
     def key(self) -> str:
-        output_format = "json" if self.
+        output_format = "json" if self.response_schema else "raw"
         source = [self.description, output_format]
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
 
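For a `ResponseField` list, `_structure_response_format` now emits an OpenAI-style `json_schema` response format. A hand-built sketch of the resulting dict for a two-field schema; the `str -> "string"` property mapping is an assumption, since `ResponseField._format_props` is internal:

    # Approximate output of _structure_response_format for
    # [ResponseField(title="goal", data_type=str), ResponseField(title="role", data_type=str)]
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "outcome",
            "schema": {
                "type": "object",
                "properties": {
                    "goal": {"type": "string"},  # assumed _format_props output
                    "role": {"type": "string"},
                },
                "required": ["goal", "role"],
                "additionalProperties": False,
            },
        },
    }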
versionhq/task_graph/draft.py
CHANGED

@@ -41,7 +41,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
         "https://www.geeksforgeeks.org/graph-and-its-representations/",
         ", ".join([k for k in DependencyType._member_map_.keys()]),
     ],
-    llm="gemini-2.0",
+    llm="gemini/gemini-2.0-flash",
     with_memory=with_memory,
     maxit=1,
    max_retry_limit=1,
@@ -49,7 +49,7 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
 
     task = Task(
         description=dedent(f"Design a resource-efficient workflow to achieve the following goal: {final_output_prompt}. The workflow should consist of a list of detailed tasks that represent decision making points, each with the following information:\nname: A concise name of the task\ndescription: A concise description of the task.\nconnections: A list of target tasks that this task connects to.\ndependency_types: The type of dependency between this task and each of its connected task. \noutput: key output from the task in a word.\n\nUse the following dependency types: {dep_type_prompt}.\n\nPrioritize minimizing resource consumption (computation, memory, and data transfer) when defining tasks, connections, and dependencies. Consider how data is passed between tasks and aim to reduce unnecessary data duplication or transfer. Explain any design choices made to optimize resource usage."),
-
+        response_schema=[
             ResponseField(title="tasks", data_type=list, items=dict, properties=[
                 ResponseField(title="name", data_type=str),
                 ResponseField(title="description", data_type=str),
@@ -73,13 +73,13 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
 
     for item in task_items:
         key = item["output"].lower().replace(" ", "_") if item["output"] else "output"
-        task = Task(name=item["name"], description=item["description"],
+        task = Task(name=item["name"], description=item["description"], response_schema=[ResponseField(title=key, data_type=str)])
         tasks.append(task)
         nodes.append(Node(task=task))
 
     task_graph = TaskGraph(
         nodes={node.identifier: node for node in nodes},
-
+        concl_response_schema=final_output,
         concl=None,
         should_reform=human,
         reform_trigger_event=ReformTriggerEvent.USER_INPUT if human else None,
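The drafting task's schema nests child fields inside a list-typed `ResponseField` via `items` and `properties`. Reassembled as a caller would write it, assuming those keyword arguments behave as shown above:

    import versionhq as vhq

    schema = [
        vhq.ResponseField(title="tasks", data_type=list, items=dict, properties=[
            vhq.ResponseField(title="name", data_type=str),
            vhq.ResponseField(title="description", data_type=str),
            # ...connections, dependency_types, and output fields as in the diff
        ]),
    ]

    task = vhq.Task(
        description="Design a resource-efficient workflow.",  # abbreviated
        response_schema=schema,
    )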
versionhq/task_graph/model.py
CHANGED

@@ -14,7 +14,7 @@ from pydantic import BaseModel, InstanceOf, Field, UUID4, field_validator, model
 from pydantic_core import PydanticCustomError
 
 from versionhq.agent.model import Agent
-from versionhq.task.model import Task, TaskOutput, Evaluation
+from versionhq.task.model import Task, TaskOutput, Evaluation, ResponseField
 from versionhq._utils import Logger, UsageMetrics, ErrorType
 
 
@@ -129,7 +129,7 @@ class Node(BaseModel):
         else:
             self.status = TaskStatus.IN_PROGRESS
             agent = agent if agent else self.assigned_to
-            self.task.
+            self.task.response_schema = self.task.response_schema if self.task.response_schema else response_format if type(response_format) == BaseModel or isinstance(response_format, list) else None
             res = self.task.execute(agent=agent, context=context)
 
             if isinstance(res, Future): # activate async
@@ -399,7 +399,7 @@ class TaskGraph(Graph):
     should_reform: bool = False
     reform_trigger_event: Optional[ReformTriggerEvent] = None
     outputs: Dict[str, TaskOutput] = Field(default_factory=dict, description="stores node identifier and TaskOutput")
-
+    concl_response_schema: Optional[List[ResponseField] | Type[BaseModel]] = Field(default=None, description="stores final response schema in Pydantic class or response fields")
     concl: Optional[TaskOutput] = Field(default=None, description="stores the final or latest conclusion of the entire task graph")
 
 
@@ -669,7 +669,7 @@ class TaskGraph(Graph):
             res, _ = self.handle_reform(target=target)
 
         self.concl = res
-        self.
+        self.concl_response_schema = self.concl_response_schema if self.concl_response_schema else res.pydantic.__class__ if res.pydantic else None
         # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
         self._handle_usage()
         return res, self.outputs
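`TaskGraph` now stores the desired final schema in `concl_response_schema` (a pydantic class or a `ResponseField` list) and, after the graph runs, backfills it from `res.pydantic.__class__` when it was left unset. A sketch of supplying it up front, mirroring the `draft.py` call above and assuming `TaskGraph` accepts these fields as shown there:

    from pydantic import BaseModel
    from versionhq.task_graph.model import TaskGraph

    class FinalReport(BaseModel):
        summary: str
        next_steps: list[str]

    graph = TaskGraph(
        nodes={},                           # node wiring omitted for brevity
        concl_response_schema=FinalReport,  # final schema for the graph's conclusion
        concl=None,
    )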
{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.4.3
+Version: 1.2.4.5
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -295,7 +295,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Amazing task",
-
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )
@@ -317,13 +317,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
-versionhq/__init__.py,sha256=
-versionhq/_prompt/auto_feedback.py,sha256=
+versionhq/__init__.py,sha256=8XLNCUY-3bjs8eI6mMUDBZLeY_Gj39QTut0tpzzyE3c,3026
+versionhq/_prompt/auto_feedback.py,sha256=QDYd8mKlluDaOjHIfL9B8Tr5tg40tsDVyBXqCuUdTCU,3800
 versionhq/_prompt/constants.py,sha256=DOwUFnVVObEFqgnaMCDnW8fnw1oPMgS8JAqOiTuqleI,932
-versionhq/_prompt/model.py,sha256=
+versionhq/_prompt/model.py,sha256=kokH7axDDPEZKPBmrOKi5L0aZLtApdVOJDKsMjkJmvw,8020
 versionhq/_utils/__init__.py,sha256=UggL2r-idlWDh0cIPFLyJ7AvO17NLzhjheW4IBFLBj4,300
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/is_valid_url.py,sha256=m8Mswvb-90FJtx1Heq6hPFDbwGgrv_R3wSbZQmEPM9Q,379
@@ -11,15 +11,15 @@ versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7
 versionhq/_utils/usage_metrics.py,sha256=zaoH6xjWX69UrQJmViBiX3sEUpnwSoHaapCPfWU2oM8,2632
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent/inhouse_agents.py,sha256=
-versionhq/agent/model.py,sha256=
+versionhq/agent/inhouse_agents.py,sha256=D2WAiXCYsnQK3_Fe7CbbtvXsHWOaN6vde6m_QoW7fH4,2629
+versionhq/agent/model.py,sha256=auIrSWqt4NnANgA2YOCS2l9kxPCWkZG7YUD22NjrkAA,24810
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=dkfuATUQ2g2WoUKkmgAIch-RB--bektGoQaUlsDOn0g,529
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent_network/formation.py,sha256=
-versionhq/agent_network/model.py,sha256=
+versionhq/agent_network/formation.py,sha256=jXCc9dS9gOgjNoPNoqKaqKPrHqmcFIvR2JwKYTgiQW0,7505
+versionhq/agent_network/model.py,sha256=CYkIU1W1Ijh5DQD18vRcD1g8myZWmqAYNS0PlRnEX-o,15899
 versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -36,8 +36,8 @@ versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08
 versionhq/knowledge/source_docling.py,sha256=XpavmLvh4dLcuTikj8MCE9KG52oQMafy7_wBneliMK0,4994
 versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/llm_vars.py,sha256=
-versionhq/llm/model.py,sha256=
+versionhq/llm/llm_vars.py,sha256=fl9MOjPzBEIymp99BNLD39VmB7rQ9L4YDnfLNXbUZws,5896
+versionhq/llm/model.py,sha256=m4OaFgGWKCZjmfN-OY3KoqG1K4T31UF7QVkYUcnpjdg,17273
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
 versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -51,13 +51,13 @@ versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,74
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluation.py,sha256=qQSA5ZWTWA3he54ystsYpTKXJWv68gBL6DCq8ZW1bl8,3813
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=C5QttgJrfhx0UL82845ZBIhhc8oLiQkAWz2i6UkskMM,29023
 versionhq/task/structured_response.py,sha256=tqOHpch8CVmMj0aZXjdDWtPNcVmBW8DVZnBvPBwS4PM,5053
 versionhq/task/TEMPLATES/Description.py,sha256=hKhpbz0ztbkUMXz9KiL-P40fis9OB5ICOdL9jCtgAhU,864
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
-versionhq/task_graph/draft.py,sha256=
-versionhq/task_graph/model.py,sha256=
+versionhq/task_graph/draft.py,sha256=mVhCHDH-7N-SQRssE50KGIAgd9gdvdeWjt8ofm-SYI4,4943
+versionhq/task_graph/model.py,sha256=l4Alvdtdl-fwYG7eMo655HF0zx1HkKRiPiST_Ra7hzg,29305
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
 versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
@@ -66,8 +66,8 @@ versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1
 versionhq/tool/model.py,sha256=ve9C4WyiRjQigOU0hRWVxtSUWAQNntlmeW-_DL0_lJY,12328
 versionhq/tool/rag_tool.py,sha256=dW5o-83V4bMFFJEj3PUm7XjblwrYJGmZVBlCpPj6CeM,3852
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.4.
-versionhq-1.2.4.
-versionhq-1.2.4.
-versionhq-1.2.4.
-versionhq-1.2.4.
+versionhq-1.2.4.5.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.4.5.dist-info/METADATA,sha256=gwFpoEsZC3Jfo5k2bvpZBPix_pWZCJ0h_UDDgRfrsBw,21146
+versionhq-1.2.4.5.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+versionhq-1.2.4.5.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.4.5.dist-info/RECORD,,
{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/LICENSE
File without changes

{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/WHEEL
File without changes

{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.5.dist-info}/top_level.txt
File without changes