versionhq 1.2.4.1__py3-none-any.whl → 1.2.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +3 -2
- versionhq/_prompt/auto_feedback.py +103 -0
- versionhq/_prompt/constants.py +30 -0
- versionhq/_prompt/model.py +134 -63
- versionhq/agent/model.py +25 -77
- versionhq/agent_network/formation.py +6 -5
- versionhq/task/model.py +92 -145
- versionhq/task_graph/draft.py +3 -13
- versionhq/task_graph/model.py +90 -34
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/METADATA +3 -8
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/RECORD +14 -12
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/WHEEL +1 -1
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/LICENSE +0 -0
- {versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/top_level.txt +0 -0
versionhq/task/model.py
CHANGED
@@ -4,10 +4,9 @@ import datetime
 import uuid
 import inspect
 import enum
-from textwrap import dedent
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Dict, List, Set, Optional, Callable, Type
+from typing import Any, Dict, List, Set, Optional, Callable, Type, Tuple
 from typing_extensions import Annotated, Self

 from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
@@ -16,7 +15,7 @@ from pydantic_core import PydanticCustomError
 import versionhq as vhq
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
-from versionhq._utils import process_config, Logger
+from versionhq._utils import process_config, Logger


 class TaskExecutionType(enum.Enum):
@@ -175,6 +174,7 @@ class TaskOutput(BaseModel):
     """
     A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
     """
+
     _tokens: int = PrivateAttr(default=0)

     task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
@@ -188,18 +188,18 @@


     def _to_context_prompt(self) -> str:
-        """
-
-        """
+        """Formats prompt context in text formats from the final response."""
+
         context = ""
-
-
-
-
-
-        context = self.
-
-        context = self.
+        match self.final:
+            case dict() | self.pydantic:
+                try:
+                    context = json.dumps(self.final)
+                except:
+                    context = str(self.final)
+            case _:
+                context = str(self.final)
+
         return context

@@ -252,6 +252,24 @@
         return self.evaluation


+    @property
+    def final(self) -> Any:
+        """Returns final output from the task."""
+
+        output = None
+
+        if self.callback_output:
+            output = self.callback_output
+
+        elif self.tool_output and str(self.tool_output) == self.raw: # tool_output_as_final
+            output = self.tool_output
+
+        else:
+            output = self.pydantic if self.pydantic else self.json_dict if self.json_dict else self.raw
+
+        return output
+
+
     @property
     def aggregate_score(self) -> float | int:
         return self.evaluation.aggregate_score if self.evaluation is not None else 0
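Note: the new `final` property above centralizes output precedence: `callback_output` first, then `tool_output` when it doubles as the raw response, then `pydantic`, `json_dict`, and `raw`. A minimal sketch of that resolution order (illustrative values; assumes `TaskOutput` stays importable from versionhq.task.model as shown in this file):

    from versionhq.task.model import TaskOutput

    out = TaskOutput(raw='{"score": 5}', json_dict={"score": 5})
    print(out.final)                # {"score": 5} - json_dict beats raw
    out.callback_output = "post-processed result"
    print(out.final)                # callback output now takes precedence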
@@ -280,6 +298,7 @@ class Task(BaseModel):
     description: str = Field(description="Description of the actual task")

     # response format
+    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None)
     pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
     response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")

@@ -292,6 +311,11 @@
     file: Optional[str] = Field(default=None, description="absolute file path or url in string")
     audio: Optional[str] = Field(default=None, description="absolute file path or url in string")

+    # test run
+    should_test_run: bool = Field(default=False)
+    human: bool = Field(default=False)
+    _pfg: Any = None
+
     # executing
     execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
     allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
@@ -351,121 +375,6 @@
         return self


-    def _draft_output_prompt(self, model_provider: str = None) -> str:
-        output_prompt = ""
-
-        if self.pydantic_output:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self._structure_response_format(model_provider=model_provider))
-            for k, v in self.pydantic_output.model_fields.items():
-                output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
-
-            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
-Ref. Output image: {output_formats_to_follow}
-"""
-        elif self.response_fields:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self._structure_response_format(model_provider=model_provider))
-            for item in self.response_fields:
-                if item:
-                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
-
-            output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
-Ref. Output image: {output_formats_to_follow}
-"""
-        # elif not self.tools or self.can_use_agent_tools == False:
-        else:
-            output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
-
-        # else:
-        #     output_prompt = "You will return a response in a concise manner."
-
-        return dedent(output_prompt)
-
-
-    def _draft_context_prompt(self, context: Any) -> str:
-        """
-        Create a context prompt from the given context in any format: a task object, task output object, list, dict.
-        """
-
-        context_to_add = None
-        if not context:
-            # Logger().log(level="error", color="red", message="Missing a context to add to the prompt. We'll return ''.")
-            return context_to_add
-
-        match context:
-            case str():
-                context_to_add = context
-
-            case Task():
-                if not context.output:
-                    res = context.execute()
-                    context_to_add = res._to_context_prompt()
-
-                else:
-                    context_to_add = context.output.raw
-
-            case TaskOutput():
-                context_to_add = context._to_context_prompt()
-
-
-            case dict():
-                context_to_add = str(context)
-
-            case list():
-                res = ", ".join([self._draft_context_prompt(context=item) for item in context])
-                context_to_add = res
-
-            case _:
-                pass
-
-        return dedent(context_to_add)
-
-
-    def _user_prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str:
-        """
-        Format the task prompt and cascade it to the agent.
-        """
-        output_prompt = self._draft_output_prompt(model_provider=model_provider)
-        task_slices = [self.description, output_prompt, ]
-
-        if context:
-            context_prompt = self._draft_context_prompt(context=context)
-            task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
-
-        return "\n".join(task_slices)
-
-
-    def _format_content_prompt(self) -> Dict[str, str]:
-        """Formats content (file, image, audio) prompts that added to the messages sent to the LLM."""
-
-        from pathlib import Path
-        import base64
-
-        content_messages = {}
-
-        if self.image:
-            with open(self.image, "rb") as file:
-                content = file.read()
-                if content:
-                    encoded_file = base64.b64encode(content).decode("utf-8")
-                    img_url = f"data:image/jpeg;base64,{encoded_file}"
-                    content_messages.update({ "type": "image_url", "image_url": { "url": img_url }})
-
-        if self.file:
-            if is_valid_url(self.file):
-                content_messages.update({ "type": "image_url", "image_url": self.file })
-
-        if self.audio:
-            audio_bytes = Path(self.audio).read_bytes()
-            encoded_data = base64.b64encode(audio_bytes).decode("utf-8")
-            content_messages.update({ "type": "image_url", "image_url": "data:audio/mp3;base64,{}".format(encoded_data)})
-
-        return content_messages
-
-
     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
         """Structures `response_fields` or `pydantic_output` to a LLM response format."""

@@ -495,7 +404,6 @@ Ref. Output image: {output_formats_to_follow}
                 "json_schema": { "name": "outcome", "schema": response_schema }
             }

-
         elif self.pydantic_output:
             response_format = StructuredOutput(response_format=self.pydantic_output, provider=model_provider)._format()

@@ -673,23 +581,32 @@ Ref. Output image: {output_formats_to_follow}


     # task execution
-    def execute(
-
-
-        """
-        A main method to handle task execution. Build an agent when the agent is not given.
-        """
+    def execute(self, type: TaskExecutionType = None, agent: "vhq.Agent" = None, context: Any = None) -> TaskOutput | Future[TaskOutput]:
+        """A main method to handle task execution."""
+
         type = type if type else self.execution_type if self.execution_type else TaskExecutionType.SYNC
+        agent = agent if agent else self._build_agent_from_task(task_description=self.description)
+        res = None

-        if not
-
+        if (self.should_test_run or agent.self_learn) and not self._pfg:
+            res = self._test_time_computation(agent=agent, context=context)
+            return res
+
+        # if self._pfg:
+        #     res, all_outputs = self.pfg.activate()
+        #     tokens, latency = self.pfg.usage
+        #     self._tokens = tokens
+        #     res.latency = latency
+        #     return res

         match type:
             case TaskExecutionType.SYNC:
-
+                res = self._execute_sync(agent=agent, context=context)

             case TaskExecutionType.ASYNC:
-
+                res = self._execute_async(agent=agent, context=context)
+
+        return res


     def _execute_sync(self, agent, context: Optional[Any] = None) -> TaskOutput:
@@ -710,14 +627,14 @@ Ref. Output image: {output_formats_to_follow}


     def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
-        """
-
-        """
+        """A core method to execute a single task."""
+
         task_output: InstanceOf[TaskOutput] = None
         raw_output: str = None
         tool_output: str | list = None
         task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
         started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()
+        user_prompt, dev_prompt = None, None

         if self.tools:
             for item in self.tools:
@@ -731,14 +648,14 @@ Ref. Output image: {output_formats_to_follow}

         if self.tool_res_as_final == True:
             started_at = datetime.datetime.now()
-            tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            user_prompt, dev_prompt, tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
             raw_output = str(tool_output) if tool_output else ""
             ended_at = datetime.datetime.now()
             task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=raw_output)

         else:
             started_at = datetime.datetime.now()
-            raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            user_prompt, dev_prompt, raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
             ended_at = datetime.datetime.now()

         json_dict_output = self._create_json_output(raw=raw_output)
@@ -767,6 +684,11 @@ Ref. Output image: {output_formats_to_follow}
         # )
         # self._save_file(content)

+        if self._pfg:
+            index = self._pfg.index
+            self._pfg.user_prompts.update({ index: user_prompt })
+            self._pfg.dev_prompts.update({ index: dev_prompt })
+
         if raw_output:
             if self.should_evaluate:
                 task_output.evaluate(task=self)
@@ -787,6 +709,31 @@ Ref. Output image: {output_formats_to_follow}
         return task_output


+    def _test_time_computation(self, agent, context: Optional[Any]) -> TaskOutput | None:
+        """Handles test-time computation."""
+
+        from versionhq.task_graph.model import ReformTriggerEvent
+        from versionhq._prompt.model import Prompt
+        from versionhq._prompt.auto_feedback import PromptFeedbackGraph
+
+        prompt = Prompt(task=self, agent=agent, context=context)
+        pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
+        pfg = pfg.set_up_graph()
+        self._pfg = pfg
+
+        # try:
+        if self._pfg and self.output is None:
+            res, _ = self._pfg.activate()
+            tokens, latency = self._pfg.usage
+            self._tokens = tokens
+            res.latency = latency
+            return res
+
+        # except:
+        #     Logger().log(level="error", message="Failed to execute the task.", color="red")
+        #     return None, None
+
+
     @property
     def key(self) -> str:
         output_format = "json" if self.response_fields else "pydantic" if self.pydantic_output is not None else "raw"
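Note: taken together, these hunks reroute execution: `execute()` now auto-builds an agent when none is given and short-circuits into `_test_time_computation()`, which wraps the task in a `PromptFeedbackGraph` and copies the graph's token and latency usage back onto the result. A hedged usage sketch (assumes `Task` is re-exported at the package top level, as the file's own `import versionhq as vhq` suggests):

    import versionhq as vhq

    task = vhq.Task(
        description="Summarize the quarterly report in three bullet points.",
        should_test_run=True,  # new field: route this run through the feedback graph
        human=False,           # new field: True gates reform on user input
    )
    res = task.execute()       # agent is auto-built; returns a TaskOutput
    print(res.final)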
versionhq/task_graph/draft.py
CHANGED
@@ -10,7 +10,7 @@ sys.modules['pydantic.main'].ModelMetaclass = ModelMetaclass

 from versionhq.agent.model import Agent
 from versionhq.task.model import ResponseField
-from versionhq.task_graph.model import TaskGraph, Task, DependencyType, Node
+from versionhq.task_graph.model import TaskGraph, Task, DependencyType, Node, ReformTriggerEvent
 from versionhq._utils.logger import Logger


@@ -81,7 +81,8 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
         nodes={node.identifier: node for node in nodes},
         concl_format=final_output,
         concl=None,
-        should_reform=
+        should_reform=human,
+        reform_trigger_event=ReformTriggerEvent.USER_INPUT if human else None,
     )

     for res in task_items:
@@ -97,15 +98,4 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F

     task_graph.visualize()

-    if human:
-        print('Proceed? Y/n:')
-        x = input()
-
-        if x.lower() == "y":
-            print("ok. generating agent network")
-
-        else:
-            request = input("request?")
-            print('ok. regenerating the graph based on your input: ', request)
-
     return task_graph
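Note: the deleted Y/n prompt did not disappear; it moved into `TaskGraph._handle_human_input()` (see task_graph/model.py below). `workflow()` now only flags the drafted graph with `should_reform=human` and `ReformTriggerEvent.USER_INPUT`, deferring the checkpoint to graph activation. A sketch of the call site (`Idea` is a hypothetical response model):

    from pydantic import BaseModel
    from versionhq.task_graph.draft import workflow

    class Idea(BaseModel):
        title: str
        pitch: str

    # human=True no longer blocks here; the Y/n checkpoint fires later,
    # during TaskGraph.activate(), via the reform trigger set above.
    graph = workflow(final_output=Idea, context="plan a product launch", human=True)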
versionhq/task_graph/model.py
CHANGED
@@ -15,7 +15,14 @@ from pydantic_core import PydanticCustomError

 from versionhq.agent.model import Agent
 from versionhq.task.model import Task, TaskOutput, Evaluation
-from versionhq._utils
+from versionhq._utils import Logger
+
+
+class ReformTriggerEvent(enum.Enum):
+    USER_INPUT = 1 # ask human
+    TEST_TIME_COMPUTATION = 2 # mismatch between actual responses and expected outcome
+    ERROR_DETECTION = 3 # response error
+

 class ConditionType(enum.Enum):
     AND = 1
@@ -46,7 +53,6 @@ class Condition(BaseModel):
         res = method(**args) if args else method()
         return res

-
     def condition_met(self) -> bool:
         if not self.methods:
             return True
@@ -54,7 +60,6 @@ class Condition(BaseModel):
         if len(self.methods) == 1:
             for k, v in self.methods.items():
                 return self._execute_method(key=k, method=v)
-
         else:
             cond_list = []
             for k, v in self.methods.items():
@@ -96,8 +101,8 @@ class Node(BaseModel):

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     task: InstanceOf[Task] = Field(default=None)
-    in_degree_nodes: List[
-    out_degree_nodes: List[
+    in_degree_nodes: List["Node"] = Field(default_factory=list, description="list of Node objects")
+    out_degree_nodes: List["Node"] = Field(default_factory=list, description="list of Node objects")
     assigned_to: InstanceOf[Agent] = Field(default=None)
     status: TaskStatus = Field(default=TaskStatus.NOT_STARTED)

@@ -107,29 +112,31 @@
         if v:
             raise PydanticCustomError("may_not_set_field", "This field is not to be set by client.", {})

-
     def is_independent(self) -> bool:
         return not self.in_degree_nodes and not self.out_degree_nodes

     def handle_task_execution(self, agent: Agent = None, context: str = None, response_format: Type[BaseModel] = None) -> TaskOutput | None:
         """Executes the task and updates its status"""

-        self.status = TaskStatus.IN_PROGRESS
-
         if not self.task:
             Logger().log(level="error", message="Missing a task to execute. We'll return None.", color="red")
             self.status = TaskStatus.ERROR
             return None

-
-
-        res = self.task.execute(agent=agent, context=context)
+        if self.status == TaskStatus.COMPLETED:
+            return self.task.output

-
-
+        else:
+            self.status = TaskStatus.IN_PROGRESS
+            agent = agent if agent else self.assigned_to
+            self.task.pydantic_output = self.task.pydantic_output if self.task.pydantic_output else response_format if type(response_format) == BaseModel else None
+            res = self.task.execute(agent=agent, context=context)

-
-
+            if isinstance(res, Future): # activate async
+                res = res.result()
+
+            self.status = TaskStatus.COMPLETED if res else TaskStatus.ERROR
+            return res

     @property
     def in_degrees(self) -> int:
@@ -238,7 +245,6 @@ class Edge(BaseModel):
         """
         Activates the edge to initiate task execution of the target node.
         """
-
         if not self.source or not self.target:
             Logger(verbose=True).log(level="warning", message="Cannot find source or target nodes. We'll return None.", color="yellow")
             return None
@@ -251,10 +257,11 @@ class Edge(BaseModel):
             import time
             time.sleep(self.lag)

-        context = self.source.task.output.
+        context = self.source.task.output._to_context_prompt() if self.data_transfer else None
         res = self.target.handle_task_execution(context=context, response_format=response_format)
         return res

+
     @property
     def label(self):
         """Human friendly label for visualization."""
@@ -283,6 +290,14 @@ class Graph(ABC, BaseModel):
         else:
             return None

+    def _format_Graph(self) -> None:
+        """Formats dxGraph using edges and nodes."""
+        edges, nodes = [k for k in self.edges.keys()], [k for k in self.nodes.keys()]
+        if self.graph:
+            self.graph.update(edges=edges, nodes=nodes)
+        else:
+            self.graph = nx.Graph(directed=self.directed, edges=edges, nodes=nodes)
+
     def add_node(self, node: Node) -> None:
         if node.identifier in self.nodes.keys():
             return
@@ -333,13 +348,14 @@
         critical_edge = max(edges, key=lambda item: item['weight']) if edges else None
         return critical_edge.target if critical_edge else None

-    def find_path(self, source: Optional[str]
+    def find_path(self, target: str, source: Optional[str] = None, weight: Optional[Any] = None) -> Any:
         try:
+            self._format_Graph()
             return nx.shortest_path(self.graph, source=source, target=target, weight=weight)
-        except
+        except:
             return None

-    def find_all_paths(self,
+    def find_all_paths(self, source: str, target: str) -> List[Any]:
         return list(nx.all_simple_paths(self.graph, source=source, target=target))

     def find_critical_path(self) -> tuple[List[Any], int, Dict[str, int]]:
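Note: `find_path()` now calls `_format_Graph()` before querying, so lookups see nodes and edges added after construction, and the bare `except` turns a missing path into None rather than an exception. One caveat worth flagging: networkx treats extra keyword arguments to `nx.Graph(...)` as graph attributes, so the constructor branch of `_format_Graph()` does not populate the graph; only the `self.graph.update(edges=edges, nodes=nodes)` branch inserts them. Sketch of a lookup (identifiers are illustrative):

    path = task_graph.find_path(target=node_b.identifier, source=node_a.identifier)
    if path is None:
        ...  # no path found, or the lookup raised and was swallowed by the bare except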
@@ -378,7 +394,8 @@

 class TaskGraph(Graph):
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
-    should_reform: bool =
+    should_reform: bool = False
+    reform_trigger_event: Optional[ReformTriggerEvent] = None
     outputs: Dict[str, TaskOutput] = Field(default_factory=dict, description="stores node identifier and TaskOutput")
     concl_template: Optional[Dict[str, Any] | Type[BaseModel]] = Field(default=None, description="stores final response format in Pydantic class or JSON dict")
     concl: Optional[TaskOutput] = Field(default=None, description="stores the final or latest conclusion of the entire task graph")
@@ -432,10 +449,8 @@
         """
         Add an edge that connect task 1 (source) and task 2 (target) using task_node.name as an identifier
         """
-
-
-            Logger().log(level="error", message="Edge attributes are missing.", color="red")
-
+        # if not edge_attributes:
+        #     Logger().log(level="error", message="Edge attributes are missing.", color="red")
         edge = Edge()
         for k in Edge.model_fields.keys():
             v = edge_attributes.get(k, None)
@@ -443,7 +458,6 @@
                 setattr(edge, k, v)
             else:
                 pass
-
         self.add_edge(source, target, edge)


@@ -554,15 +568,10 @@

         # find a shortest path to each in-degree node of the node and see if dependency met.
         node = self._return_node_object(target)
-
-        edge_status = []
+        edges = [v for k, v in self.edges.items() if v.target.identifier == node.identifier]
         res = None

-        for item in
-            edge = self.find_path(source=item, target=target)
-            edge_status.append(dict(edge=edge if edge else None, dep_met=edge.dependency_met() if edge else False))
-
-        if len([item for item in edge_status if item["dep_met"] == True]) == len(sources):
+        if not edges or len([item for item in edges if item.dependency_met()]) == len(edges):
             res = node.handle_task_execution()
             self.outputs.update({ target: res })

@@ -598,7 +607,7 @@
             res = node.handle_task_execution()
             self.outputs.update({ node.identifier: res })
         else:
-            for
+            for edge in self.edges.values():
                 res = edge.activate()
                 node_identifier = edge.target.identifier
                 self.outputs.update({ node_identifier: res })
@@ -614,6 +623,15 @@
                 node_identifier = edge.target.identifier
                 self.outputs.update({ node_identifier: res })

+
+        if self.should_reform:
+            target = [k for k in self.outputs.keys()][-1] if self.outputs else self.find_start_nodes()[0].identifier if self.find_start_nodes() else None
+
+            if not target:
+                pass
+            else:
+                res, _ = self.handle_reform(target=target)
+
         self.concl = res
         self.concl_template = self.concl_template if self.concl_template else res.pydantic.__class__ if res.pydantic else None
         # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
@@ -639,6 +657,44 @@
         return eval


+    def _handle_human_input(self) -> str | None:
+        """Handles input from human."""
+        request = None
+
+        print('Proceed? Y/n:')
+        x = input()
+
+        if x.lower() == "y":
+            Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
+
+        else:
+            request = input("Request?")
+
+            if request:
+                Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
+            else:
+                Logger().log(message="Cannot recognize your request.", level="error", color="red")
+
+        return request
+
+
+    def handle_reform(self, target: str = None) -> Self:
+        task_description = "Improve the given output: "
+        if target:
+            output = self.outputs[target].raw if self.outputs and target in self.outputs else None
+            if output:
+                task_description += str(output)
+
+        if self.reform_trigger_event == ReformTriggerEvent.USER_INPUT:
+            request = self._handle_human_input()
+            task_description += f"You MUST follow the instruction: {request}"
+
+        new_node = Node(task=Task(description=task_description))
+        self.add_node(node=new_node)
+        self.add_dependency(source=target, target=new_node.identifier)
+        return self.activate(target=new_node.identifier)
+
+
     @property
     def usage(self) -> Tuple[int, float]:
         """Returns aggregate number of consumed tokens and job latency in ms during the activation"""
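Note: the reform loop grows the graph instead of restarting it: `handle_reform()` appends a node whose task description embeds the prior raw output, concatenates the typed request when the trigger is `ReformTriggerEvent.USER_INPUT`, wires a dependency from the reformed target, and re-activates from the new node. A hedged sketch (assumes an already-built `TaskGraph` named `tg`, and that `activate()` returns the conclusion plus per-node outputs, matching the `res, _ = self.handle_reform(...)` unpacking above):

    from versionhq.task_graph.model import ReformTriggerEvent

    tg.should_reform = True
    tg.reform_trigger_event = ReformTriggerEvent.USER_INPUT
    res, outputs = tg.activate()  # after the last node: "Proceed? Y/n"; anything
                                  # but "y" spawns an improvement node from the
                                  # typed request and continues execution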
{versionhq-1.2.4.1.dist-info → versionhq-1.2.4.2.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.4.1
-Summary:
+Version: 1.2.4.2
+Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License

@@ -29,7 +29,7 @@ Project-URL: Homepage, https://versi0n.io
 Project-URL: Documentation, https://docs.versi0n.io
 Project-URL: Repository, https://github.com/versionHQ/multi-agent-system
 Project-URL: Issues, https://github.com/versionHQ/multi-agent-system/issues
-Keywords:
+Keywords: autonomic agent networks,deep agent,agentic orchestration framework
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.11
@@ -47,12 +47,9 @@ License-File: LICENSE
 Requires-Dist: regex==2024.11.6
 Requires-Dist: requests>=2.32.3
 Requires-Dist: pydantic>=2.10.6
-Requires-Dist: werkzeug>=3.1.3
 Requires-Dist: typing>=0.0.0
 Requires-Dist: json-repair>=0.0.0
 Requires-Dist: litellm>=1.55.8
-Requires-Dist: openai>=1.64.0
-Requires-Dist: composio-openai>=0.6.9
 Requires-Dist: composio>=0.1.0
 Requires-Dist: setuptools>=75.6.0
 Requires-Dist: wheel>=0.45.1
@@ -60,10 +57,8 @@ Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: appdirs>=1.4.4
 Requires-Dist: langchain>=0.3.14
 Requires-Dist: langchain-openai>=0.2.14
-Requires-Dist: composio-langchain>=0.6.12
 Requires-Dist: chromadb>=0.6.3
 Requires-Dist: wheel>=0.45.1
-Requires-Dist: envoy>=0.0.3
 Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0