versionhq 1.2.3.8__py3-none-any.whl → 1.2.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +3 -2
- versionhq/_prompt/auto_feedback.py +103 -0
- versionhq/_prompt/constants.py +30 -0
- versionhq/_prompt/model.py +218 -0
- versionhq/_utils/__init__.py +1 -0
- versionhq/_utils/is_valid_url.py +15 -0
- versionhq/agent/model.py +33 -67
- versionhq/agent_network/formation.py +18 -10
- versionhq/agent_network/model.py +2 -6
- versionhq/knowledge/source_docling.py +3 -19
- versionhq/llm/model.py +8 -12
- versionhq/task/evaluation.py +1 -1
- versionhq/task/model.py +142 -142
- versionhq/task/structured_response.py +3 -1
- versionhq/task_graph/draft.py +11 -19
- versionhq/task_graph/model.py +90 -34
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/METADATA +11 -16
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/RECORD +21 -17
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/WHEEL +1 -1
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/LICENSE +0 -0
- {versionhq-1.2.3.8.dist-info → versionhq-1.2.4.2.dist-info}/top_level.txt +0 -0
versionhq/agent_network/formation.py
CHANGED

@@ -47,9 +47,12 @@ def form_agent_network(
                Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
                formation = None

-            case int()
+            case int():
                formation = Formation(int(formation))

+            case float():
+                formation = Formation(round(formation))
+
            case _:
                Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
                formation = None
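Note: the new `case float():` branch means numeric formations of either type now coerce to a `Formation` member. A minimal sketch of the dispatch, assuming `Formation` is an int-backed enum (member values below are placeholders; only SUPERVISING and HYBRID appear elsewhere in this diff):

    from enum import IntEnum

    class Formation(IntEnum):          # placeholder members for illustration
        SUPERVISING = 1
        HYBRID = 2

    def coerce(formation):
        match formation:
            case int():
                return Formation(int(formation))
            case float():
                return Formation(round(formation))   # added in 1.2.4.2
            case _:
                return None

    assert coerce(1) is Formation.SUPERVISING
    assert coerce(1.4) is Formation.SUPERVISING      # round(1.4) == 1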
@@ -64,14 +67,14 @@ def form_agent_network(
    prompt_expected_outcome = expected_outcome if isinstance(expected_outcome, str) else str(expected_outcome.model_dump()) if type(expected_outcome) == BaseModel else ""

    class Outcome(BaseModel):
-        formation:
+        formation: str
        agent_roles: list[str]
        task_descriptions: list[str]
        task_outcomes: list[list[str]]
        leader_agent: str

    vhq_task = Task(
-        description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the
+        description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
        pydantic_output=Outcome
    )

@@ -80,13 +83,17 @@ def form_agent_network(

    res = vhq_task.execute(agent=vhq_formation_planner, context=context)

-    formation_keys =
-
+    formation_keys = []
+    if hasattr(res.pydantic, "formation"):
+        formation_keys = [k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
+    elif "formation" in res.json_dict:
+        formation_keys = [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()]
+
    _formation = Formation[formation_keys[0]] if formation_keys else Formation.SUPERVISING

    network_tasks = []
    members = []
-    leader = str(res.pydantic.leader_agent) if res.pydantic else str(res.json_dict["leader_agent"])
+    leader = str(res.pydantic.leader_agent) if res.pydantic and hasattr(res.pydantic, "leader_agent") else str(res.json_dict["leader_agent"]) if "leader_agent" in res.json_dict else None

    agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
    created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
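Note: the rewritten lookup no longer assumes the planner returned a pydantic object: it checks `res.pydantic` first, falls back to `res.json_dict`, and defaults to `Formation.SUPERVISING`. A sketch of the same guard with a stubbed result object (the stub is hypothetical; `Formation._member_map_` is the stdlib enum mapping the diff uses):

    from enum import IntEnum
    from types import SimpleNamespace

    class Formation(IntEnum):          # placeholder members
        SUPERVISING = 1
        HYBRID = 2

    def resolve_formation(res) -> Formation:
        keys = []
        if hasattr(res.pydantic, "formation"):
            keys = [k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
        elif "formation" in res.json_dict:
            keys = [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()]
        return Formation[keys[0]] if keys else Formation.SUPERVISING

    res = SimpleNamespace(pydantic=None, json_dict={"formation": "hybrid"})
    assert resolve_formation(res) is Formation.HYBRID   # JSON fallback path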
@@ -132,13 +139,13 @@ def form_agent_network(

    if len(created_tasks) <= len(created_agents):
        for i in range(len(created_tasks)):
-            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
            member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
            members.append(member)

        for i in range(len(created_tasks), len(created_agents)):
            try:
-                is_manager = bool(created_agents[i].role.lower() == leader.lower())
+                is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
                member_w_o_task = Member(agent=created_agents[i], is_manager=is_manager)
                members.append(member_w_o_task)
            except:
@@ -146,7 +153,7 @@ def form_agent_network(

    elif len(created_tasks) > len(created_agents):
        for i in range(len(created_agents)):
-            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
            member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
            members.append(member)

@@ -154,7 +161,8 @@ def form_agent_network(


    if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
-
+        role = leader if leader else "Leader"
+        manager = Member(agent=Agent(role=role), is_manager=True)
        members.append(manager)

    members.sort(key=lambda x: x.is_manager == False)
versionhq/agent_network/model.py
CHANGED
@@ -306,12 +306,8 @@ class AgentNetwork(BaseModel):
        return res, task_graph


-    def launch(
-
-    ) -> Tuple[TaskOutput, TaskGraph]:
-        """
-        Launch the agent network - executing tasks and recording their outputs.
-        """
+    def launch(self, kwargs_pre: Optional[Dict[str, str]] = None, kwargs_post: Optional[Dict[str, Any]] = None, start_index: int = None) -> Tuple[TaskOutput, TaskGraph]:
+        """Launches agent network by executing tasks in the network and recording the outputs"""

        self._assign_tasks()

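Note: a hedged call sketch of the consolidated `launch` signature (`network` stands for any configured `AgentNetwork`; the roles of `kwargs_pre`/`kwargs_post` are inferred from their names, not confirmed by this diff):

    res, task_graph = network.launch(
        kwargs_pre={"input": "draft brief"},   # assumption: forwarded to the starting task(s)
        kwargs_post={"verbose": True},         # assumption: forwarded to downstream task(s)
        start_index=0,                         # assumption: index of the task node to begin from
    )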
versionhq/knowledge/source_docling.py
CHANGED

@@ -1,6 +1,5 @@
 from pathlib import Path
-from typing import Iterator, List
-from urllib.parse import urlparse
+from typing import Iterator, List

 try:
     from docling.datamodel.base_models import InputFormat
@@ -20,7 +19,7 @@ from pydantic import Field

 from versionhq.knowledge.source import BaseKnowledgeSource
 from versionhq.storage.utils import fetch_db_storage_path
-from versionhq._utils
+from versionhq._utils import KNOWLEDGE_DIRECTORY, is_valid_url


 class DoclingSource(BaseKnowledgeSource):
@@ -83,21 +82,6 @@ class DoclingSource(BaseKnowledgeSource):
            yield chunk.text


-    @staticmethod
-    def _validate_url(url: str) -> bool:
-        try:
-            result = urlparse(url)
-            return all(
-                [
-                    result.scheme in ("http", "https"),
-                    result.netloc,
-                    len(result.netloc.split(".")) >= 2,  # Ensure domain has TLD
-                ]
-            )
-        except Exception:
-            return False
-
-
    def model_post_init(self, _) -> None:
        self.valid_file_paths = self.validate_content()
        self.content.extend(self._load_content())
@@ -109,7 +93,7 @@ class DoclingSource(BaseKnowledgeSource):
        if isinstance(path, str):
            if path.startswith(("http://", "https://")):
                try:
-                    if
+                    if is_valid_url(path):
                        processed_paths.append(path)
                    else:
                        raise ValueError(f"Invalid URL format: {path}")
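Note: the removed `_validate_url` static method moved into a shared helper (`versionhq/_utils/is_valid_url.py`, +15 lines per the file list above). A sketch of that helper, assuming it carries over the removed method's logic verbatim:

    from urllib.parse import urlparse

    def is_valid_url(url: str) -> bool:
        try:
            result = urlparse(url)
            return all(
                [
                    result.scheme in ("http", "https"),
                    result.netloc,
                    len(result.netloc.split(".")) >= 2,  # ensure the domain has a TLD
                ]
            )
        except Exception:
            return False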
versionhq/llm/model.py
CHANGED
@@ -8,12 +8,13 @@ from contextlib import contextmanager
 from typing import Any, Dict, List, Optional
 from typing_extensions import Self

+import litellm
 from litellm import JSONSchemaValidationError, get_supported_openai_params
 from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

 from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS, ENV_VARS
 from versionhq.tool.model import Tool, ToolSet
-from versionhq._utils
+from versionhq._utils import Logger


 load_dotenv(override=True)
@@ -47,7 +48,6 @@ class FilteredStream:
 @contextmanager
 def suppress_warnings():
     with warnings.catch_warnings():
-        import litellm
         litellm.set_verbose = False
         warnings.filterwarnings(action="ignore")
         old_stdout = sys.stdout
@@ -102,7 +102,6 @@ class LLM(BaseModel):
        """
        Validate the given model, provider, interface provider.
        """
-        import litellm
        litellm.drop_params = True

        self._init_model_name = self.model
@@ -180,7 +179,6 @@ class LLM(BaseModel):
        """
        Set up valid config params after setting up a valid model, provider, interface provider names.
        """
-        import litellm
        litellm.drop_params = True

        self._tokens = 0
@@ -208,9 +206,7 @@ class LLM(BaseModel):
        valid_config, valid_keys = dict(), list()

        if self.model:
-            valid_keys = get_supported_openai_params(
-                model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion"
-            )
+            valid_keys = get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")

        if not valid_keys:
            valid_keys = PARAMS.get("common")
@@ -269,7 +265,6 @@ class LLM(BaseModel):


    def _supports_stop_words(self) -> bool:
-        import litellm
        supported_params = get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider)
        return "stop" in supported_params if supported_params else False

@@ -282,7 +277,6 @@ class LLM(BaseModel):


    def _set_callbacks(self, callbacks: List[Any]):
-        import litellm
        callback_types = [type(callback) for callback in callbacks]
        for callback in litellm.success_callback[:]:
            if type(callback) in callback_types:
@@ -306,7 +300,6 @@ class LLM(BaseModel):
        """
        Execute LLM based on the agent's params and model params.
        """
-        import litellm
        litellm.drop_params = True
        litellm.set_verbose = True

@@ -318,8 +311,12 @@ class LLM(BaseModel):
        res, tool_res = None, ""
        cred = self._set_env_vars()

-        if
+        if self.provider == "gemini":
+            self.response_format = { "type": "json_object" } if not tools else None
+        else:
            self.response_format = response_format
+
+        if not tools:
            params = self._create_valid_params(config=config)
            res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
            self._tokens += int(res["usage"]["total_tokens"])
@@ -327,7 +324,6 @@ class LLM(BaseModel):

        else:
            try:
-                self.response_format = { "type": "json_object" } if tool_res_as_final and self.provider != "gemini" else response_format
                self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
                params = self._create_valid_params(config=config)
                res = litellm.completion(model=self.model, messages=messages, **params, **cred)
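Note: the net effect of the last two hunks is that JSON mode for Gemini is decided once, up front, instead of per-branch via the removed `tool_res_as_final and self.provider != "gemini"` check. A condensed restatement of the new rule (a sketch, not the literal method body):

    # condensed from the branch added in the diff above
    if self.provider == "gemini":
        # request JSON mode only for tool-free calls; with tools, leave it unset
        self.response_format = {"type": "json_object"} if not tools else None
    else:
        self.response_format = response_format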
versionhq/task/evaluation.py
CHANGED
versionhq/task/model.py
CHANGED
@@ -4,10 +4,9 @@ import datetime
 import uuid
 import inspect
 import enum
-from textwrap import dedent
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Dict, List, Set, Optional, Callable, Type
+from typing import Any, Dict, List, Set, Optional, Callable, Type, Tuple
 from typing_extensions import Annotated, Self

 from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
@@ -175,6 +174,7 @@ class TaskOutput(BaseModel):
    """
    A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
    """
+
    _tokens: int = PrivateAttr(default=0)

    task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
@@ -187,19 +187,19 @@ class TaskOutput(BaseModel):
    evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")


-    def
-        """
-
-        """
+    def _to_context_prompt(self) -> str:
+        """Formats prompt context in text formats from the final response."""
+
        context = ""
-
-
-
-
-
-        context = self.
-
-        context = self.
+        match self.final:
+            case dict() | self.pydantic:
+                try:
+                    context = json.dumps(self.final)
+                except:
+                    context = str(self.final)
+            case _:
+                context = str(self.final)
+
        return context


@@ -252,6 +252,24 @@ class TaskOutput(BaseModel):
        return self.evaluation


+    @property
+    def final(self) -> Any:
+        """Returns final output from the task."""
+
+        output = None
+
+        if self.callback_output:
+            output = self.callback_output
+
+        elif self.tool_output and str(self.tool_output) == self.raw: # tool_output_as_final
+            output = self.tool_output
+
+        else:
+            output = self.pydantic if self.pydantic else self.json_dict if self.json_dict else self.raw
+
+        return output
+
+
    @property
    def aggregate_score(self) -> float | int:
        return self.evaluation.aggregate_score if self.evaluation is not None else 0
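Note: the new `final` property gives callers a single accessor with fixed precedence: `callback_output`, then `tool_output` when it was stored as the raw result, then `pydantic` / `json_dict` / `raw`. The same precedence as a standalone sketch (a stand-in function; the real property lives on the pydantic model):

    def final(callback_output, tool_output, raw, pydantic=None, json_dict=None):
        if callback_output:
            return callback_output
        elif tool_output and str(tool_output) == raw:   # tool output was stored as the final raw result
            return tool_output
        return pydantic if pydantic else json_dict if json_dict else raw

    assert final(None, None, '{"a": 1}', json_dict={"a": 1}) == {"a": 1}   # json_dict beats raw
    assert final("post-processed", None, '{"a": 1}', json_dict={"a": 1}) == "post-processed"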
@@ -280,6 +298,7 @@ class Task(BaseModel):
    description: str = Field(description="Description of the actual task")

    # response format
+    response_schema: Optional[Type[BaseModel] | List[ResponseField]] = Field(default=None)
    pydantic_output: Optional[Type[BaseModel]] = Field(default=None, description="store Pydantic class as structured response format")
    response_fields: Optional[List[ResponseField]] = Field(default_factory=list, description="store list of ResponseField as structured response format")

@@ -288,6 +307,15 @@ class Task(BaseModel):
    can_use_agent_tools: bool = Field(default=True, description="whether the agent can use their own tools when executing the task")
    tool_res_as_final: bool = Field(default=False, description="when set True, tools res will be stored in the `TaskOutput`")

+    image: Optional[str] = Field(default=None, description="absolute file path or url in string")
+    file: Optional[str] = Field(default=None, description="absolute file path or url in string")
+    audio: Optional[str] = Field(default=None, description="absolute file path or url in string")
+
+    # test run
+    should_test_run: bool = Field(default=False)
+    human: bool = Field(default=False)
+    _pfg: Any = None
+
    # executing
    execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
    allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
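Note: the new fields make a task multimodal-aware and let it opt into a test run. A hedged construction sketch (values are placeholders; per the `execute` hunk below, `should_test_run` routes the first execution through the prompt-feedback graph, and `human=True` gates reform on user input):

    task = Task(
        description="Summarize the attached chart.",
        image="/abs/path/chart.png",   # absolute file path or URL, per the field description
        should_test_run=True,          # detour through _test_time_computation on execute()
        human=False,                   # True would set ReformTriggerEvent.USER_INPUT
    )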
@@ -301,9 +329,10 @@ class Task(BaseModel):

    # recording
    _tokens: int = 0
+    _tool_errors: int = 0
+    _format_errors: int = 0
+    _delegations: int = 0
    processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
-    tool_errors: int = 0
-    delegations: int = 0
    output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")


@@ -346,95 +375,8 @@ class Task(BaseModel):
        return self


-    def _draft_output_prompt(self, model_provider: str) -> str:
-        """
-        Draft prompts on the output format by converting `
-        """
-
-        output_prompt = ""
-
-        if self.pydantic_output:
-            output_prompt = f"""
-Your response MUST STRICTLY follow the given repsonse format:
-JSON schema: {str(self.pydantic_output)}
-"""
-
-        elif self.response_fields:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self._structure_response_format(model_provider=model_provider))
-            for item in self.response_fields:
-                if item:
-                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
-
-            output_prompt = f"""
-Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
-Ref. Output image: {output_formats_to_follow}
-"""
-        else:
-            output_prompt = "You MUST Return your response as a valid JSON serializable string, enclosed in double quotes. Do not use single quotes, trailing commas, or other non-standard JSON syntax."
-
-        return dedent(output_prompt)
-
-
-    def _draft_context_prompt(self, context: Any) -> str:
-        """
-        Create a context prompt from the given context in any format: a task object, task output object, list, dict.
-        """
-
-        context_to_add = None
-        if not context:
-            # Logger().log(level="error", color="red", message="Missing a context to add to the prompt. We'll return ''.")
-            return context_to_add
-
-        match context:
-            case str():
-                context_to_add = context
-
-            case Task():
-                if not context.output:
-                    res = context.execute()
-                    context_to_add = res.to_context_prompt()
-
-                else:
-                    context_to_add = context.output.raw
-
-            case TaskOutput():
-                context_to_add = context.to_context_prompt()
-
-
-            case dict():
-                context_to_add = str(context)
-
-            case list():
-                res = ", ".join([self._draft_context_prompt(context=item) for item in context])
-                context_to_add = res
-
-            case _:
-                pass
-
-        return dedent(context_to_add)
-
-
-    def _prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str:
-        """
-        Format the task prompt and cascade it to the agent.
-        """
-        output_prompt = self._draft_output_prompt(model_provider=model_provider)
-        task_slices = [self.description, output_prompt, ]
-
-        if context:
-            context_prompt = self._draft_context_prompt(context=context)
-            task_slices.insert(len(task_slices), f"Consider the following context when responding: {context_prompt}")
-
-        return "\n".join(task_slices)
-
-
    def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
-        """
-        Structure a response format either from`response_fields` or `pydantic_output`.
-        1 nested item is accepted.
-        """
+        """Structures `response_fields` or `pydantic_output` to a LLM response format."""

        from versionhq.task.structured_response import StructuredOutput

@@ -452,7 +394,7 @@
            required_fields.append(item.title)

        response_schema = {
-            "type":
+            "type": data_type,
            "properties": properties,
            "required": required_fields,
            "additionalProperties": False,
@@ -462,17 +404,49 @@
            "json_schema": { "name": "outcome", "schema": response_schema }
        }

-
        elif self.pydantic_output:
            response_format = StructuredOutput(response_format=self.pydantic_output, provider=model_provider)._format()

        return response_format


+    def _sanitize_raw_output(self, raw: str) -> Dict[str, str]:
+        """Sanitizes raw output and prepare for json.loads"""
+
+        import re
+        import ast
+
+        output, j = None, None
+        r = str(raw).strip()
+        r = r.replace("true", "True").replace("false", "False").replace("```json", '"').replace("```", '"').replace('\n', '').replace('\\', '')
+        r = re.sub("^'", '"', r)
+        r = re.sub(r"'\b", '"', r)
+        r = r.strip()
+        r = r.replace("  ", "")
+        try:
+            output = json.loads(r)
+        except:
+            try: j = json.dumps(eval(r))
+            except:
+                try: j = json.dumps(str(r))
+                except: j = r
+            output = json.loads(j)
+
+        if isinstance(output, dict):
+            return output
+        else:
+            try:
+                output = ast.literal_eval(j)
+            except:
+                output = ast.literal_eval(r)
+
+            return output if isinstance(output, dict) else { "output": str(r) }
+
+
    def _create_json_output(self, raw: str) -> Dict[str, Any]:
-        """
-
-
+        """Creates JSON output from the raw output."""
+
+        output = None

        if raw is None or raw == "":
            Logger().log(level="warning", message="The model returned an empty response. Returning an empty dict.", color="yellow")
@@ -480,27 +454,14 @@
            return output

        try:
-
-            j = json.dumps(eval(r))
-            output = json.loads(j)
+            output = json.loads(raw)
            if isinstance(output, dict):
                return output
-
            else:
-
-
-                output = json.loads(j)
-
-                if isinstance(output, dict):
-                    return output
-
-                else:
-                    import ast
-                    output = ast.literal_eval(r)
-                    return output if isinstance(output, dict) else { "output": str(r) }
-
+                output = self._sanitize_raw_output(raw=raw)
+                return output
        except:
-            output =
+            output = self._sanitize_raw_output(raw=raw)
            return output


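Note: `_create_json_output` now tries a plain `json.loads` first and only falls back to `_sanitize_raw_output`, which strips code fences, normalizes quotes and Python literals, then re-parses. A simplified sketch of the intent on typical model output (the input string is hypothetical, and the real sanitizer is more aggressive than this):

    import json

    raw = '```json\n{"answer": "42"}\n```'      # fenced output a model might return
    try:
        out = json.loads(raw)                   # fast path; fails on the fences
    except json.JSONDecodeError:
        cleaned = raw.replace("```json", "").replace("```", "").strip()
        out = json.loads(cleaned)               # sanitize-then-parse fallback
    assert out == {"answer": "42"}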
@@ -620,23 +581,32 @@


    # task execution
-    def execute(
-
-
-        """
-        A main method to handle task execution. Build an agent when the agent is not given.
-        """
+    def execute(self, type: TaskExecutionType = None, agent: "vhq.Agent" = None, context: Any = None) -> TaskOutput | Future[TaskOutput]:
+        """A main method to handle task execution."""
+
        type = type if type else self.execution_type if self.execution_type else TaskExecutionType.SYNC
+        agent = agent if agent else self._build_agent_from_task(task_description=self.description)
+        res = None

-        if not
-
+        if (self.should_test_run or agent.self_learn) and not self._pfg:
+            res = self._test_time_computation(agent=agent, context=context)
+            return res
+
+        # if self._pfg:
+        #     res, all_outputs = self.pfg.activate()
+        #     tokens, latency = self.pfg.usage
+        #     self._tokens = tokens
+        #     res.latency = latency
+        #     return res

        match type:
            case TaskExecutionType.SYNC:
-
+                res = self._execute_sync(agent=agent, context=context)

            case TaskExecutionType.ASYNC:
-
+                res = self._execute_async(agent=agent, context=context)
+
+        return res


    def _execute_sync(self, agent, context: Optional[Any] = None) -> TaskOutput:
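Note: with the rewritten `execute`, callers no longer wire up the execution path by hand: the method builds an agent if none is given, optionally detours into the test run, then dispatches on execution type. A hedged call sketch:

    res = task.execute()                                 # sync by default -> TaskOutput
    future = task.execute(type=TaskExecutionType.ASYNC)  # async path -> Future[TaskOutput]
    output = future.result()                             # assumption: standard concurrent.futures usage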
@@ -657,14 +627,14 @@


    def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
-        """
-
-        """
+        """A core method to execute a single task."""
+
        task_output: InstanceOf[TaskOutput] = None
        raw_output: str = None
        tool_output: str | list = None
        task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
        started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()
+        user_prompt, dev_prompt = None, None

        if self.tools:
            for item in self.tools:
@@ -674,18 +644,18 @@
        if self.allow_delegation == True:
            agent_to_delegate = self._select_agent_to_delegate(agent=agent)
            agent = agent_to_delegate
-            self.
+            self._delegations += 1

        if self.tool_res_as_final == True:
            started_at = datetime.datetime.now()
-            tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            user_prompt, dev_prompt, tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
            raw_output = str(tool_output) if tool_output else ""
            ended_at = datetime.datetime.now()
            task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=raw_output)

        else:
            started_at = datetime.datetime.now()
-            raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+            user_prompt, dev_prompt, raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
            ended_at = datetime.datetime.now()

            json_dict_output = self._create_json_output(raw=raw_output)
@@ -714,6 +684,11 @@
        # )
        # self._save_file(content)

+        if self._pfg:
+            index = self._pfg.index
+            self._pfg.user_prompts.update({ index: user_prompt })
+            self._pfg.dev_prompts.update({ index: dev_prompt })
+
        if raw_output:
            if self.should_evaluate:
                task_output.evaluate(task=self)
@@ -734,6 +709,31 @@
        return task_output


+    def _test_time_computation(self, agent, context: Optional[Any]) -> TaskOutput | None:
+        """Handles test-time computation."""
+
+        from versionhq.task_graph.model import ReformTriggerEvent
+        from versionhq._prompt.model import Prompt
+        from versionhq._prompt.auto_feedback import PromptFeedbackGraph
+
+        prompt = Prompt(task=self, agent=agent, context=context)
+        pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
+        pfg = pfg.set_up_graph()
+        self._pfg = pfg
+
+        # try:
+        if self._pfg and self.output is None:
+            res, _ = self._pfg.activate()
+            tokens, latency = self._pfg.usage
+            self._tokens = tokens
+            res.latency = latency
+            return res
+
+        # except:
+        #     Logger().log(level="error", message="Failed to execute the task.", color="red")
+        #     return None, None
+
+
    @property
    def key(self) -> str:
        output_format = "json" if self.response_fields else "pydantic" if self.pydantic_output is not None else "raw"